/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/atomic.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _live(0),
  _region_count(0),
  _old_garbage(0),
  _preselected_regions(nullptr),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on the OS virtual
  // memory subsystem for mapping not-yet-written-to pages to a single physical backing
  // page, but this is not guaranteed, and would confuse NMT and other memory accounting tools.
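  //
  // With this biasing, an in-cset check for a heap address reduces to a single indexed
  // load, roughly (illustrative sketch only; see the class header for the actual accessors):
  //
  //   in_cset(addr)  ~  _biased_cset_map[uintptr_t(addr) >> _region_size_bytes_shift] != 0
  //
  // and nullptr naturally indexes slot zero, which is why the zero page is committed
  // and zeroed below.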

  MemTracker::record_virtual_memory_tag(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by the heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;
  size_t live    = r->get_live_data_bytes();
  size_t garbage = r->garbage();
  size_t free    = r->free();
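  // Per-generation accounting: live bytes to be evacuated, live bytes eligible for
  // promotion (young regions at or above the tenuring threshold), free space reclaimed
  // from young regions, and garbage found in old regions.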
  if (r->is_young()) {
    _young_bytes_to_evacuate += live;
    _young_available_bytes_collected += free;
    if (ShenandoahHeap::heap()->mode()->is_generational() && r->age() >= ShenandoahGenerationalHeap::heap()->age_census()->tenuring_threshold()) {
      _young_bytes_to_promote += live;
    }
  } else if (r->is_old()) {
    _old_bytes_to_evacuate += live;
    _old_garbage += garbage;
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += garbage;
  _used += r->used();
  _live += live;
  // Update the region status too. The state transition is validated internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    assert(!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;
  _live = 0;

  _region_count = 0;
  _current_index = 0;

  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;
  _old_bytes_to_evacuate = 0;

  _young_available_bytes_collected = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when the collection set contains only
  // a few regions. In that case, it is cheaper to check is_in first, before
  // hitting the (potentially contended) atomic index.
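  //
  // Workers scan forward from the shared claim index. When a worker finds an in-cset
  // region, it races to advance the index with a CAS: the winner takes that region,
  // while losers resume scanning from the newly published index.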

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

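// Non-atomic counterpart of claim_next(): used only by the VM thread at a safepoint,
// so plain loads and stores of the claim index are sufficient.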
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}


void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set: Regions: "
                SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(),
                byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()),
                byte_size_in_proper_unit(live()),    proper_unit_for_byte_size(live()),
                byte_size_in_proper_unit(used()),    proper_unit_for_byte_size(used()));

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions++;)
    }
  }
  assert(regions == count(), "Must match");
}