/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/shared/pretouchTask.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

MutableSpace::MutableSpace(size_t alignment) :
  _mangler(nullptr),
  _last_setup_region(),
  _alignment(alignment),
  _bottom(nullptr),
  _top(nullptr),
  _end(nullptr)
{
  assert(MutableSpace::alignment() % os::vm_page_size() == 0,
         "Space should be aligned");
  _mangler = new MutableSpaceMangler(this);
}

MutableSpace::~MutableSpace() {
  delete _mangler;
}

void MutableSpace::numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space) {
  if (!mr.is_empty()) {
    HeapWord *start = align_up(mr.start(), page_size);
    HeapWord *end =   align_down(mr.end(), page_size);
    if (end > start) {
      size_t size = pointer_delta(end, start, sizeof(char));
      if (clear_space) {
        // Prefer page reallocation to migration.
        os::free_memory((char*)start, size, page_size);
      }
      os::numa_make_global((char*)start, size);
    }
  }
}
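
// Worked example for the clipping above (illustrative values, assuming a
// 4K page size): for mr = [0x10000400, 0x10003c00), align_up/align_down
// shrink the region to [0x10001000, 0x10003000). Only pages lying wholly
// inside mr are freed and made NUMA-global; the partial pages at either
// end are left untouched.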

void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages,
                              WorkerThreads* pretouch_workers) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization, don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the previously set-up region?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    size_t page_size = alignment();

    if (UseNUMA) {
      numa_setup_pages(head, page_size, clear_space);
      numa_setup_pages(tail, page_size, clear_space);
    }

    if (AlwaysPreTouch) {
      size_t pretouch_page_size = UseLargePages ? page_size : os::vm_page_size();
      PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
                             pretouch_page_size, pretouch_workers);

      PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
                             pretouch_page_size, pretouch_workers);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  // When expanding concurrently with callers of cas_allocate, setting end
  // makes the new space available for allocation by other threads.  So this
  // assignment must follow all other configuration and initialization that
  // might be done for expansion.
  Atomic::release_store(end_addr(), mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}
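
// A worked illustration of the resize path above (hypothetical values):
// suppose the space previously covered [A, B) and now covers [A - h, B + t).
// The intersection with last_setup_region() is [A, B), so head = [A - h, A)
// and tail = [B, B + t): only the newly exposed pages receive NUMA placement
// and pretouch work. If NUMASpaceResizeRate is set (and AlwaysPreTouch is
// off), h and t are additionally scaled by setup_rate_words / (h + t),
// capped at their full size, bounding the words handled per resize.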

void MutableSpace::clear(bool mangle_space) {
  set_top(bottom());
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

#ifndef PRODUCT
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void MutableSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void MutableSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}

void MutableSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}

void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}

void MutableSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}

void MutableSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
#endif

HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    // Read top before end, else the range check may pass when it shouldn't.
    // If end is read first, other threads may advance end and top such that
    // current top > old end and current top + size > current end.  Then
    // pointer_delta underflows, allowing installation of top > current end.
    HeapWord* obj = Atomic::load_acquire(top_addr());
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
      // result is either the old top value (the exchange succeeded) or the
      // value installed by a competing thread (the exchange failed).
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned(obj) && is_object_aligned(new_top),
             "checking alignment");
      return obj;
    } else {
      return nullptr;
    }
  } while (true);
}
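
// A minimal usage sketch (hypothetical caller, shown for illustration only;
// the real callers live in the ParallelGC allocation paths):
//
//   HeapWord* p = space->cas_allocate(word_size);
//   if (p == nullptr) {
//     // The space is full. cas_allocate never blocks; the caller must
//     // expand the space or trigger a collection, then retry.
//   }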

// Try to deallocate the previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}
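
// Illustration: if a thread allocates [obj, obj + size) and no other thread
// has bumped top since, then top == obj + size and the CAS rolls top back to
// obj. If another thread allocated in the meantime, top has moved past
// obj + size, the CAS fails, and the caller must dispose of the unused range
// some other way (e.g. by filling it with a dummy object).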

// Only used by oldgen allocation.
bool MutableSpace::needs_expand(size_t word_size) const {
  assert_lock_strong(PSOldGenExpand_lock);
  // Holding the lock means end is stable.  So while top may be advancing
  // via concurrent allocations, there is no need to order the reads of top
  // and end here, unlike in cas_allocate.
  return pointer_delta(end(), top()) < word_size;
}
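
// Expected call pattern (a sketch; per the comment above, the real caller
// is on the oldgen allocation path):
//
//   // With PSOldGenExpand_lock held, end is stable:
//   if (space->needs_expand(word_size)) {
//     // grow the underlying virtual space, then retry the allocation
//   }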

void MutableSpace::oop_iterate(OopIterateClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but walking the objects directly is simpler.
  while (obj_addr < t) {
    obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(cl);
  }
}

template<bool COMPACT_HEADERS>
void MutableSpace::object_iterate_impl(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    oop obj = cast_to_oop(p);
    // When a promotion failure occurs during young GC, eden/from space is not
    // cleared, so we can encounter objects with a "forwarded" markword.
    // They are essentially dead, so we skip them.
    if (!obj->is_forwarded()) {
      cl->do_object(obj);
      p += obj->size();
    } else {
      assert(obj->forwardee() != obj, "must not be self-forwarded");
      if (COMPACT_HEADERS) {
        // It is safe to use the forwardee here. Parallel GC only uses
        // header-based forwarding during promotion. Full GC doesn't
        // use the object header for forwarding at all.
        p += obj->forwardee()->size();
      } else {
        p += obj->size();
      }
    }
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  if (UseCompactObjectHeaders) {
    object_iterate_impl<true>(cl);
  } else {
    object_iterate_impl<false>(cl);
  }
}
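
// Worked illustration of the forwarded-object skip (assumed scenario): after
// a promotion failure, an object at p may have a markword pointing at its
// copy in to-space. With compact object headers the original header, and
// hence the size information, lives in that forwardee, so the size must be
// read there; with full headers the original object still carries a usable
// header and obj->size() is safe.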

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT ")",
                 p2i(bottom()), p2i(top()), p2i(end()));
}

void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  while (p < t) {
    oopDesc::verify(cast_to_oop(p));
    p += cast_to_oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}