1 /*
  2  * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "gc/parallel/mutableSpace.hpp"
 26 #include "gc/shared/pretouchTask.hpp"
 27 #include "gc/shared/spaceDecorator.hpp"
 28 #include "memory/iterator.inline.hpp"
 29 #include "memory/universe.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "runtime/atomicAccess.hpp"
 32 #include "runtime/javaThread.hpp"
 33 #include "runtime/safepoint.hpp"
 34 #include "utilities/align.hpp"
 35 #include "utilities/macros.hpp"
 36 
// Construct an empty, unbacked space; initialize() must be called to give
// it boundaries before use.  page_size is the OS page size used later for
// NUMA page setup and pretouching.
MutableSpace::MutableSpace(size_t page_size) :
  _last_setup_region(),
  _page_size(page_size),
  _bottom(nullptr),
  _top(nullptr),
  _end(nullptr) {}
 43 
 44 void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
 45   assert(is_aligned(mr.start(), page_size()), "precondition");
 46   assert(is_aligned(mr.end(), page_size()), "precondition");
 47 
 48   if (mr.is_empty()) {
 49     return;
 50   }
 51 
 52   if (clear_space) {
 53     // Prefer page reallocation to migration.
 54     os::disclaim_memory((char*) mr.start(), mr.byte_size());
 55   }
 56   os::numa_make_global((char*) mr.start(), mr.byte_size());
 57 }
 58 
// (Re)initialize this space to cover mr.
//
//   clear_space      - reset top to bottom, making the space empty
//   mangle_space     - with ZapUnusedHeapArea, fill the cleared area with
//                      the debug pattern (only used when clear_space)
//   setup_pages      - perform NUMA page binding and/or pretouching for the
//                      parts of mr not covered by a previous setup
//   pretouch_workers - worker threads used to parallelize pretouching
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages,
                              WorkerThreads* pretouch_workers) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        // No overlap with the previously set-up region; use an empty
        // placeholder at mr.end() so head below covers all of mr.
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      // head: the part of mr before the intersection;
      // tail: the part of mr after the intersection.
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if(intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        // NUMASpaceResizeRate is in bytes per resize; convert to words.
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        // Scale head and tail proportionally so together they stay within
        // the configured setup rate.
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      // Only the (possibly clipped) regions adjacent to the intersection
      // need fresh page setup.
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }

    if (AlwaysPreTouch) {
      PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
                             page_size(), pretouch_workers);

      PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
                             page_size(), pretouch_workers);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  // When expanding concurrently with callers of cas_allocate, setting end
  // makes the new space available for allocation by other threads.  So this
  // assignment must follow all other configuration and initialization that
  // might be done for expansion.
  AtomicAccess::release_store(end_addr(), mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}
132 
133 void MutableSpace::clear(bool mangle_space) {
134   set_top(bottom());
135   if (ZapUnusedHeapArea && mangle_space) {
136     mangle_unused_area();
137   }
138 }
139 
140 #ifndef PRODUCT
141 
142 void MutableSpace::mangle_unused_area() {
143   mangle_region(MemRegion(_top, _end));
144 }
145 
// Fill mr with the canonical mangle pattern via the shared SpaceMangler.
// Debug builds only.
void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}
149 
150 #endif
151 
// Lock-free bump-pointer allocation of `size` words.  Called concurrently
// by multiple threads; each attempts to install a new top via CAS and
// retries on contention.  Returns the start of the allocated chunk, or
// nullptr if there is not enough free space.
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    // Read top before end, else the range check may pass when it shouldn't.
    // If end is read first, other threads may advance end and top such that
    // current top > old end and current top + size > current end.  Then
    // pointer_delta underflows, allowing installation of top > current end.
    HeapWord* obj = AtomicAccess::load_acquire(top_addr());
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = AtomicAccess::cmpxchg(top_addr(), obj, new_top);
      // result can be one of two:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of the top is returned.
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned(obj) && is_object_aligned(new_top),
             "checking alignment");
      return obj;
    } else {
      // Not enough room left in the space.
      return nullptr;
    }
  } while (true);
}
176 
177 // Try to deallocate previous allocation. Returns true upon success.
178 bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
179   HeapWord* expected_top = obj + size;
180   return AtomicAccess::cmpxchg(top_addr(), expected_top, obj) == expected_top;
181 }
182 
183 void MutableSpace::oop_iterate(OopIterateClosure* cl) {
184   HeapWord* obj_addr = bottom();
185   HeapWord* t = top();
186   // Could call objects iterate, but this is easier.
187   while (obj_addr < t) {
188     obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(cl);
189   }
190 }
191 
192 void MutableSpace::object_iterate(ObjectClosure* cl) {
193   HeapWord* p = bottom();
194   while (p < top()) {
195     oop obj = cast_to_oop(p);
196     // When promotion-failure occurs during Young GC, eden/from space is not cleared,
197     // so we can encounter objects with "forwarded" markword.
198     // They are essentially dead, so skipping them
199     if (obj->is_forwarded()) {
200       assert(!obj->is_self_forwarded(), "must not be self-forwarded");
201       // It is safe to use the forwardee here. Parallel GC only uses
202       // header-based forwarding during promotion. Full GC doesn't
203       // use the object header for forwarding at all.
204       p += obj->forwardee()->size();
205     } else {
206       cl->do_object(obj);
207       p += obj->size();
208     }
209   }
210 }
211 
// Print a one-line summary of the space to the default tty stream.
void MutableSpace::print_short() const { print_short_on(tty); }
213 void MutableSpace::print_short_on( outputStream* st) const {
214   st->print("space %zuK, %d%% used", capacity_in_bytes() / K,
215             (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
216 }
217 
// Print the full space description (summary plus boundaries) to tty.
void MutableSpace::print() const { print_on(tty, ""); }
// Print the prefix, the one-line summary, and the space boundaries
// [bottom, top, end) to st.
void MutableSpace::print_on(outputStream* st, const char* prefix) const {
  st->print("%s", prefix);
  // Qualified call: always use this class's summary format, even from
  // subclasses.
  MutableSpace::print_short_on(st);
  st->print_cr(" [" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT ")",
                 p2i(bottom()), p2i(top()), p2i(end()));
}
225 
226 void MutableSpace::verify() {
227   HeapWord* p = bottom();
228   HeapWord* t = top();
229   while (p < t) {
230     oopDesc::verify(cast_to_oop(p));
231     p += cast_to_oop(p)->size();
232   }
233   guarantee(p == top(), "end of last object must match end of space");
234 }