/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/shared/pretouchTask.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

MutableSpace::MutableSpace(size_t alignment) :
  _last_setup_region(),
  _alignment(alignment),
  _bottom(nullptr),
  _top(nullptr),
  _end(nullptr)
{
  assert(MutableSpace::alignment() % os::vm_page_size() == 0,
         "Space should be aligned");
}

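// Set up NUMA placement for the page-aligned portion of mr: make the pages
// accessible from all NUMA nodes.  When clear_space is set, the pages are
// released first so they can be reallocated on next touch rather than
// migrated between nodes.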
void MutableSpace::numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space) {
  if (!mr.is_empty()) {
    HeapWord *start = align_up(mr.start(), page_size);
    HeapWord *end =   align_down(mr.end(), page_size);
    if (end > start) {
      size_t size = pointer_delta(end, start, sizeof(char));
      if (clear_space) {
        // Prefer page reallocation to migration.
        os::free_memory((char*)start, size, page_size);
      }
      os::numa_make_global((char*)start, size);
    }
  }
}

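// (Re)initialize the space to cover mr.  Page setup (NUMA placement and
// pretouching) is only applied to the parts of mr that were not covered by
// the previous setup, and may be rate-limited by NUMASpaceResizeRate, so
// repeated resizing does not redo work on pages that are already placed.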
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages,
                              WorkerThreads* pretouch_workers) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization, don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    size_t page_size = alignment();

    if (UseNUMA) {
      numa_setup_pages(head, page_size, clear_space);
      numa_setup_pages(tail, page_size, clear_space);
    }

    if (AlwaysPreTouch) {
      size_t pretouch_page_size = UseLargePages ? page_size : os::vm_page_size();
      PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
                             pretouch_page_size, pretouch_workers);

      PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
                             pretouch_page_size, pretouch_workers);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  // When expanding concurrently with callers of cas_allocate, setting end
  // makes the new space available for allocation by other threads.  So this
  // assignment must follow all other configuration and initialization that
  // might be done for expansion.
  Atomic::release_store(end_addr(), mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}

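// Reset top to bottom, making the entire space free.  In non-product builds
// the unused area is mangled when ZapUnusedHeapArea is set and mangling is
// requested.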
void MutableSpace::clear(bool mangle_space) {
  set_top(bottom());
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

#ifndef PRODUCT

void MutableSpace::mangle_unused_area() {
  mangle_region(MemRegion(_top, _end));
}

void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}

#endif

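// Lock-free bump-pointer allocation: atomically move top forward by size
// words using compare-and-swap, retrying on contention.  Returns nullptr if
// the remaining space is too small to satisfy the request.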
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    // Read top before end, else the range check may pass when it shouldn't.
    // If end is read first, other threads may advance end and top such that
    // current top > old end and current top + size > current end.  Then
    // pointer_delta underflows, allowing installation of top > current end.
    HeapWord* obj = Atomic::load_acquire(top_addr());
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
      // cmpxchg returns the value top held at the time of the exchange:
      //  - the old value obj means the exchange succeeded;
      //  - any other value means another thread updated top first.
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned(obj) && is_object_aligned(new_top),
             "checking alignment");
      return obj;
    } else {
      return nullptr;
    }
  } while (true);
}

// Try to deallocate previous allocation. Returns true upon success.
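// Succeeds only if top still equals obj + size, i.e. no other thread has
// allocated since obj was handed out.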
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}

// Only used by oldgen allocation.
bool MutableSpace::needs_expand(size_t word_size) const {
  assert_lock_strong(PSOldGenExpand_lock);
  // Holding the lock means end is stable.  So while top may be advancing
  // via concurrent allocations, there is no need to order the reads of top
  // and end here, unlike in cas_allocate.
  return pointer_delta(end(), top()) < word_size;
}

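// Apply cl to every oop field of every object in the used part of the
// space, [bottom, top).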
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(cl);
  }
}

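// Walk all objects in [bottom, top), applying cl to objects that are not
// forwarded.  COMPACT_HEADERS selects how the size of a forwarded object is
// obtained: with compact object headers the mark word (which normally holds
// the klass) has been overwritten by the forwarding pointer, so the size
// must be read from the forwardee.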
template<bool COMPACT_HEADERS>
void MutableSpace::object_iterate_impl(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    oop obj = cast_to_oop(p);
    // When promotion-failure occurs during Young GC, eden/from space is not cleared,
    // so we can encounter objects with "forwarded" markword.
    // They are essentially dead, so we skip them.
    if (!obj->is_forwarded()) {
      cl->do_object(obj);
      p += obj->size();
    } else {
      assert(obj->forwardee() != obj, "must not be self-forwarded");
      if (COMPACT_HEADERS) {
        // It is safe to use the forwardee here. Parallel GC only uses
        // header-based forwarding during promotion. Full GC doesn't
        // use the object header for forwarding at all.
        p += obj->forwardee()->size();
      } else {
        p += obj->size();
      }
    }
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  if (UseCompactObjectHeaders) {
    object_iterate_impl<true>(cl);
  } else {
    object_iterate_impl<false>(cl);
  }
}

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

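// Verify all objects in the used part of the space.  Objects must be
// parsable contiguously from bottom() up to exactly top().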
void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  while (p < t) {
    oopDesc::verify(cast_to_oop(p));
    p += cast_to_oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}