/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.inline.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/partialArraySplitter.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/flatArrayKlass.inline.hpp"
#include "utilities/checkedCast.hpp"

PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = nullptr;
PSPromotionManager::PSScannerTasksQueueSet* PSPromotionManager::_stack_array_depth = nullptr;
PreservedMarksSet* PSPromotionManager::_preserved_marks_set = nullptr;
PSOldGen* PSPromotionManager::_old_gen = nullptr;
MutableSpace* PSPromotionManager::_young_space = nullptr;
PartialArrayStateManager* PSPromotionManager::_partial_array_state_manager = nullptr;

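// One-time setup of the state shared by all promotion managers: the padded
// manager array, the depth-first task-queue set, the partial-array state
// manager, and the preserved-marks set. The "initialize twice" asserts below
// ensure this runs exactly once.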
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  const uint promotion_manager_num = ParallelGCThreads;

  assert(_partial_array_state_manager == nullptr, "Attempt to initialize twice");
  _partial_array_state_manager
    = new PartialArrayStateManager(promotion_manager_num);

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == nullptr, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);

  _stack_array_depth = new PSScannerTasksQueueSet(promotion_manager_num);

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < promotion_manager_num; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread does not get a dedicated PSPromotionManager; it shares the
  // first manager with GC worker 0 (see vm_thread_promotion_manager()).

  assert(_preserved_marks_set == nullptr, "Attempt to initialize twice");
  _preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */);
  _preserved_marks_set->init(promotion_manager_num);
  for (uint i = 0; i < promotion_manager_num; i += 1) {
    _manager_array[i].register_preserved_marks(_preserved_marks_set->get(i));
  }
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != nullptr, "Sanity");
  return &_manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != nullptr, "Sanity");
  return &_manager_array[0];
}

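// Per-collection setup: re-sample to-space, which may have changed since the
// last collection, and reset each manager's per-collection state.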
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _preserved_marks_set->assert_empty();
  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads; i++) {
    manager_array(i)->reset();
  }
}

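// Per-collection teardown: verify the task queues are empty, report any
// promotion failures to the GC tracer, and flush each manager's LABs and
// string-dedup requests. Returns true if any promotion failed.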
bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(print_and_reset_taskqueue_stats());
  for (uint i = 0; i < ParallelGCThreads; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
    manager->flush_string_dedup_requests();
  }
  // All PartialArrayStates have been returned to the allocator, since the
  // claimed_stack_depths are all empty. Leave them there for use by future
  // collections.

  if (!promotion_failure_occurred) {
    // If there was no promotion failure, the preserved mark stacks
    // should be empty.
    _preserved_marks_set->assert_empty();
  }
  return promotion_failure_occurred;
}

#if TASKQUEUE_STATS

void PSPromotionManager::print_and_reset_taskqueue_stats() {
  stack_array_depth()->print_and_reset_taskqueue_stats("Oop Queue");

  auto get_pa_stats = [&](uint i) {
    return manager_array(i)->partial_array_task_stats();
  };
  PartialArrayTaskStats::log_set(ParallelGCThreads, get_pa_stats,
                                 "Partial Array Task Stats");
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    get_pa_stats(i)->reset();
  }
}

PartialArrayTaskStats* PSPromotionManager::partial_array_task_stats() {
  return _partial_array_splitter.stats();
}

#endif // TASKQUEUE_STATS

// Most members are initialized either by initialize() or reset().
PSPromotionManager::PSPromotionManager()
  : _partial_array_splitter(_partial_array_state_manager, ParallelGCThreads, ParGCArrayScanChunk)
{
  // Point the old lab at the old gen's start array, so object starts are
  // recorded when the lab promotes objects.
  _old_lab.set_start_array(old_gen()->start_array());

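  // With a single GC thread there is no one to steal work, so drain_stacks()
  // always drains completely (threshold 0). With more threads, leave up to
  // GCDrainStackTargetSize entries on the stack so they remain available
  // for stealing.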
  if (ParallelGCThreads == 1) {
    _target_stack_size = 0;
  } else {
    _target_stack_size = GCDrainStackTargetSize;
  }

  // Only chunk arrays that are at least 1.5x the chunk size; smaller arrays
  // are not worth splitting.
  _min_array_size_for_chunking = (3 * ParGCArrayScanChunk / 2);

  _preserved_marks = nullptr;

  reset();
}

void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  // Do not prefill the LABs; that would only waste heap space.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_has_alloc_failure = false;
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();
}

void PSPromotionManager::register_preserved_marks(PreservedMarks* preserved_marks) {
  assert(_preserved_marks == nullptr, "do not set it twice");
  _preserved_marks = preserved_marks;
}

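// Called after a scavenge in which some promotions failed; reinstalls the
// markWords saved by oop_promotion_failed(), in parallel using the heap's
// workers.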
void PSPromotionManager::restore_preserved_marks() {
  _preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
}

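// Process queued tasks until this queue is sufficiently drained. Overflow
// tasks are pushed back onto the shared task queue when possible, so other
// threads can steal them; local tasks are popped only while more than
// `threshold` entries remain (zero when totally draining).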
void PSPromotionManager::drain_stacks(bool totally_drain) {
  const uint threshold = totally_drain ? 0 : _target_stack_size;

  PSScannerTasksQueue* const tq = claimed_stack_depth();
  do {
    ScannerTask task;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(task)) {
      if (!tq->try_push_to_taskqueue(task)) {
        process_popped_location_depth(task, false);
      }
    }

    while (tq->pop_local(task, threshold)) {
      process_popped_location_depth(task, false);
    }
  } while (!tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed()) {
    _young_lab.flush();
  }

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed()) {
    _old_lab.flush();
  }

  // Let PSScavenge know if we overflowed.
  if (_young_gen_is_full || _young_gen_has_alloc_failure) {
    PSScavenge::set_survivor_overflow(true);
  }
}

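// Scan the elements in [start, end) of the already-copied array `obj`,
// claiming or forwarding each referenced object. T is narrowOop or oop,
// matching UseCompressedOops.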
template <class T>
void PSPromotionManager::process_array_chunk_work(oop obj, int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    claim_or_forward_depth(p);
    ++p;
  }
}

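// Process one chunk of a partially scanned object array. The splitter's
// claim() returns the segment to scan here and may release the state once
// the array has been fully claimed, hence destination() is read first.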
void PSPromotionManager::process_array_chunk(PartialArrayState* state, bool stolen) {
  // Access before release by claim().
  oop new_obj = state->destination();
  PartialArraySplitter::Claim claim =
    _partial_array_splitter.claim(state, &_claimed_stack_depth, stolen);
  int start = checked_cast<int>(claim._start);
  int end = checked_cast<int>(claim._end);
  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(new_obj, start, end);
  } else {
    process_array_chunk_work<oop>(new_obj, start, end);
  }
}

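// Begin chunked scanning of a newly promoted object array: the splitter
// enqueues partial-array tasks for the tail, while the initial chunk is
// scanned immediately below.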
void PSPromotionManager::push_objArray(oop old_obj, oop new_obj) {
  assert(old_obj->is_forwarded(), "precondition");
  assert(old_obj->forwardee() == new_obj, "precondition");
  assert(new_obj->is_objArray(), "precondition");

  objArrayOop to_array = objArrayOop(new_obj);
  size_t array_length = to_array->length();
  size_t initial_chunk_size =
    // The source array is unused when processing states.
    _partial_array_splitter.start(&_claimed_stack_depth, nullptr, to_array, array_length);
  int end = checked_cast<int>(initial_chunk_size);
  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(to_array, 0, end);
  } else {
    process_array_chunk_work<oop>(to_array, 0, end);
  }
}

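// Handle a failed promotion: the object stays in place in the young gen.
// The thread that wins the race to self-forward the object records the
// failure, scans the object's contents, and preserves the original markWord
// so it can be restored afterwards; losing threads simply return the
// winner's forwardee.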
oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started. If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->forward_to_self_atomic(obj_mark) == nullptr) {
    // We won any races; we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    _promotion_failed_info.register_copy_failure(obj->size());

    ContinuationGCSupport::transform_stack_chunk(obj);

    push_contents(obj);

    // Save the markWord of promotion-failed objs in _preserved_marks for later
    // restoration. This way we don't have to walk the young-gen to locate
    // these promotion-failed objs.
    _preserved_marks->push_always(obj, obj_mark);
  } else {
    // We lost; someone else "owns" this object.
    guarantee(obj->is_forwarded(), "Object must be forwarded if the CAS failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

  return obj;
}