/*
 * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc/parallel/psPromotionManager.hpp"

#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionLAB.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"

inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
  assert(_manager_array != nullptr, "access of null manager_array");
  assert(index < ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}

inline void PSPromotionManager::push_depth(ScannerTask task) {
  claimed_stack_depth()->push(task);
}

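// Queue the slot p for deferred processing if it still refers to a young
// object. The actual copy and forwarding happen when the task is popped
// (see process_popped_location_depth below); here we only prefetch the
// referent's mark word, which the copy path will later CAS on.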
template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (PSScavenge::is_obj_in_young(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(!PSScavenge::is_obj_in_to_space(obj), "revisiting object?");
    Prefetch::write(obj->mark_addr(), 0);
    push_depth(ScannerTask(p));
  }
}

inline void PSPromotionManager::promotion_trace_event(oop new_obj, Klass* klass,
                                                      size_t obj_size,
                                                      uint age, bool tenured,
                                                      const PSPromotionLAB* lab) {
  // Skip if memory allocation failed
  if (new_obj != nullptr) {
    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();

    if (lab != nullptr) {
      // Promotion of object through newly allocated PLAB
      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        size_t lab_size = lab->capacity();
        gc_tracer->report_promotion_in_new_plab_event(klass, obj_bytes,
                                                      age, tenured, lab_size);
      }
    } else {
      // Promotion of object directly to heap
      if (gc_tracer->should_report_promotion_outside_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        gc_tracer->report_promotion_outside_plab_event(klass, obj_bytes,
                                                       age, tenured);
      }
    }
  }
}

class PSPushContentsClosure: public BasicOopIterateClosure {
  PSPromotionManager* _pm;
 public:
  PSPushContentsClosure(PSPromotionManager* pm) : BasicOopIterateClosure(PSScavenge::reference_processor()), _pm(pm) {}

  template <typename T> void do_oop_work(T* p) {
    _pm->claim_or_forward_depth(p);
  }

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

//
// This closure specialization will override the one that is defined in
// instanceRefKlass.inline.cpp. It swaps the order of oop_oop_iterate and
// oop_oop_iterate_ref_processing. Unfortunately G1 and Parallel behave
// significantly better (especially in the Derby benchmark) using the
// opposite order of these function calls.
//
template <>
inline void InstanceRefKlass::oop_oop_iterate_reverse<oop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) {
  oop_oop_iterate_ref_processing<oop>(obj, closure);
  InstanceKlass::oop_oop_iterate_reverse<oop>(obj, closure);
}

template <>
inline void InstanceRefKlass::oop_oop_iterate_reverse<narrowOop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) {
  oop_oop_iterate_ref_processing<narrowOop>(obj, closure);
  InstanceKlass::oop_oop_iterate_reverse<narrowOop>(obj, closure);
}

inline void PSPromotionManager::push_contents(oop obj) {
  if (!obj->klass()->is_typeArray_klass()) {
    PSPushContentsClosure pcc(this);
    obj->oop_iterate_backwards(&pcc);
  }
}

inline void PSPromotionManager::push_contents_bounded(oop obj, HeapWord* left, HeapWord* right) {
  PSPushContentsClosure pcc(this);
  obj->oop_iterate(&pcc, MemRegion(left, right));
}

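// Entry point for evacuating an object: returns the forwardee, either by
// copying the object ourselves (slow path below) or by returning the
// forwarding pointer another worker has already installed in the mark word.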
template<bool promote_immediately>
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(should_scavenge(&o), "Sanity");

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time.
  markWord m = o->mark();
  if (!m.is_forwarded()) {
    return copy_unmarked_to_survivor_space<promote_immediately>(o, m);
  } else {
    // Return the already installed forwardee.
    return o->forwardee(m);
  }
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
template<bool promote_immediately>
inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
                                                               markWord test_mark) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = nullptr;
  bool new_obj_is_tenured = false;

  // NOTE: With compact headers, it is not safe to load the Klass* from o, because
  // that would access the mark-word, which concurrent workers might change at any
  // time. That mark word would refer to a forwardee, which may not yet have
  // completed copying. Therefore we must load the Klass* from the mark-word that
  // we already loaded. This is safe, because we only enter here if not yet forwarded.
  assert(!test_mark.is_forwarded(), "precondition");
  Klass* klass = UseCompactObjectHeaders
      ? test_mark.klass()
      : o->klass();

  size_t old_obj_size = o->size_given_mark_and_klass(test_mark, klass);
  size_t new_obj_size = o->copy_size(old_obj_size, test_mark);

  // Find the object's age, MT safe.
  uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark.displaced_mark_helper().age() : test_mark.age();

  if (!promote_immediately) {
    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
      if (new_obj == nullptr && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
          promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
          if (lab_base != nullptr) {
            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
            // Try the young lab allocation again.
            new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
            promotion_trace_event(new_obj, klass, new_obj_size, age, false, &_young_lab);
          } else {
            _young_gen_is_full = true;
          }
        }
      }
    }
  }

  // Otherwise try allocating obj tenured
  if (new_obj == nullptr) {
#ifndef PRODUCT
    if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
      return oop_promotion_failed(o, test_mark);
    }
#endif // #ifndef PRODUCT

    new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
    new_obj_is_tenured = true;

    if (new_obj == nullptr) {
      if (!_old_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (OldPLABSize / 2)) {
          // Allocate this object directly
          new_obj = cast_to_oop(old_gen()->allocate(new_obj_size));
          promotion_trace_event(new_obj, klass, new_obj_size, age, true, nullptr);
        } else {
          // Flush and fill
          _old_lab.flush();

          HeapWord* lab_base = old_gen()->allocate(OldPLABSize);
          if (lab_base != nullptr) {
            _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
            // Try the old lab allocation again.
            new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
            promotion_trace_event(new_obj, klass, new_obj_size, age, true, &_old_lab);
          }
        }
      }

      // This is the promotion failed test, and code handling.
      // The code belongs here for two reasons. It is slightly
      // different than the code below, and cannot share the
      // CAS testing code. Keeping the code here also minimizes
      // the impact on the common case fast path code.

      if (new_obj == nullptr) {
        _old_gen_is_full = true;
        return oop_promotion_failed(o, test_mark);
      }
    }
  }

  assert(new_obj != nullptr, "allocation should have succeeded");

  // Copy obj
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);

  // Now we have to CAS in the header.
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy. Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
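  // A null return from forward_to_atomic means our CAS installed new_obj as
  // the forwardee; otherwise another worker won the race, and we must retract
  // our copy and use the winner's forwardee instead.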
  oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_relaxed);
  if (forwardee == nullptr) { // forwardee is null when forwarding is successful
    // We won any races, we "own" this object.
    assert(new_obj == o->forwardee(), "Sanity");

    // Increment age if obj still in new generation. Now that
    // we're dealing with a markWord that cannot change, it is
    // okay to use the non mt safe oop methods.
    if (!new_obj_is_tenured) {
      new_obj->incr_age();
      assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
    }

    ContinuationGCSupport::transform_stack_chunk(new_obj);

    // Do the size comparison first with new_obj_size, which we
    // already have. Hopefully, only a few objects are larger than
    // _min_array_size_for_chunking, and most of them will be arrays.
    // So, the is_objArray() test would be very infrequent.
    if (new_obj_size > _min_array_size_for_chunking &&
        new_obj->is_objArray() &&
        PSChunkLargeArrays) {
      push_objArray(o, new_obj);
    } else {
      // we'll just push its contents
      push_contents(new_obj);

      if (StringDedup::is_enabled() &&
          java_lang_String::is_instance(new_obj) &&
          psStringDedup::is_candidate_from_evacuation(new_obj, new_obj_is_tenured)) {
        _string_dedup_requests.add(o);
      }
    }
    return new_obj;
  } else {
    // We lost, someone else "owns" this object.

    assert(o->is_forwarded(), "Object must be forwarded if the cas failed.");
    assert(o->forwardee() == forwardee, "invariant");

    if (new_obj_is_tenured) {
      _old_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size);
    } else {
      _young_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size);
    }
    return forwardee;
  }
}

// Attempt to "claim" oop at p via CAS, push the new obj if successful
template <bool promote_immediately, class T>
inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
  assert(ParallelScavengeHeap::heap()->is_in_reserved(p), "precondition");
  assert(should_scavenge(p, true), "revisiting object?");

  oop o = RawAccess<IS_NOT_NULL>::oop_load(p);
  oop new_obj = copy_to_survivor_space<promote_immediately>(o);
  RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);

  // If the slot is outside the young generation but now refers to a young
  // object, dirty the card covering p so the next scavenge can find it.
  if (!PSScavenge::is_obj_in_young((HeapWord*)p) &&
      PSScavenge::is_obj_in_young(new_obj)) {
    PSScavenge::card_table()->inline_write_ref_field_gc(p);
  }
}

inline void PSPromotionManager::process_popped_location_depth(ScannerTask task) {
  if (task.is_partial_array_state()) {
    assert(PSChunkLargeArrays, "invariant");
    process_array_chunk(task.to_partial_array_state());
  } else {
    if (task.is_narrow_oop_ptr()) {
      assert(UseCompressedOops, "Error");
      copy_and_push_safe_barrier</*promote_immediately=*/false>(task.to_narrow_oop_ptr());
    } else {
      copy_and_push_safe_barrier</*promote_immediately=*/false>(task.to_oop_ptr());
    }
  }
}

inline bool PSPromotionManager::steal_depth(int queue_num, ScannerTask& t) {
  return stack_array_depth()->steal(queue_num, t);
}

#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(ScannerTask task) {
  if (task.is_partial_array_state()) {
    ++_array_chunk_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP