1 /* 2 * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP 26 #define SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP 27 28 #include "gc/parallel/psPromotionManager.hpp" 29 30 #include "gc/parallel/parallelScavengeHeap.hpp" 31 #include "gc/parallel/parMarkBitMap.inline.hpp" 32 #include "gc/parallel/psOldGen.hpp" 33 #include "gc/parallel/psPromotionLAB.inline.hpp" 34 #include "gc/parallel/psScavenge.inline.hpp" 35 #include "gc/shared/taskqueue.inline.hpp" 36 #include "gc/shared/tlab_globals.hpp" 37 #include "logging/log.hpp" 38 #include "memory/iterator.inline.hpp" 39 #include "oops/access.inline.hpp" 40 #include "oops/oop.inline.hpp" 41 #include "runtime/orderAccess.hpp" 42 #include "runtime/prefetch.inline.hpp" 43 44 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) { 45 assert(_manager_array != NULL, "access of NULL manager_array"); 46 assert(index <= ParallelGCThreads, "out of range manager_array access"); 47 return &_manager_array[index]; 48 } 49 50 inline void PSPromotionManager::push_depth(ScannerTask task) { 51 claimed_stack_depth()->push(task); 52 } 53 54 template <class T> 55 inline void PSPromotionManager::claim_or_forward_depth(T* p) { 56 assert(should_scavenge(p, true), "revisiting object?"); 57 assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap"); 58 oop obj = RawAccess<IS_NOT_NULL>::oop_load(p); 59 Prefetch::write(obj->mark_addr(), 0); 60 push_depth(ScannerTask(p)); 61 } 62 63 inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, Klass* klass, 64 size_t obj_size, 65 uint age, bool tenured, 66 const PSPromotionLAB* lab) { 67 // Skip if memory allocation failed 68 if (new_obj != NULL) { 69 const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer(); 70 71 if (lab != NULL) { 72 // Promotion of object through newly allocated PLAB 73 if (gc_tracer->should_report_promotion_in_new_plab_event()) { 74 size_t obj_bytes = obj_size * HeapWordSize; 75 size_t lab_size = lab->capacity(); 76 gc_tracer->report_promotion_in_new_plab_event(klass, obj_bytes, 77 age, tenured, lab_size); 78 } 79 } else { 80 // Promotion of object directly to heap 81 if (gc_tracer->should_report_promotion_outside_plab_event()) { 82 size_t obj_bytes = obj_size * HeapWordSize; 83 gc_tracer->report_promotion_outside_plab_event(klass, obj_bytes, 84 age, tenured); 85 } 86 } 87 } 88 } 89 90 class PSPushContentsClosure: public BasicOopIterateClosure { 91 PSPromotionManager* _pm; 92 public: 93 PSPushContentsClosure(PSPromotionManager* pm) : BasicOopIterateClosure(PSScavenge::reference_processor()), _pm(pm) {} 94 95 template <typename T> void do_oop_nv(T* p) { 96 if (PSScavenge::should_scavenge(p)) { 97 _pm->claim_or_forward_depth(p); 98 } 99 } 100 101 virtual void do_oop(oop* p) { do_oop_nv(p); } 102 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } 103 }; 104 105 // 106 // This closure specialization will override the one that is defined in 107 // instanceRefKlass.inline.cpp. It swaps the order of oop_oop_iterate and 108 // oop_oop_iterate_ref_processing. Unfortunately G1 and Parallel behaves 109 // significantly better (especially in the Derby benchmark) using opposite 110 // order of these function calls. 111 // 112 template <> 113 inline void InstanceRefKlass::oop_oop_iterate_reverse<oop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) { 114 oop_oop_iterate_ref_processing<oop>(obj, closure); 115 InstanceKlass::oop_oop_iterate_reverse<oop>(obj, closure); 116 } 117 118 template <> 119 inline void InstanceRefKlass::oop_oop_iterate_reverse<narrowOop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) { 120 oop_oop_iterate_ref_processing<narrowOop>(obj, closure); 121 InstanceKlass::oop_oop_iterate_reverse<narrowOop>(obj, closure); 122 } 123 124 inline void PSPromotionManager::push_contents(oop obj) { 125 if (!obj->klass()->is_typeArray_klass()) { 126 PSPushContentsClosure pcc(this); 127 obj->oop_iterate_backwards(&pcc); 128 } 129 } 130 131 template<bool promote_immediately> 132 inline oop PSPromotionManager::copy_to_survivor_space(oop o) { 133 assert(should_scavenge(&o), "Sanity"); 134 135 // NOTE! We must be very careful with any methods that access the mark 136 // in o. There may be multiple threads racing on it, and it may be forwarded 137 // at any time. 138 markWord m = o->mark(); 139 if (!m.is_marked()) { 140 return copy_unmarked_to_survivor_space<promote_immediately>(o, m); 141 } else { 142 // Ensure any loads from the forwardee follow all changes that precede 143 // the release-cmpxchg that performed the forwarding, possibly in some 144 // other thread. 145 OrderAccess::acquire(); 146 // Return the already installed forwardee. 147 return o->forwardee(m); 148 } 149 } 150 151 // 152 // This method is pretty bulky. It would be nice to split it up 153 // into smaller submethods, but we need to be careful not to hurt 154 // performance. 155 // 156 template<bool promote_immediately> 157 inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, 158 markWord test_mark) { 159 assert(should_scavenge(&o), "Sanity"); 160 161 oop new_obj = NULL; 162 bool new_obj_is_tenured = false; 163 Klass* klass; 164 #ifdef _LP64 165 if (UseCompactObjectHeaders) { 166 klass = test_mark.safe_klass(); 167 } else 168 #endif 169 { 170 klass = o->klass(); 171 } 172 size_t new_obj_size = o->size_given_klass(klass); 173 174 // Find the objects age, MT safe. 175 uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ? 176 test_mark.displaced_mark_helper().age() : test_mark.age(); 177 178 if (!promote_immediately) { 179 // Try allocating obj in to-space (unless too old) 180 if (age < PSScavenge::tenuring_threshold()) { 181 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size)); 182 if (new_obj == NULL && !_young_gen_is_full) { 183 // Do we allocate directly, or flush and refill? 184 if (new_obj_size > (YoungPLABSize / 2)) { 185 // Allocate this object directly 186 new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size)); 187 promotion_trace_event(new_obj, o, klass, new_obj_size, age, false, NULL); 188 } else { 189 // Flush and fill 190 _young_lab.flush(); 191 192 HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize); 193 if (lab_base != NULL) { 194 _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); 195 // Try the young lab allocation again. 196 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size)); 197 promotion_trace_event(new_obj, o, klass, new_obj_size, age, false, &_young_lab); 198 } else { 199 _young_gen_is_full = true; 200 } 201 } 202 } 203 } 204 } 205 206 // Otherwise try allocating obj tenured 207 if (new_obj == NULL) { 208 #ifndef PRODUCT 209 if (ParallelScavengeHeap::heap()->promotion_should_fail()) { 210 return oop_promotion_failed(o, test_mark); 211 } 212 #endif // #ifndef PRODUCT 213 214 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size)); 215 new_obj_is_tenured = true; 216 217 if (new_obj == NULL) { 218 if (!_old_gen_is_full) { 219 // Do we allocate directly, or flush and refill? 220 if (new_obj_size > (OldPLABSize / 2)) { 221 // Allocate this object directly 222 new_obj = cast_to_oop(old_gen()->allocate(new_obj_size)); 223 promotion_trace_event(new_obj, o, klass, new_obj_size, age, true, NULL); 224 } else { 225 // Flush and fill 226 _old_lab.flush(); 227 228 HeapWord* lab_base = old_gen()->allocate(OldPLABSize); 229 if(lab_base != NULL) { 230 #ifdef ASSERT 231 // Delay the initialization of the promotion lab (plab). 232 // This exposes uninitialized plabs to card table processing. 233 if (GCWorkerDelayMillis > 0) { 234 os::naked_sleep(GCWorkerDelayMillis); 235 } 236 #endif 237 _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); 238 // Try the old lab allocation again. 239 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size)); 240 promotion_trace_event(new_obj, o, klass, new_obj_size, age, true, &_old_lab); 241 } 242 } 243 } 244 245 // This is the promotion failed test, and code handling. 246 // The code belongs here for two reasons. It is slightly 247 // different than the code below, and cannot share the 248 // CAS testing code. Keeping the code here also minimizes 249 // the impact on the common case fast path code. 250 251 if (new_obj == NULL) { 252 _old_gen_is_full = true; 253 return oop_promotion_failed(o, test_mark); 254 } 255 } 256 } 257 258 assert(new_obj != NULL, "allocation should have succeeded"); 259 260 // Copy obj 261 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size); 262 263 // Now we have to CAS in the header. 264 // Make copy visible to threads reading the forwardee. 265 oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_release); 266 if (forwardee == NULL) { // forwardee is NULL when forwarding is successful 267 // We won any races, we "own" this object. 268 assert(new_obj == o->forwardee(), "Sanity"); 269 270 // Increment age if obj still in new generation. Now that 271 // we're dealing with a markWord that cannot change, it is 272 // okay to use the non mt safe oop methods. 273 if (!new_obj_is_tenured) { 274 new_obj->incr_age(); 275 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); 276 } 277 278 log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", 279 new_obj_is_tenured ? "copying" : "tenuring", 280 new_obj->klass()->internal_name(), 281 p2i((void *)o), p2i((void *)new_obj), new_obj->size()); 282 283 // Do the size comparison first with new_obj_size, which we 284 // already have. Hopefully, only a few objects are larger than 285 // _min_array_size_for_chunking, and most of them will be arrays. 286 // So, the is->objArray() test would be very infrequent. 287 if (new_obj_size > _min_array_size_for_chunking && 288 new_obj->is_objArray() && 289 PSChunkLargeArrays) { 290 // we'll chunk it 291 push_depth(ScannerTask(PartialArrayScanTask(o))); 292 TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_array_chunk_pushes); 293 } else { 294 // we'll just push its contents 295 push_contents(new_obj); 296 } 297 return new_obj; 298 } else { 299 // We lost, someone else "owns" this object. 300 // Ensure loads from the forwardee follow all changes that preceeded the 301 // release-cmpxchg that performed the forwarding in another thread. 302 OrderAccess::acquire(); 303 304 assert(o->is_forwarded(), "Object must be forwarded if the cas failed."); 305 assert(o->forwardee() == forwardee, "invariant"); 306 307 // Try to deallocate the space. If it was directly allocated we cannot 308 // deallocate it, so we have to test. If the deallocation fails, 309 // overwrite with a filler object. 310 if (new_obj_is_tenured) { 311 if (!_old_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) { 312 CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size); 313 } 314 } else if (!_young_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) { 315 CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size); 316 } 317 return forwardee; 318 } 319 } 320 321 // Attempt to "claim" oop at p via CAS, push the new obj if successful 322 // This version tests the oop* to make sure it is within the heap before 323 // attempting marking. 324 template <bool promote_immediately, class T> 325 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) { 326 assert(should_scavenge(p, true), "revisiting object?"); 327 328 oop o = RawAccess<IS_NOT_NULL>::oop_load(p); 329 oop new_obj = copy_to_survivor_space<promote_immediately>(o); 330 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj); 331 332 // We cannot mark without test, as some code passes us pointers 333 // that are outside the heap. These pointers are either from roots 334 // or from metadata. 335 if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) && 336 ParallelScavengeHeap::heap()->is_in_reserved(p)) { 337 if (PSScavenge::is_obj_in_young(new_obj)) { 338 PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj); 339 } 340 } 341 } 342 343 inline void PSPromotionManager::process_popped_location_depth(ScannerTask task) { 344 if (task.is_partial_array_task()) { 345 assert(PSChunkLargeArrays, "invariant"); 346 process_array_chunk(task.to_partial_array_task()); 347 } else { 348 if (task.is_narrow_oop_ptr()) { 349 assert(UseCompressedOops, "Error"); 350 copy_and_push_safe_barrier</*promote_immediately=*/false>(task.to_narrow_oop_ptr()); 351 } else { 352 copy_and_push_safe_barrier</*promote_immediately=*/false>(task.to_oop_ptr()); 353 } 354 } 355 } 356 357 inline bool PSPromotionManager::steal_depth(int queue_num, ScannerTask& t) { 358 return stack_array_depth()->steal(queue_num, t); 359 } 360 361 #if TASKQUEUE_STATS 362 void PSPromotionManager::record_steal(ScannerTask task) { 363 if (task.is_partial_array_task()) { 364 ++_array_chunk_steals; 365 } 366 } 367 #endif // TASKQUEUE_STATS 368 369 #endif // SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP --- EOF ---