1 /* 2 * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP 26 #define SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP 27 28 #include "gc/parallel/psPromotionManager.hpp" 29 30 #include "gc/parallel/parallelScavengeHeap.hpp" 31 #include "gc/parallel/parMarkBitMap.inline.hpp" 32 #include "gc/parallel/psOldGen.hpp" 33 #include "gc/parallel/psPromotionLAB.inline.hpp" 34 #include "gc/parallel/psScavenge.inline.hpp" 35 #include "gc/shared/taskqueue.inline.hpp" 36 #include "gc/shared/tlab_globals.hpp" 37 #include "logging/log.hpp" 38 #include "memory/iterator.inline.hpp" 39 #include "oops/access.inline.hpp" 40 #include "oops/oop.inline.hpp" 41 #include "runtime/orderAccess.hpp" 42 #include "runtime/prefetch.inline.hpp" 43 44 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) { 45 assert(_manager_array != NULL, "access of NULL manager_array"); 46 assert(index <= ParallelGCThreads, "out of range manager_array access"); 47 return &_manager_array[index]; 48 } 49 50 inline void PSPromotionManager::push_depth(ScannerTask task) { 51 claimed_stack_depth()->push(task); 52 } 53 54 template <class T> 55 inline void PSPromotionManager::claim_or_forward_depth(T* p) { 56 assert(should_scavenge(p, true), "revisiting object?"); 57 assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap"); 58 oop obj = RawAccess<IS_NOT_NULL>::oop_load(p); 59 Prefetch::write(obj->mark_addr(), 0); 60 push_depth(ScannerTask(p)); 61 } 62 63 inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, 64 size_t obj_size, 65 uint age, bool tenured, 66 const PSPromotionLAB* lab) { 67 // Skip if memory allocation failed 68 if (new_obj != NULL) { 69 const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer(); 70 71 if (lab != NULL) { 72 // Promotion of object through newly allocated PLAB 73 if (gc_tracer->should_report_promotion_in_new_plab_event()) { 74 size_t obj_bytes = obj_size * HeapWordSize; 75 size_t lab_size = lab->capacity(); 76 gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes, 77 age, tenured, lab_size); 78 } 79 } else { 80 // Promotion of object directly to heap 81 if (gc_tracer->should_report_promotion_outside_plab_event()) { 82 size_t obj_bytes = obj_size * HeapWordSize; 83 gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes, 84 age, tenured); 85 } 86 } 87 } 88 } 89 90 class PSPushContentsClosure: public BasicOopIterateClosure { 91 PSPromotionManager* _pm; 92 public: 93 PSPushContentsClosure(PSPromotionManager* pm) : BasicOopIterateClosure(PSScavenge::reference_processor()), _pm(pm) {} 94 95 template <typename T> void do_oop_nv(T* p) { 96 if (PSScavenge::should_scavenge(p)) { 97 _pm->claim_or_forward_depth(p); 98 } 99 } 100 101 virtual void do_oop(oop* p) { do_oop_nv(p); } 102 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } 103 }; 104 105 // 106 // This closure specialization will override the one that is defined in 107 // instanceRefKlass.inline.cpp. It swaps the order of oop_oop_iterate and 108 // oop_oop_iterate_ref_processing. Unfortunately G1 and Parallel behaves 109 // significantly better (especially in the Derby benchmark) using opposite 110 // order of these function calls. 111 // 112 template <> 113 inline void InstanceRefKlass::oop_oop_iterate_reverse<oop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) { 114 oop_oop_iterate_ref_processing<oop>(obj, closure); 115 InstanceKlass::oop_oop_iterate_reverse<oop>(obj, closure); 116 } 117 118 template <> 119 inline void InstanceRefKlass::oop_oop_iterate_reverse<narrowOop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure) { 120 oop_oop_iterate_ref_processing<narrowOop>(obj, closure); 121 InstanceKlass::oop_oop_iterate_reverse<narrowOop>(obj, closure); 122 } 123 124 inline void PSPromotionManager::push_contents(oop obj) { 125 if (!obj->klass()->is_typeArray_klass()) { 126 PSPushContentsClosure pcc(this); 127 obj->oop_iterate_backwards(&pcc); 128 } 129 } 130 131 template<bool promote_immediately> 132 inline oop PSPromotionManager::copy_to_survivor_space(oop o) { 133 assert(should_scavenge(&o), "Sanity"); 134 135 // NOTE! We must be very careful with any methods that access the mark 136 // in o. There may be multiple threads racing on it, and it may be forwarded 137 // at any time. 138 markWord m = o->mark(); 139 if (!m.is_marked()) { 140 return copy_unmarked_to_survivor_space<promote_immediately>(o, m); 141 } else { 142 // Ensure any loads from the forwardee follow all changes that precede 143 // the release-cmpxchg that performed the forwarding, possibly in some 144 // other thread. 145 OrderAccess::acquire(); 146 // Return the already installed forwardee. 147 return cast_to_oop(m.decode_pointer()); 148 } 149 } 150 151 // 152 // This method is pretty bulky. It would be nice to split it up 153 // into smaller submethods, but we need to be careful not to hurt 154 // performance. 155 // 156 template<bool promote_immediately> 157 inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, 158 markWord test_mark) { 159 assert(should_scavenge(&o), "Sanity"); 160 161 oop new_obj = NULL; 162 bool new_obj_is_tenured = false; 163 size_t new_obj_size = o->size(); 164 165 // Find the objects age, MT safe. 166 uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ? 167 test_mark.displaced_mark_helper().age() : test_mark.age(); 168 169 if (!promote_immediately) { 170 // Try allocating obj in to-space (unless too old) 171 if (age < PSScavenge::tenuring_threshold()) { 172 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size)); 173 if (new_obj == NULL && !_young_gen_is_full) { 174 // Do we allocate directly, or flush and refill? 175 if (new_obj_size > (YoungPLABSize / 2)) { 176 // Allocate this object directly 177 new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size)); 178 promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL); 179 } else { 180 // Flush and fill 181 _young_lab.flush(); 182 183 HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize); 184 if (lab_base != NULL) { 185 _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); 186 // Try the young lab allocation again. 187 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size)); 188 promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab); 189 } else { 190 _young_gen_is_full = true; 191 } 192 } 193 } 194 } 195 } 196 197 // Otherwise try allocating obj tenured 198 if (new_obj == NULL) { 199 #ifndef PRODUCT 200 if (ParallelScavengeHeap::heap()->promotion_should_fail()) { 201 return oop_promotion_failed(o, test_mark); 202 } 203 #endif // #ifndef PRODUCT 204 205 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size)); 206 new_obj_is_tenured = true; 207 208 if (new_obj == NULL) { 209 if (!_old_gen_is_full) { 210 // Do we allocate directly, or flush and refill? 211 if (new_obj_size > (OldPLABSize / 2)) { 212 // Allocate this object directly 213 new_obj = cast_to_oop(old_gen()->allocate(new_obj_size)); 214 promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL); 215 } else { 216 // Flush and fill 217 _old_lab.flush(); 218 219 HeapWord* lab_base = old_gen()->allocate(OldPLABSize); 220 if(lab_base != NULL) { 221 #ifdef ASSERT 222 // Delay the initialization of the promotion lab (plab). 223 // This exposes uninitialized plabs to card table processing. 224 if (GCWorkerDelayMillis > 0) { 225 os::naked_sleep(GCWorkerDelayMillis); 226 } 227 #endif 228 _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); 229 // Try the old lab allocation again. 230 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size)); 231 promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab); 232 } 233 } 234 } 235 236 // This is the promotion failed test, and code handling. 237 // The code belongs here for two reasons. It is slightly 238 // different than the code below, and cannot share the 239 // CAS testing code. Keeping the code here also minimizes 240 // the impact on the common case fast path code. 241 242 if (new_obj == NULL) { 243 _old_gen_is_full = true; 244 return oop_promotion_failed(o, test_mark); 245 } 246 } 247 } 248 249 assert(new_obj != NULL, "allocation should have succeeded"); 250 251 // Copy obj 252 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size); 253 254 // Now we have to CAS in the header. 255 // Make copy visible to threads reading the forwardee. 256 oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_release); 257 if (forwardee == NULL) { // forwardee is NULL when forwarding is successful 258 // We won any races, we "own" this object. 259 assert(new_obj == o->forwardee(), "Sanity"); 260 261 // Increment age if obj still in new generation. Now that 262 // we're dealing with a markWord that cannot change, it is 263 // okay to use the non mt safe oop methods. 264 if (!new_obj_is_tenured) { 265 new_obj->incr_age(); 266 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); 267 } 268 269 log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", 270 new_obj_is_tenured ? "copying" : "tenuring", 271 new_obj->klass()->internal_name(), 272 p2i((void *)o), p2i((void *)new_obj), new_obj->size()); 273 274 // Do the size comparison first with new_obj_size, which we 275 // already have. Hopefully, only a few objects are larger than 276 // _min_array_size_for_chunking, and most of them will be arrays. 277 // So, the is->objArray() test would be very infrequent. 278 if (new_obj_size > _min_array_size_for_chunking && 279 new_obj->is_objArray() && 280 PSChunkLargeArrays) { 281 // we'll chunk it 282 push_depth(ScannerTask(PartialArrayScanTask(o))); 283 TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_array_chunk_pushes); 284 } else { 285 // we'll just push its contents 286 push_contents(new_obj); 287 } 288 return new_obj; 289 } else { 290 // We lost, someone else "owns" this object. 291 // Ensure loads from the forwardee follow all changes that preceeded the 292 // release-cmpxchg that performed the forwarding in another thread. 293 OrderAccess::acquire(); 294 295 assert(o->is_forwarded(), "Object must be forwarded if the cas failed."); 296 assert(o->forwardee() == forwardee, "invariant"); 297 298 // Try to deallocate the space. If it was directly allocated we cannot 299 // deallocate it, so we have to test. If the deallocation fails, 300 // overwrite with a filler object. 301 if (new_obj_is_tenured) { 302 if (!_old_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) { 303 CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size); 304 } 305 } else if (!_young_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) { 306 CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size); 307 } 308 return forwardee; 309 } 310 } 311 312 // Attempt to "claim" oop at p via CAS, push the new obj if successful 313 // This version tests the oop* to make sure it is within the heap before 314 // attempting marking. 315 template <bool promote_immediately, class T> 316 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) { 317 assert(should_scavenge(p, true), "revisiting object?"); 318 319 oop o = RawAccess<IS_NOT_NULL>::oop_load(p); 320 oop new_obj = copy_to_survivor_space<promote_immediately>(o); 321 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj); 322 323 // We cannot mark without test, as some code passes us pointers 324 // that are outside the heap. These pointers are either from roots 325 // or from metadata. 326 if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) && 327 ParallelScavengeHeap::heap()->is_in_reserved(p)) { 328 if (PSScavenge::is_obj_in_young(new_obj)) { 329 PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj); 330 } 331 } 332 } 333 334 inline void PSPromotionManager::process_popped_location_depth(ScannerTask task) { 335 if (task.is_partial_array_task()) { 336 assert(PSChunkLargeArrays, "invariant"); 337 process_array_chunk(task.to_partial_array_task()); 338 } else { 339 if (task.is_narrow_oop_ptr()) { 340 assert(UseCompressedOops, "Error"); 341 copy_and_push_safe_barrier</*promote_immediately=*/false>(task.to_narrow_oop_ptr()); 342 } else { 343 copy_and_push_safe_barrier</*promote_immediately=*/false>(task.to_oop_ptr()); 344 } 345 } 346 } 347 348 inline bool PSPromotionManager::steal_depth(int queue_num, ScannerTask& t) { 349 return stack_array_depth()->steal(queue_num, t); 350 } 351 352 #if TASKQUEUE_STATS 353 void PSPromotionManager::record_steal(ScannerTask task) { 354 if (task.is_partial_array_task()) { 355 ++_array_chunk_steals; 356 } 357 } 358 #endif // TASKQUEUE_STATS 359 360 #endif // SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP