/*
 * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/partialArrayTaskStepper.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// In fastdebug builds the code size can get out of hand, potentially
// tripping over compiler limits (which may be bugs, but nevertheless
// need to be taken into consideration). A side benefit of limiting
// inlining is that we get more call frames that might aid debugging.
// And the fastdebug compile time for this file is much reduced.
// Explicit NOINLINE to block ATTRIBUTE_FLATTENing.
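// (In product builds NOT_DEBUG(inline) expands to "inline"; in debug builds
// DEBUG_ONLY(NOINLINE) expands to NOINLINE, giving the out-of-line
// definitions described above.)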
#define MAYBE_INLINE_EVACUATION NOT_DEBUG(inline) DEBUG_ONLY(NOINLINE)

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           PreservedMarks* preserved_marks,
                                           uint worker_id,
                                           uint num_workers,
                                           G1CollectionSet* collection_set,
                                           G1EvacFailureRegions* evac_failure_regions,
                                           PartialArrayStateAllocator* pas_allocator)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdc_local_qset(rdcqs),
    _ct(g1h->card_table()),
    _closures(nullptr),
    _plab_allocator(nullptr),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(nullptr),
    _surviving_young_words(nullptr),
    _surviving_words_length(collection_set->young_region_length() + 1),
    _old_gen_is_full(false),
    _partial_array_state_allocator(pas_allocator),
    _partial_array_stepper(num_workers, ParGCArrayScanChunk),
    _string_dedup_requests(),
    _max_num_optional_regions(collection_set->optional_region_length()),
    _numa(g1h->numa()),
    _obj_alloc_stat(nullptr),
    ALLOCATION_FAILURE_INJECTOR_ONLY(_allocation_failure_inject_counter(0) COMMA)
    _preserved_marks(preserved_marks),
    _evacuation_failed_info(),
    _evac_failure_regions(evac_failure_regions),
    _evac_failure_enqueued_cards(0)
{
  // We allocate the number of young gen regions in the collection set plus one
  // entry, since entry 0 keeps track of surviving bytes for non-young regions.
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_PADDING_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _closures = G1EvacuationRootClosures::create_root_closures(_g1h,
                                                             this,
                                                             collection_set->only_contains_young_regions());

  _oops_into_optional_regions = new G1OopStarChunkedList[_max_num_optional_regions];

  initialize_numa_stats();
}

size_t G1ParScanThreadState::flush_stats(size_t* surviving_young_words, uint num_workers, BufferNodeList* rdc_buffers) {
  *rdc_buffers = _rdc_local_qset.flush();
  flush_numa_stats();
  // Update allocation statistics.
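  // Retiring the PLABs here folds this worker's per-destination allocation
  // and waste counts into the global PLAB statistics used to size future PLABs.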
  _plab_allocator->flush_and_retire_stats(num_workers);
  _g1h->policy()->record_age_table(&_age_table);

  if (_evacuation_failed_info.has_failed()) {
    _g1h->gc_tracer_stw()->report_evacuation_failed(_evacuation_failed_info);
  }

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

size_t G1ParScanThreadState::evac_failure_enqueued_cards() const {
  return _evac_failure_enqueued_cards;
}

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != nullptr, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != nullptr, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayState* task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task->source();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_state()) {
    verify_task(task.to_partial_array_state());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT

template <class T>
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_oop_evac(T* p) {
  // Reference should not be null here as such are never pushed to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times, and so we might get references into old gen here.
  // So we need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous region
  // as they are not added to the collection set due to above precondition.
  assert(!region_attr.is_humongous_candidate(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region(obj), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
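    // Another worker that claimed the same card has already evacuated the
    // object and updated this reference to point outside the collection set.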
    return;
  }

  markWord m = obj->mark();
  if (m.is_forwarded()) {
    obj = m.forwardee();
  } else {
    obj = do_copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  write_ref_field_post(p, obj);
}

MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_partial_array(PartialArrayState* state) {
  oop to_obj = state->destination();

#ifdef ASSERT
  oop from_obj = state->source();
  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  assert(from_obj->is_forwarded(), "must be forwarded");
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  assert(to_obj->is_objArray(), "must be obj array");
#endif // ASSERT

  objArrayOop to_array = objArrayOop(to_obj);

  // Claim a chunk and get number of additional tasks to enqueue.
  PartialArrayTaskStepper::Step step = _partial_array_stepper.next(state);
  // Push any additional partial scan tasks needed. Pushed before processing
  // the claimed chunk to allow other workers to steal while we're processing.
  if (step._ncreate > 0) {
    state->add_references(step._ncreate);
    for (uint i = 0; i < step._ncreate; ++i) {
      push_on_queue(ScannerTask(state));
    }
  }

  G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_new_survivor());
  // Process claimed task.
  to_array->oop_iterate_range(&_scanner,
                              checked_cast<int>(step._index),
                              checked_cast<int>(step._index + _partial_array_stepper.chunk_size()));
  // Release reference to the state, now that we're done with it.
  _partial_array_state_allocator->release(_worker_id, state);
}

MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                  oop from_obj,
                                                  oop to_obj) {
  assert(from_obj->is_objArray(), "precondition");
  assert(from_obj->is_forwarded(), "precondition");
  assert(from_obj->forwardee() == to_obj, "precondition");
  assert(to_obj->is_objArray(), "precondition");

  objArrayOop to_array = objArrayOop(to_obj);

  size_t array_length = to_array->length();
  PartialArrayTaskStepper::Step step = _partial_array_stepper.start(array_length);

  // Push any needed partial scan tasks. Pushed before processing the
  // initial chunk to allow other workers to steal while we're processing.
  if (step._ncreate > 0) {
    assert(step._index < array_length, "invariant");
    assert(((array_length - step._index) % _partial_array_stepper.chunk_size()) == 0,
           "invariant");
    PartialArrayState* state =
      _partial_array_state_allocator->allocate(_worker_id,
                                               from_obj, to_obj,
                                               step._index,
                                               array_length,
                                               step._ncreate);
    for (uint i = 0; i < step._ncreate; ++i) {
      push_on_queue(ScannerTask(state));
    }
  } else {
    assert(step._index == array_length, "invariant");
  }

  // Skip the card enqueue iff the object (to_array) is in survivor region.
  // However, G1HeapRegion::is_survivor() is too expensive here.
  // Instead, we use dest_attr.is_young() because the two values are always
  // equal: successfully allocated young regions must be survivor regions.
  assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
  // Process the initial chunk.
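  // Only elements in [0, step._index) are scanned here; the rest of the
  // array is covered by the partial scan tasks pushed above.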
  to_array->oop_iterate_range(&_scanner, 0, checked_cast<int>(step._index));
}

MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::dispatch_task(ScannerTask task) {
  verify_task(task);
  if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    do_oop_evac(task.to_oop_ptr());
  } else {
    do_partial_array(task.to_partial_array_state());
  }
}

// Process tasks until overflow queue is empty and local queue
// contains no more than threshold entries. NOINLINE to prevent
// inlining into steal_and_trim_queue.
ATTRIBUTE_FLATTEN NOINLINE
void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  ScannerTask task;
  do {
    while (_task_queue->pop_overflow(task)) {
      if (!_task_queue->try_push_to_taskqueue(task)) {
        dispatch_task(task);
      }
    }
    while (_task_queue->pop_local(task, threshold)) {
      dispatch_task(task);
    }
  } while (!_task_queue->overflow_empty());
}

ATTRIBUTE_FLATTEN
void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    dispatch_task(stolen_task);
    // Processing stolen task may have added tasks to our queue.
    trim_queue();
  }
}

HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous_candidate(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != nullptr) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // no other space to try.
    return nullptr;
  }
}

G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  assert(region_attr.is_young() || region_attr.is_old(), "must be either Young or Old");

  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ?
            m.age() :
            m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  // young-to-old (promotion) or old-to-old; destination is old in both cases.
  return G1HeapRegionAttr::Old;
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old,
                                                              alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                               dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

NOINLINE
HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                                                   oop old,
                                                   size_t word_sz,
                                                   uint age,
                                                   uint node_index) {
  HeapWord* obj_ptr = nullptr;
  // Try slow-path allocation unless we're allocating old and old is already full.
  if (!(dest_attr->is_old() && _old_gen_is_full)) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
                                                           word_sz,
                                                           &plab_refill_failed,
                                                           node_index);
    if (obj_ptr == nullptr) {
      obj_ptr = allocate_in_next_plab(dest_attr,
                                      word_sz,
                                      plab_refill_failed,
                                      node_index);
    }
  }
  if (obj_ptr != nullptr) {
    update_numa_stats(node_index);
    if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
    }
  }
  return obj_ptr;
}

#if ALLOCATION_FAILURE_INJECTOR
bool G1ParScanThreadState::inject_allocation_failure(uint region_idx) {
  return _g1h->allocation_failure_injector()->allocation_should_fail(_allocation_failure_inject_counter, region_idx);
}
#endif

NOINLINE
void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
                                           HeapWord* obj_ptr,
                                           size_t word_sz,
                                           uint node_index) {
  _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
}

void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
  HeapWord* obj_start = cast_from_oop<HeapWord*>(obj);
  G1HeapRegion* region = _g1h->heap_region_containing(obj_start);
  region->update_bot_for_block(obj_start, obj_start + word_sz);
}

// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
MAYBE_INLINE_EVACUATION
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // Get the klass once. We'll need it again later, and this avoids
  // re-decoding when it's compressed.
  Klass* klass = old->klass();
  const size_t word_sz = old->size_given_klass(klass);

  // JNI only allows pinning of typeArrays, so we only need to keep those in place.
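  // Evacuating a pinned typeArray would invalidate the buffer address handed
  // out to a JNI critical section, so treat the copy as an evacuation failure
  // and leave the object where it is.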
  if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
    return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
  }

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against null once and that's it.
  if (obj_ptr == nullptr) {
    obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
    if (obj_ptr == nullptr) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
    }
  }

  assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

  // Should this evacuation fail?
  if (inject_allocation_failure(from_region->hrm_index())) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
  }

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

  const oop obj = cast_to_oop(obj_ptr);
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy. Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == nullptr) {

    {
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index > 0) ||
             (!from_region->is_young() && young_index == 0), "invariant");
      _surviving_young_words[young_index] += word_sz;
    }

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
        obj->incr_age();
      }
      _age_table.add(age, word_sz);
    } else {
      update_bot_after_copying(obj, word_sz);
    }

    // Most objects are not arrays, so do one array check rather than
    // checking for each array category for each object.
    // CMH: Valhalla flat arrays could split this work up, but for now they don't.
    if (klass->is_array_klass() && !klass->is_flatArray_klass()) {
      if (klass->is_objArray_klass()) {
        start_partial_objarray(dest_attr, old, obj);
      } else {
        // Nothing needs to be done for typeArrays. Body doesn't contain
        // any oops to scan, and the type in the klass will already be handled
        // by processing the built-in module.
        assert(klass->is_typeArray_klass(), "invariant");
      }
      return obj;
    }

    ContinuationGCSupport::transform_stack_chunk(obj);

    // Check for deduplicating young Strings.
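    // Candidacy is decided from the klass, the source and destination region
    // attributes, and the object's age; the character contents are not
    // examined here.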
    if (G1StringDedup::is_candidate_from_evacuation(klass,
                                                    region_attr,
                                                    dest_attr,
                                                    age)) {
      // Record old; request adds a new weak reference, which reference
      // processing expects to refer to a from-space object.
      _string_dedup_requests.add(old);
    }

    // Skip the card enqueue iff the object (obj) is in survivor region.
    // However, G1HeapRegion::is_survivor() is too expensive here.
    // Instead, we use dest_attr.is_young() because the two values are always
    // equal: successfully allocated young regions must be survivor regions.
    assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
    G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
    obj->oop_iterate_backwards(&_scanner, klass);
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

// Public not-inline entry point.
ATTRIBUTE_FLATTEN
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                                 oop old,
                                                 markWord old_mark) {
  return do_copy_to_survivor_space(region_attr, old, old_mark);
}

G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _num_workers, "out of bounds access");
  if (_states[worker_id] == nullptr) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, rdcqs(),
                               _preserved_marks_set.get(worker_id),
                               worker_id,
                               _num_workers,
                               _collection_set,
                               _evac_failure_regions,
                               &_partial_array_state_allocator);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush_stats() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");
  for (uint worker_id = 0; worker_id < _num_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];
    assert(pss != nullptr, "must be initialized");

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two before the call to G1ParScanThreadState::flush_stats()
    // because it resets the PLAB allocator where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush_stats(_surviving_young_words_total, _num_workers, &_rdc_buffers[worker_id]) * HeapWordSize;
    size_t evac_fail_enqueued_cards = pss->evac_failure_enqueued_cards();

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, evac_fail_enqueued_cards, G1GCPhaseTimes::MergePSSEvacFailExtra);

    delete pss;
    _states[worker_id] = nullptr;
  }

  G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
  dcq.merge_bufferlists(rdcqs());
  rdcqs()->verify_empty();

  _flushed = true;
}

void G1ParScanThreadStateSet::record_unused_optional_region(G1HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];
    assert(pss != nullptr, "must be initialized");

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

NOINLINE
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == nullptr) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    G1HeapRegion* r = _g1h->heap_region_containing(old);

    if (_evac_failure_regions->record(_worker_id, r->hrm_index(), cause_pinned)) {
      G1HeapRegionPrinter::evac_failure(r);
    }

    // Mark the failing object in the marking bitmap and later use the bitmap to handle
    // evacuation failure recovery.
    _g1h->mark_evac_failure_object(_worker_id, old, word_sz);

    _preserved_marks->push_if_necessary(old, m);

    ContinuationGCSupport::transform_stack_chunk(old);

    _evacuation_failed_info.register_copy_failure(word_sz);

    // For iterating objects that failed evacuation currently we can reuse the
    // existing closure to scan evacuated objects; since we are iterating from a
    // collection set region (i.e. never a Survivor region), we always need to
    // gather cards for this case.
    G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
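    // Either way the object now has a valid forwarding entry; unless it was
    // self-forwarded, the forwardee must lie outside the collection set,
    // which the assert below checks.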
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

void G1ParScanThreadState::initialize_numa_stats() {
  if (_numa->is_enabled()) {
    LogTarget(Info, gc, heap, numa) lt;

    if (lt.is_enabled()) {
      uint num_nodes = _numa->num_active_nodes();
      // Record only if there are multiple active nodes.
      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
    }
  }
}

void G1ParScanThreadState::flush_numa_stats() {
  if (_obj_alloc_stat != nullptr) {
    uint node_index = _numa->index_of_current_thread();
    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
  }
}

void G1ParScanThreadState::update_numa_stats(uint node_index) {
  if (_obj_alloc_stat != nullptr) {
    _obj_alloc_stat[node_index]++;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint num_workers,
                                                 G1CollectionSet* collection_set,
                                                 G1EvacFailureRegions* evac_failure_regions) :
    _g1h(g1h),
    _collection_set(collection_set),
    _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()),
    _preserved_marks_set(true /* in_c_heap */),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)),
    _rdc_buffers(NEW_C_HEAP_ARRAY(BufferNodeList, num_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)),
    _num_workers(num_workers),
    _flushed(false),
    _evac_failure_regions(evac_failure_regions),
    _partial_array_state_allocator(num_workers)
{
  _preserved_marks_set.init(num_workers);
  for (uint i = 0; i < num_workers; ++i) {
    _states[i] = nullptr;
    _rdc_buffers[i] = BufferNodeList();
  }
  memset(_surviving_young_words_total, 0, (collection_set->young_region_length() + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
  FREE_C_HEAP_ARRAY(BufferNodeList, _rdc_buffers);
  _preserved_marks_set.reclaim();
}