/*
 * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/partialArrayTaskStepper.inline.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// In fastdebug builds the code size can get out of hand, potentially
// tripping over compiler limits (which may be bugs, but nevertheless
// need to be taken into consideration).  A side benefit of limiting
// inlining is that we get more call frames that might aid debugging.
// And the fastdebug compile time for this file is much reduced.
// Explicit NOINLINE to block ATTRIBUTE_FLATTENing.
#define MAYBE_INLINE_EVACUATION NOT_DEBUG(inline) DEBUG_ONLY(NOINLINE)

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           uint num_workers,
                                           G1CollectionSet* collection_set,
                                           G1EvacFailureRegions* evac_failure_regions)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdc_local_qset(rdcqs),
    _ct(g1h->card_table()),
    _closures(nullptr),
    _plab_allocator(nullptr),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(nullptr),
    _surviving_young_words(nullptr),
    _surviving_words_length(collection_set->young_region_length() + 1),
    _old_gen_is_full(false),
    _partial_array_state_allocator(g1h->partial_array_state_manager()),
    _partial_array_stepper(num_workers, ParGCArrayScanChunk),
    _string_dedup_requests(),
    _max_num_optional_regions(collection_set->optional_region_length()),
    _numa(g1h->numa()),
    _obj_alloc_stat(nullptr),
    ALLOCATION_FAILURE_INJECTOR_ONLY(_allocation_failure_inject_counter(0) COMMA)
    _evacuation_failed_info(),
    _evac_failure_regions(evac_failure_regions),
    _evac_failure_enqueued_cards(0)
{
  // We allocate one entry per young gen region in the collection set plus one,
  // since entry 0 keeps track of surviving words for non-young regions.
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_PADDING_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _closures = G1EvacuationRootClosures::create_root_closures(_g1h,
                                                             this,
                                                             collection_set->only_contains_young_regions());

  _oops_into_optional_regions = new G1OopStarChunkedList[_max_num_optional_regions];

  initialize_numa_stats();
}

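// Flush this thread's buffered state into the shared structures: redirty card
// buffers, NUMA and PLAB statistics, the age table, and any evacuation failure
// info. Also adds this thread's per-region surviving young words into the
// caller-provided array and returns their total (in words).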
size_t G1ParScanThreadState::flush_stats(size_t* surviving_young_words, uint num_workers, BufferNodeList* rdc_buffers) {
  *rdc_buffers = _rdc_local_qset.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats(num_workers);
  _g1h->policy()->record_age_table(&_age_table);

  if (_evacuation_failed_info.has_failed()) {
    _g1h->gc_tracer_stw()->report_evacuation_failed(_evacuation_failed_info);
  }

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

size_t G1ParScanThreadState::evac_failure_enqueued_cards() const {
  return _evac_failure_enqueued_cards;
}

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != nullptr, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != nullptr, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayState* task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task->source();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_state()) {
    verify_task(task.to_partial_array_state());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT

template <class T>
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should not be null here, as null references are never pushed
  // to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. The same card may then be
  // processed multiple times, so we might get references into the old gen here
  // and need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous
  // region, as such regions are not added to the collection set.
  assert(!region_attr.is_humongous_candidate(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region(obj), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markWord m = obj->mark();
  if (m.is_forwarded()) {
    obj = obj->forwardee(m);
  } else {
    obj = do_copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  write_ref_field_post(p, obj);
}

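// Process one claimed chunk of a partially-scanned object array. Follow-up chunk
// tasks are pushed before scanning the claimed chunk so that other workers can
// steal them in the meantime.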
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_partial_array(PartialArrayState* state) {
  oop to_obj = state->destination();

#ifdef ASSERT
  oop from_obj = state->source();
  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_forwarded(), "must be forwarded");
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  assert(to_obj->is_objArray(), "must be obj array");
#endif // ASSERT

  objArrayOop to_array = objArrayOop(to_obj);

  // Claim a chunk and get number of additional tasks to enqueue.
  PartialArrayTaskStepper::Step step = _partial_array_stepper.next(state);
  // Push any additional partial scan tasks needed.  Pushed before processing
  // the claimed chunk to allow other workers to steal while we're processing.
  if (step._ncreate > 0) {
    state->add_references(step._ncreate);
    for (uint i = 0; i < step._ncreate; ++i) {
      push_on_queue(ScannerTask(state));
    }
  }

  G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_new_survivor());
  // Process claimed task.
  to_array->oop_iterate_range(&_scanner,
                              checked_cast<int>(step._index),
                              checked_cast<int>(step._index + _partial_array_stepper.chunk_size()));
  // Release reference to the state, now that we're done with it.
  _partial_array_state_allocator.release(state);
}

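// Start chunked processing of a just-copied object array: publish a shared
// PartialArrayState and push tasks for the chunks beyond the initial one, then
// scan the initial chunk here.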
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                  oop from_obj,
                                                  oop to_obj) {
  assert(from_obj->is_forwarded(), "precondition");
  assert(from_obj->forwardee() == to_obj, "precondition");
  assert(to_obj->is_objArray(), "precondition");

  objArrayOop to_array = objArrayOop(to_obj);

  size_t array_length = to_array->length();
  PartialArrayTaskStepper::Step step = _partial_array_stepper.start(array_length);

  // Push any needed partial scan tasks.  Pushed before processing the
  // initial chunk to allow other workers to steal while we're processing.
  if (step._ncreate > 0) {
    assert(step._index < array_length, "invariant");
    assert(((array_length - step._index) % _partial_array_stepper.chunk_size()) == 0,
           "invariant");
    PartialArrayState* state =
      _partial_array_state_allocator.allocate(from_obj, to_obj,
                                              step._index,
                                              array_length,
                                              step._ncreate);
    for (uint i = 0; i < step._ncreate; ++i) {
      push_on_queue(ScannerTask(state));
    }
  } else {
    assert(step._index == array_length, "invariant");
  }

  // Skip the card enqueue iff the object (to_array) is in survivor region.
  // However, G1HeapRegion::is_survivor() is too expensive here.
  // Instead, we use dest_attr.is_young() because the two values are always
  // equal: successfully allocated young regions must be survivor regions.
  assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
  // Process the initial chunk.  No need to process the type in the
  // klass, as it will already be handled by processing the built-in
  // module.
  to_array->oop_iterate_range(&_scanner, 0, checked_cast<int>(step._index));
}

MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::dispatch_task(ScannerTask task) {
  verify_task(task);
  if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    do_oop_evac(task.to_oop_ptr());
  } else {
    do_partial_array(task.to_partial_array_state());
  }
}

// Process tasks until overflow queue is empty and local queue
// contains no more than threshold entries.  NOINLINE to prevent
// inlining into steal_and_trim_queue.
ATTRIBUTE_FLATTEN NOINLINE
void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  ScannerTask task;
  do {
    while (_task_queue->pop_overflow(task)) {
      if (!_task_queue->try_push_to_taskqueue(task)) {
        dispatch_task(task);
      }
    }
    while (_task_queue->pop_local(task, threshold)) {
      dispatch_task(task);
    }
  } while (!_task_queue->overflow_empty());
}

ATTRIBUTE_FLATTEN
void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    dispatch_task(stolen_task);
    // Processing stolen task may have added tasks to our queue.
    trim_queue();
  }
}

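// Try to allocate word_sz words in the "next" destination after allocation in the
// intended destination failed: survivor allocations fall back to the old gen, while
// old gen allocations have nowhere else to go and return null.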
HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous_candidate(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != nullptr) {
      dest->set_old();
    } else {
      // We just failed to allocate in the old gen. The same idea as explained above
      // for making the survivor gen unavailable for allocation applies to the old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // no other space to try.
    return nullptr;
  }
}

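// Determine the destination region type for the given object: objects from young
// regions whose age is below the tenuring threshold keep a young (survivor)
// destination, everything else is directed to old. For objects in young regions the
// age is read from the mark word (or the displaced mark) and returned through the
// out parameter.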
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  assert(region_attr.is_young() || region_attr.is_old(), "must be either Young or Old");

  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ? m.age()
                                         : m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  // young-to-old (promotion) or old-to-old; destination is old in both cases.
  return G1HeapRegionAttr::Old;
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  Klass* klass, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old,
                                                              alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
                                                               dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

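// Slow-path allocation for an evacuated object after the initial PLAB allocation
// failed: try a direct or new-PLAB allocation in the intended destination, and if
// that fails fall back to the next destination via allocate_in_next_plab(). Returns
// null if no space could be found, which leads to evacuation failure handling.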
NOINLINE
HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                                                   Klass* klass,
                                                   size_t word_sz,
                                                   uint age,
                                                   uint node_index) {
  HeapWord* obj_ptr = nullptr;
  // Try slow-path allocation unless we're allocating old and old is already full.
  if (!(dest_attr->is_old() && _old_gen_is_full)) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
                                                           word_sz,
                                                           &plab_refill_failed,
                                                           node_index);
    if (obj_ptr == nullptr) {
      obj_ptr = allocate_in_next_plab(dest_attr,
                                      word_sz,
                                      plab_refill_failed,
                                      node_index);
    }
  }
  if (obj_ptr != nullptr) {
    update_numa_stats(node_index);
    if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index);
    }
  }
  return obj_ptr;
}

#if ALLOCATION_FAILURE_INJECTOR
bool G1ParScanThreadState::inject_allocation_failure(uint region_idx) {
  return _g1h->allocation_failure_injector()->allocation_should_fail(_allocation_failure_inject_counter, region_idx);
}
#endif

NOINLINE
void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
                                           HeapWord* obj_ptr,
                                           size_t word_sz,
                                           uint node_index) {
  _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
}

void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
  HeapWord* obj_start = cast_from_oop<HeapWord*>(obj);
  G1HeapRegion* region = _g1h->heap_region_containing(obj_start);
  region->update_bot_for_block(obj_start, obj_start + word_sz);
}

// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
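// Copies "old" into its destination PLAB, claims it by installing a forwarding
// pointer, updates the age table or BOT, and continues scanning the copy. On
// allocation failure it delegates to evacuation failure handling; on a lost
// forwarding race it undoes the allocation and returns the winner's forwardee.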
MAYBE_INLINE_EVACUATION
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // NOTE: With compact headers, it is not safe to load the Klass* from old, because
  // that would access the mark word, which might be changed at any time by
  // concurrent workers. Such a mark word would refer to a forwardee, which may not
  // yet have completed copying. Therefore we must load the Klass* from the mark
  // word that we already loaded. This is safe, because we only enter here if the
  // object is not yet forwarded.
  assert(!old_mark.is_forwarded(), "precondition");
  Klass* klass = UseCompactObjectHeaders
      ? old_mark.klass()
      : old->klass();

  const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
  const size_t word_sz = old->copy_size(old_size, old_mark);

  // JNI only allows pinning of typeArrays, so we only need to keep those in place.
  if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
    return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
  }

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against null once and that's it.
  if (obj_ptr == nullptr) {
    obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
    if (obj_ptr == nullptr) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
    }
  }

  assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

  // Should this evacuation fail?
  if (inject_allocation_failure(from_region->hrm_index())) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
  }

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);

  const oop obj = cast_to_oop(obj_ptr);
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy.  Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == nullptr) {

    {
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index > 0) ||
             (!from_region->is_young() && young_index == 0), "invariant");
      _surviving_young_words[young_index] += word_sz;
    }

    obj->initialize_hash_if_necessary(old);

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
        obj->incr_age();
      }
      _age_table.add(age, word_sz);
    } else {
      update_bot_after_copying(obj, word_sz);
    }

    // Most objects are not arrays, so do one array check rather than
    // checking for each array category for each object.
    if (klass->is_array_klass()) {
      if (klass->is_objArray_klass()) {
        start_partial_objarray(dest_attr, old, obj);
      } else {
        // Nothing needs to be done for typeArrays.  Body doesn't contain
        // any oops to scan, and the type in the klass will already be handled
        // by processing the built-in module.
        assert(klass->is_typeArray_klass(), "invariant");
      }
      return obj;
    }

    ContinuationGCSupport::transform_stack_chunk(obj);

    // Check for deduplicating young Strings.
    if (G1StringDedup::is_candidate_from_evacuation(klass,
                                                    region_attr,
                                                    dest_attr,
                                                    age)) {
      // Record old; request adds a new weak reference, which reference
      // processing expects to refer to a from-space object.
      _string_dedup_requests.add(old);
    }

    // Skip the card enqueue iff the object (obj) is in survivor region.
    // However, G1HeapRegion::is_survivor() is too expensive here.
    // Instead, we use dest_attr.is_young() because the two values are always
    // equal: successfully allocated young regions must be survivor regions.
    assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
    G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
    obj->oop_iterate_backwards(&_scanner, klass);
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

// Public not-inline entry point.
ATTRIBUTE_FLATTEN
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                                 oop old,
                                                 markWord old_mark) {
  return do_copy_to_survivor_space(region_attr, old, old_mark);
}

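// Worker states are created lazily, on first access by the given worker.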
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _num_workers, "out of bounds access");
  if (_states[worker_id] == nullptr) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, rdcqs(),
                               worker_id,
                               _num_workers,
                               _collection_set,
                               _evac_failure_regions);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

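// Merge all per-worker statistics into the set-wide totals and phase times, then
// delete the worker states and hand their redirty card buffers to the global
// dirty card queue set. Must be called only once.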
void G1ParScanThreadStateSet::flush_stats() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");
  for (uint worker_id = 0; worker_id < _num_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];
    assert(pss != nullptr, "must be initialized");

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two before the call to G1ParScanThreadState::flush_stats()
    // because it resets the PLAB allocator where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush_stats(_surviving_young_words_total, _num_workers, &_rdc_buffers[worker_id]) * HeapWordSize;
    size_t evac_fail_enqueued_cards = pss->evac_failure_enqueued_cards();

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, evac_fail_enqueued_cards, G1GCPhaseTimes::MergePSSEvacFailExtra);

    delete pss;
    _states[worker_id] = nullptr;
  }

  G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
  dcq.merge_bufferlists(rdcqs());
  rdcqs()->verify_empty();

  _flushed = true;
}

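// Record, per worker, how much memory the buffered oops into the given unused
// optional region occupy, so the cost shows up in the OptScanHR phase times.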
void G1ParScanThreadStateSet::record_unused_optional_region(G1HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];
    assert(pss != nullptr, "must be initialized");

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

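// Handle failure to evacuate "old": try to self-forward it. The winner of that
// race records the failed region, marks the object for later recovery and scans
// it in place; losers simply return the already-installed forwardee.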
NOINLINE
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
  if (forward_ptr == nullptr) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    G1HeapRegion* r = _g1h->heap_region_containing(old);

    if (_evac_failure_regions->record(_worker_id, r->hrm_index(), cause_pinned)) {
      G1HeapRegionPrinter::evac_failure(r);
    }

    // Mark the failing object in the marking bitmap and later use the bitmap to handle
    // evacuation failure recovery.
    _g1h->mark_evac_failure_object(_worker_id, old, word_sz);

    ContinuationGCSupport::transform_stack_chunk(old);

    _evacuation_failed_info.register_copy_failure(word_sz);

    // For objects that failed evacuation we can currently reuse the existing
    // closure used for scanning evacuated objects; since we are iterating over a
    // collection set region (i.e. never a Survivor region), we always need to
    // gather cards for this case.
    G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

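// Per-node object allocation statistics are only gathered when NUMA is enabled and
// the corresponding log target is active; otherwise _obj_alloc_stat stays null and
// the flush/update functions below are no-ops.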
void G1ParScanThreadState::initialize_numa_stats() {
  if (_numa->is_enabled()) {
    LogTarget(Info, gc, heap, numa) lt;

    if (lt.is_enabled()) {
      uint num_nodes = _numa->num_active_nodes();
      // Record only if there are multiple active nodes.
      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
    }
  }
}

void G1ParScanThreadState::flush_numa_stats() {
  if (_obj_alloc_stat != nullptr) {
    uint node_index = _numa->index_of_current_thread();
    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
  }
}

void G1ParScanThreadState::update_numa_stats(uint node_index) {
  if (_obj_alloc_stat != nullptr) {
    _obj_alloc_stat[node_index]++;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint num_workers,
                                                 G1CollectionSet* collection_set,
                                                 G1EvacFailureRegions* evac_failure_regions) :
    _g1h(g1h),
    _collection_set(collection_set),
    _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)),
    _rdc_buffers(NEW_C_HEAP_ARRAY(BufferNodeList, num_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)),
    _num_workers(num_workers),
    _flushed(false),
    _evac_failure_regions(evac_failure_regions)
{
  for (uint i = 0; i < num_workers; ++i) {
    _states[i] = nullptr;
    _rdc_buffers[i] = BufferNodeList();
  }
  memset(_surviving_young_words_total, 0, (collection_set->young_region_length() + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
  FREE_C_HEAP_ARRAY(BufferNodeList, _rdc_buffers);
}