/*
 * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1YoungGCEvacFailureInjector.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/partialArrayTaskStepper.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// In fastdebug builds the code size can get out of hand, potentially
// tripping over compiler limits (which may be bugs, but nevertheless
// need to be taken into consideration).  A side benefit of limiting
// inlining is that we get more call frames that might aid debugging.
// And the fastdebug compile time for this file is much reduced.
// Explicit NOINLINE to block ATTRIBUTE_FLATTENing.
#define MAYBE_INLINE_EVACUATION NOT_DEBUG(inline) DEBUG_ONLY(NOINLINE)

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           PreservedMarks* preserved_marks,
                                           uint worker_id,
                                           uint num_workers,
                                           G1CollectionSet* collection_set,
                                           G1EvacFailureRegions* evac_failure_regions)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdc_local_qset(rdcqs),
    _ct(g1h->card_table()),
    _closures(nullptr),
    _plab_allocator(nullptr),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(nullptr),
    _surviving_young_words(nullptr),
    _surviving_words_length(collection_set->young_region_length() + 1),
    _old_gen_is_full(false),
    _partial_objarray_chunk_size(ParGCArrayScanChunk),
    _partial_array_stepper(num_workers),
    _string_dedup_requests(),
    _max_num_optional_regions(collection_set->optional_region_length()),
    _numa(g1h->numa()),
    _obj_alloc_stat(nullptr),
    EVAC_FAILURE_INJECTOR_ONLY(_evac_failure_inject_counter(0) COMMA)
    _preserved_marks(preserved_marks),
    _evacuation_failed_info(),
    _evac_failure_regions(evac_failure_regions)
{
  // We allocate one entry per young gen region in the collection set plus one
  // extra entry, since entry 0 keeps track of surviving bytes for non-young regions.
  // We also add a few padding elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _closures = G1EvacuationRootClosures::create_root_closures(_g1h,
                                                             this,
                                                             collection_set->only_contains_young_regions());

  _oops_into_optional_regions = new G1OopStarChunkedList[_max_num_optional_regions];

  initialize_numa_stats();
}

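// Flush this thread's buffered state: redirty card queues, NUMA statistics,
// PLAB allocation statistics and the age table. Accumulates the per-region
// surviving young words into the given array and returns the total number of
// words copied by this thread.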
size_t G1ParScanThreadState::flush_stats(size_t* surviving_young_words, uint num_workers) {
  _rdc_local_qset.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats(num_workers);
  _g1h->policy()->record_age_table(&_age_table);

  if (_evacuation_failed_info.has_failed()) {
     _g1h->gc_tracer_stw()->report_evacuation_failed(_evacuation_failed_info);
  }

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != nullptr, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != nullptr, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task.to_source_array();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_task()) {
    verify_task(task.to_partial_array_task());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT

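// Evacuate the object referenced by p if it is still in the collection set,
// update the reference to point to the object's new location, and do the
// post write barrier work for the updated field.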
template <class T>
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should not be null here, as null references are never
  // pushed to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card, so the same card may be
  // processed multiple times, and we might get references into old gen here.
  // Hence we need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous
  // region, as humongous regions are never added to the collection set.
  assert(!region_attr.is_humongous_candidate(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region(obj), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markWord m = obj->mark();
  if (m.is_marked()) {
    obj = cast_to_oop(m.decode_pointer());
  } else {
    obj = do_copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  write_ref_field_post(p, obj);
}

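// Process one claimed chunk of a partially scanned object array: push
// follow-up partial scan tasks if the stepper indicates more are needed, then
// scan the claimed index range of the destination array.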
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
  oop from_obj = task.to_source_array();

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  assert(from_obj->is_forwarded(), "must be forwarded");

  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  assert(to_obj->is_objArray(), "must be obj array");
  objArrayOop to_array = objArrayOop(to_obj);

  PartialArrayTaskStepper::Step step
    = _partial_array_stepper.next(objArrayOop(from_obj),
                                  to_array,
                                  _partial_objarray_chunk_size);
  for (uint i = 0; i < step._ncreate; ++i) {
    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
  }

  G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_new_survivor());
  // Process claimed task.  The length of to_array is not correct, but
  // fortunately the iteration ignores the length field and just relies
  // on start/end.
  to_array->oop_iterate_range(&_scanner,
                              step._index,
                              step._index + _partial_objarray_chunk_size);
}

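// Set up chunked scanning of a just-copied object array: push partial scan
// tasks for the remaining chunks (so other workers can steal them), then
// process the initial chunk here.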
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                  oop from_obj,
                                                  oop to_obj) {
  assert(from_obj->is_objArray(), "precondition");
  assert(from_obj->is_forwarded(), "precondition");
  assert(from_obj->forwardee() == to_obj, "precondition");
  assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
  assert(to_obj->is_objArray(), "precondition");

  objArrayOop to_array = objArrayOop(to_obj);

  PartialArrayTaskStepper::Step step
    = _partial_array_stepper.start(objArrayOop(from_obj),
                                   to_array,
                                   _partial_objarray_chunk_size);

  // Push any needed partial scan tasks.  Pushed before processing the
  // initial chunk to allow other workers to steal while we're processing.
  for (uint i = 0; i < step._ncreate; ++i) {
    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
  }

  // Skip the card enqueue iff the object (to_array) is in a survivor region.
  // However, HeapRegion::is_survivor() is too expensive here.
  // Instead, we use dest_attr.is_young() because the two values are always
  // equal: successfully allocated young regions must be survivor regions.
  assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
  // Process the initial chunk.  No need to process the type in the
  // klass, as it will already be handled by processing the built-in
  // module. The length of to_array is not correct, but fortunately
  // the iteration ignores that length field and relies on start/end.
  to_array->oop_iterate_range(&_scanner, 0, step._index);
}

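// Verify a queued task and dispatch it to the matching handler based on its
// type: a narrow oop location, a full-width oop location, or a partial array
// chunk.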
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::dispatch_task(ScannerTask task) {
  verify_task(task);
  if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    do_oop_evac(task.to_oop_ptr());
  } else {
    do_partial_array(task.to_partial_array_task());
  }
}

// Process tasks until overflow queue is empty and local queue
// contains no more than threshold entries.  NOINLINE to prevent
// inlining into steal_and_trim_queue.
ATTRIBUTE_FLATTEN NOINLINE
void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  ScannerTask task;
  do {
    while (_task_queue->pop_overflow(task)) {
      if (!_task_queue->try_push_to_taskqueue(task)) {
        dispatch_task(task);
      }
    }
    while (_task_queue->pop_local(task, threshold)) {
      dispatch_task(task);
    }
  } while (!_task_queue->overflow_empty());
}

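// Steal tasks from other workers' queues and process them, trimming our own
// queue after each stolen task because processing it may have pushed new work.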
ATTRIBUTE_FLATTEN
void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    dispatch_task(stolen_task);
    // Processing stolen task may have added tasks to our queue.
    trim_queue();
  }
}

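// Try to allocate word_sz words in the "next" destination after allocation in
// the preferred destination failed: survivor allocations fall back to old gen,
// while old gen allocations have nowhere else to go and return null.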
HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous_candidate(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != nullptr) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // no other space to try.
    return nullptr;
  }
}

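// Determine the destination region attribute for an object: objects from young
// regions below the tenuring threshold stay young (survivor), everything else
// goes to old. Also reports the object's age through the out parameter.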
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  assert(region_attr.is_young() || region_attr.is_old(), "must be either Young or Old");

  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ? m.age()
                                         : m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  // young-to-old (promotion) or old-to-old; destination is old in both cases.
  return G1HeapRegionAttr::Old;
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old,
                                                              alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                               dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

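// Slow-path allocation for a copy: try a direct or new-PLAB allocation in the
// requested destination, falling back to the next destination via
// allocate_in_next_plab() if that fails. Returns null if no space was found.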
NOINLINE
HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                                                   oop old,
                                                   size_t word_sz,
                                                   uint age,
                                                   uint node_index) {
  HeapWord* obj_ptr = nullptr;
  // Try slow-path allocation unless we're allocating old and old is already full.
  if (!(dest_attr->is_old() && _old_gen_is_full)) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
                                                           word_sz,
                                                           &plab_refill_failed,
                                                           node_index);
    if (obj_ptr == nullptr) {
      obj_ptr = allocate_in_next_plab(dest_attr,
                                      word_sz,
                                      plab_refill_failed,
                                      node_index);
    }
  }
  if (obj_ptr != nullptr) {
    update_numa_stats(node_index);
    if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
    }
  }
  return obj_ptr;
}

#if EVAC_FAILURE_INJECTOR
bool G1ParScanThreadState::inject_evacuation_failure(uint region_idx) {
  return _g1h->evac_failure_injector()->evacuation_should_fail(_evac_failure_inject_counter, region_idx);
}
#endif

NOINLINE
void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
                                           HeapWord* obj_ptr,
                                           size_t word_sz,
                                           uint node_index) {
  _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
}

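// Update the block offset table of the destination region for the newly
// copied object, so that block-start lookups during card scanning work.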
void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
  HeapWord* obj_start = cast_from_oop<HeapWord*>(obj);
  HeapRegion* region = _g1h->heap_region_containing(obj_start);
  region->update_bot_for_obj(obj_start, word_sz);
}

// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
MAYBE_INLINE_EVACUATION
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // Get the klass once.  We'll need it again later, and this avoids
  // re-decoding when it's compressed.
  Klass* klass = old->klass();
  const size_t word_sz = old->size_given_klass(klass);

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against null once and that's it.
  if (obj_ptr == nullptr) {
    obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
    if (obj_ptr == nullptr) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark, word_sz);
    }
  }

  assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

  // Should this evacuation fail?
  if (inject_evacuation_failure(from_region->hrm_index())) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark, word_sz);
  }

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

  const oop obj = cast_to_oop(obj_ptr);
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy.  Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == nullptr) {

    {
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index >  0) ||
             (!from_region->is_young() && young_index == 0), "invariant" );
      _surviving_young_words[young_index] += word_sz;
    }

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
        obj->incr_age();
      }
      _age_table.add(age, word_sz);
    } else {
      update_bot_after_copying(obj, word_sz);
    }

    // Most objects are not arrays, so do one array check rather than
    // checking for each array category for each object.
    if (klass->is_array_klass()) {
      if (klass->is_objArray_klass()) {
        start_partial_objarray(dest_attr, old, obj);
      } else {
        // Nothing needs to be done for typeArrays.  Body doesn't contain
        // any oops to scan, and the type in the klass will already be handled
        // by processing the built-in module.
        assert(klass->is_typeArray_klass(), "invariant");
      }
      return obj;
    }

    ContinuationGCSupport::transform_stack_chunk(obj);

    // Check for deduplicating young Strings.
    if (G1StringDedup::is_candidate_from_evacuation(klass,
                                                    region_attr,
                                                    dest_attr,
                                                    age)) {
      // Record old; request adds a new weak reference, which reference
      // processing expects to refer to a from-space object.
      _string_dedup_requests.add(old);
    }

    // Skip the card enqueue iff the object (obj) is in a survivor region.
    // However, HeapRegion::is_survivor() is too expensive here.
    // Instead, we use dest_attr.is_young() because the two values are always
    // equal: successfully allocated young regions must be survivor regions.
    assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
    G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
    obj->oop_iterate_backwards(&_scanner, klass);
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

// Public not-inline entry point.
ATTRIBUTE_FLATTEN
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                                 oop old,
                                                 markWord old_mark) {
  return do_copy_to_survivor_space(region_attr, old, old_mark);
}

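// Return the scan state for the given worker, lazily creating it on first use.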
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _num_workers, "out of bounds access");
  if (_states[worker_id] == nullptr) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, rdcqs(),
                               _preserved_marks_set.get(worker_id),
                               worker_id,
                               _num_workers,
                               _collection_set,
                               _evac_failure_regions);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

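// Flush and delete all per-worker states, recording their copied bytes and
// LAB (undo) waste in the phase times and accumulating the surviving young
// words totals.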
void G1ParScanThreadStateSet::flush_stats() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_id = 0; worker_id < _num_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];
    assert(pss != nullptr, "must be initialized");

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two before the call to G1ParScanThreadState::flush_stats()
    // because it resets the PLAB allocator where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush_stats(_surviving_young_words_total, _num_workers) * HeapWordSize;

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);

    delete pss;
    _states[worker_id] = nullptr;
  }
  _flushed = true;
}

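// Record, per worker, how much memory was used to buffer references into the
// given optional region that ended up not being evacuated.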
void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];
    assert(pss != nullptr, "must be initialized");

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

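// Handle evacuation failure for old by attempting to self-forward it. The
// winning thread records the failed region, marks and preserves the object and
// scans its fields; losing threads return the forwardee installed by the winner.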
NOINLINE
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == nullptr) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (_evac_failure_regions->record(r->hrm_index())) {
      _g1h->hr_printer()->evac_failure(r);
    }

    // Mark the failing object in the marking bitmap and later use the bitmap to handle
    // evacuation failure recovery.
    _g1h->mark_evac_failure_object(_worker_id, old, word_sz);

    _preserved_marks->push_if_necessary(old, m);

    ContinuationGCSupport::transform_stack_chunk(old);

    _evacuation_failed_info.register_copy_failure(word_sz);

    // For iterating objects that failed evacuation we can currently reuse the
    // existing closure used to scan evacuated objects because:
    // - for objects referring into the collection set we do not need to gather
    //   cards at this time; the regions they are in will be unconditionally turned
    //   into old regions without remembered sets.
    // - since we are iterating from a collection set region (i.e. never a Survivor
    //   region), we always need to gather cards for this case.
    G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

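// Allocate and zero the per-node object allocation counters, but only when
// NUMA is enabled and NUMA-related GC logging is active.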
void G1ParScanThreadState::initialize_numa_stats() {
  if (_numa->is_enabled()) {
    LogTarget(Info, gc, heap, numa) lt;

    if (lt.is_enabled()) {
      uint num_nodes = _numa->num_active_nodes();
      // Record only if there are multiple active nodes.
      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
    }
  }
}

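// Publish this thread's per-node allocation counts to the global NUMA statistics.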
void G1ParScanThreadState::flush_numa_stats() {
  if (_obj_alloc_stat != nullptr) {
    uint node_index = _numa->index_of_current_thread();
    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
  }
}

void G1ParScanThreadState::update_numa_stats(uint node_index) {
  if (_obj_alloc_stat != nullptr) {
    _obj_alloc_stat[node_index]++;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint num_workers,
                                                 G1CollectionSet* collection_set,
                                                 G1EvacFailureRegions* evac_failure_regions) :
    _g1h(g1h),
    _collection_set(collection_set),
    _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()),
    _preserved_marks_set(true /* in_c_heap */),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)),
    _num_workers(num_workers),
    _flushed(false),
    _evac_failure_regions(evac_failure_regions) {
  _preserved_marks_set.init(num_workers);
  for (uint i = 0; i < num_workers; ++i) {
    _states[i] = nullptr;
  }
  memset(_surviving_young_words_total, 0, (collection_set->young_region_length() + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
  _preserved_marks_set.assert_empty();
  _preserved_marks_set.reclaim();
}