/*
 * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/shared/partialArrayTaskStepper.inline.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// In fastdebug builds the code size can get out of hand, potentially
// tripping over compiler limits (which may be bugs, but nevertheless
// need to be taken into consideration).  A side benefit of limiting
// inlining is that we get more call frames that might aid debugging.
// And the fastdebug compile time for this file is much reduced.
// Explicit NOINLINE to block ATTRIBUTE_FLATTENing.
#define MAYBE_INLINE_EVACUATION NOT_DEBUG(inline) DEBUG_ONLY(NOINLINE)

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           uint n_workers,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdc_local_qset(rdcqs),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(NULL),
    _surviving_young_words(NULL),
    _surviving_words_length(young_cset_length + 1),
    _old_gen_is_full(false),
    _partial_objarray_chunk_size(ParGCArrayScanChunk),
    _partial_array_stepper(n_workers),
    _string_dedup_requests(),
    _num_optional_regions(optional_cset_length),
    _numa(g1h->numa()),
    _obj_alloc_stat(NULL)
{
  // We allocate one entry per young gen region in the collection set, plus one,
  // since entry 0 keeps track of surviving bytes for non-young regions.
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  // The dest for Young is used when the objects are aged enough that they
  // need to be moved to the next space.
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old]   = G1HeapRegionAttr::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];

  initialize_numa_stats();
}

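// Flush per-thread state: the local redirty card queue set, NUMA and PLAB
// statistics, and the age table. Adds the per-index surviving young words
// into the supplied array and returns the total number of surviving words.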
size_t G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _rdc_local_qset.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->policy()->record_age_table(&_age_table);

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

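// Task verification, debug builds only: queued references must point into the
// reserved heap, and partial array tasks must refer to collection set objects.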
#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != NULL, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task.to_source_array();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_task()) {
    verify_task(task.to_partial_array_task());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT

template <class T>
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should not be NULL here as such references are never pushed
  // to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. The same card may therefore be
  // processed multiple times, and we might get references into old gen here,
  // so we need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous
  // region as they are not added to the collection set due to the above
  // precondition.
  assert(!region_attr.is_humongous(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markWord m = obj->mark();
  if (m.is_marked()) {
    obj = obj->forwardee(m);
  } else {
    obj = do_copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  write_ref_field_post(p, obj);
}

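// Process one claimed chunk of a partially scanned object array: push further
// partial array tasks as directed by the stepper, then scan the oops in the
// claimed range of the destination array.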
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
  oop from_obj = task.to_source_array();

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(UseCompactObjectHeaders || from_obj->is_objArray(), "must be obj array");
  assert(from_obj->is_forwarded(), "must be forwarded");

  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  assert(to_obj->is_objArray(), "must be obj array");
  objArrayOop to_array = objArrayOop(to_obj);

  PartialArrayTaskStepper::Step step
    = _partial_array_stepper.next(objArrayOop(from_obj),
                                  to_array,
                                  _partial_objarray_chunk_size);
  for (uint i = 0; i < step._ncreate; ++i) {
    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
  }

  HeapRegion* hr = _g1h->heap_region_containing(to_array);
  G1ScanInYoungSetter x(&_scanner, hr->is_young());
  // Process claimed task.  The length of to_array is not correct, but
  // fortunately the iteration ignores the length field and just relies
  // on start/end.
  to_array->oop_iterate_range(&_scanner,
                              step._index,
                              step._index + _partial_objarray_chunk_size);
}

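// Set up chunked scanning of a large object array that has just been copied:
// push the partial array tasks for the remainder (so other workers can steal
// them) and scan the initial chunk of the destination array.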
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                  oop from_obj,
                                                  oop to_obj) {
  assert(UseCompactObjectHeaders || from_obj->is_objArray(), "precondition");
  assert(from_obj->is_forwarded(), "precondition");
  assert(from_obj->forwardee() == to_obj, "precondition");
  assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
  assert(to_obj->is_objArray(), "precondition");

  objArrayOop to_array = objArrayOop(to_obj);

  PartialArrayTaskStepper::Step step
    = _partial_array_stepper.start(objArrayOop(from_obj),
                                   to_array,
                                   _partial_objarray_chunk_size);

  // Push any needed partial scan tasks.  Pushed before processing the
  // initial chunk to allow other workers to steal while we're processing.
  for (uint i = 0; i < step._ncreate; ++i) {
    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
  }

  G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
  // Process the initial chunk.  No need to process the type in the
  // klass, as it will already be handled by processing the built-in
  // module. The length of to_array is not correct, but fortunately
  // the iteration ignores that length field and relies on start/end.
  to_array->oop_iterate_range(&_scanner, 0, step._index);
}

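// Dispatch a queued task to the handler matching its type.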
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::dispatch_task(ScannerTask task) {
  verify_task(task);
  if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    do_oop_evac(task.to_oop_ptr());
  } else {
    do_partial_array(task.to_partial_array_task());
  }
}

// Process tasks until overflow queue is empty and local queue
// contains no more than threshold entries.  NOINLINE to prevent
// inlining into steal_and_trim_queue.
ATTRIBUTE_FLATTEN NOINLINE
void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  ScannerTask task;
  do {
    while (_task_queue->pop_overflow(task)) {
      if (!_task_queue->try_push_to_taskqueue(task)) {
        dispatch_task(task);
      }
    }
    while (_task_queue->pop_local(task, threshold)) {
      dispatch_task(task);
    }
  } while (!_task_queue->overflow_empty());
}

ATTRIBUTE_FLATTEN
void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    dispatch_task(stolen_task);
    // Processing stolen task may have added tasks to our queue.
    trim_queue();
  }
}

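// Try to allocate word_sz words in the next destination after allocation in
// *dest failed: a young (survivor) destination falls back to old gen, updating
// *dest on success; an old destination has no fallback, so return NULL.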
HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies to old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // No other space to try.
    return NULL;
  }
}

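// Determine the destination region attribute for a copied object and report
// its age: objects from young regions stay young while below the tenuring
// threshold, otherwise they move on to the configured destination.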
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ? m.age()
                                         : m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  return dest(region_attr);
}

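// Report a promotion event to the GC tracer, distinguishing allocations made
// inside the current PLAB from direct allocations outside a PLAB.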
void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  Klass* klass, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
                                                             dest_attr.type() == G1HeapRegionAttr::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

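// Slow-path allocation when the initial PLAB allocation failed: allocate
// directly or via a new PLAB in the desired destination, falling back to the
// next generation if needed, and report a promotion event if requested.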
NOINLINE
HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                                                   Klass* klass,
                                                   size_t word_sz,
                                                   uint age,
                                                   uint node_index) {
  HeapWord* obj_ptr = NULL;
  // Try slow-path allocation unless we're allocating old and old is already full.
  if (!(dest_attr->is_old() && _old_gen_is_full)) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
                                                           word_sz,
                                                           &plab_refill_failed,
                                                           node_index);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(dest_attr,
                                      word_sz,
                                      plab_refill_failed,
                                      node_index);
    }
  }
  if (obj_ptr != NULL) {
    update_numa_stats(node_index);
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index);
    }
  }
  return obj_ptr;
}

NOINLINE
void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
                                           HeapWord* obj_ptr,
                                           size_t word_sz,
                                           uint node_index) {
  _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
}

// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
MAYBE_INLINE_EVACUATION
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // Get the klass once.  We'll need it again later, and this avoids
  // re-decoding when it's compressed.
  // NOTE: With compact headers, it is not safe to load the Klass* from old, because
  // that would access the mark-word, and the mark-word might change at any time by
  // concurrent promotion. The promoted mark-word would point to the forwardee, which
  // may not yet have completed copying. Therefore we must load the Klass* from
  // the mark-word that we have already loaded. This is safe, because we have checked
  // that this is not yet forwarded in the caller.
  Klass* klass = old->forward_safe_klass(old_mark);
  const size_t word_sz = old->size_given_klass(klass);

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
    if (obj_ptr == NULL) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = cast_to_oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

    {
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index >  0) ||
             (!from_region->is_young() && young_index == 0), "invariant");
      _surviving_young_words[young_index] += word_sz;
    }

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
      }
      if (old_mark.has_displaced_mark_helper()) {
        // In this case, we have to install the old mark word containing the
        // displacement tag, and update the age in the displaced mark word.
        markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
        old_mark.set_displaced_mark_helper(new_mark);
        obj->set_mark(old_mark);
      } else {
        obj->set_mark(old_mark.set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark(old_mark);
    }

    // Most objects are not arrays, so do one array check rather than
    // checking for each array category for each object.
    if (klass->is_array_klass()) {
      if (klass->is_objArray_klass()) {
        start_partial_objarray(dest_attr, old, obj);
      } else {
        // Nothing needs to be done for typeArrays.  Body doesn't contain
        // any oops to scan, and the type in the klass will already be handled
        // by processing the built-in module.
        assert(klass->is_typeArray_klass(), "invariant");
      }
      return obj;
    }

    // Check for deduplicating young Strings.
    if (G1StringDedup::is_candidate_from_evacuation(klass,
                                                    region_attr,
                                                    dest_attr,
                                                    age)) {
      // Record old; the request adds a new weak reference, which reference
      // processing expects to refer to a from-space object.
      _string_dedup_requests.add(old);
    }

    G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
    obj->oop_iterate_backwards(&_scanner, klass);
    return obj;

  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

// Public not-inline entry point.
ATTRIBUTE_FLATTEN
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                                 oop old,
                                                 markWord old_mark) {
  return do_copy_to_survivor_space(region_attr, old, old_mark);
}

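// Return the per-worker scan thread state, creating it lazily on first use.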
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, _rdcqs,
                               worker_id, _n_workers,
                               _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

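// Flush and delete all per-worker states, accumulating surviving young words
// and recording per-worker copied and LAB waste bytes in the phase times.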
void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_id = 0; worker_id < _n_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];

    if (pss == NULL) {
      continue;
    }

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two before the call to G1ParScanThreadState::flush()
    // because it resets the PLAB allocator where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush(_surviving_young_words_total) * HeapWordSize;

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);

    delete pss;
    _states[worker_id] = NULL;
  }
  _flushed = true;
}

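// Record, per worker, the memory used for buffering oops into this unused
// optional region in the phase times.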
void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

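// Handle evacuation failure for an object that could not be copied: try to
// forward it to itself. The winner of that race preserves the mark and scans
// the object in place; losers return the already installed forwardee.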
NOINLINE
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (_g1h->notify_region_failed_evacuation(r->hrm_index())) {
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    G1ScanInYoungSetter x(&_scanner, r->is_young());
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

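// Per-node object allocation statistics for gc+heap+numa logging: allocated in
// initialize_numa_stats() when NUMA is enabled and the log target is active,
// updated on each successful copy, and reported to G1NUMA by flush_numa_stats().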
void G1ParScanThreadState::initialize_numa_stats() {
  if (_numa->is_enabled()) {
    LogTarget(Info, gc, heap, numa) lt;

    if (lt.is_enabled()) {
      uint num_nodes = _numa->num_active_nodes();
      // Record only if there are multiple active nodes.
      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
    }
  }
}

void G1ParScanThreadState::flush_numa_stats() {
  if (_obj_alloc_stat != NULL) {
    uint node_index = _numa->index_of_current_thread();
    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
  }
}

void G1ParScanThreadState::update_numa_stats(uint node_index) {
  if (_obj_alloc_stat != NULL) {
    _obj_alloc_stat[node_index]++;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 G1RedirtyCardsQueueSet* rdcqs,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
    _g1h(g1h),
    _rdcqs(rdcqs),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)),
    _young_cset_length(young_cset_length),
    _optional_cset_length(optional_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}