180
181 // Although we never intentionally push references outside of the collection
182 // set, benign races in the claim mechanism during RSet scanning mean that
183 // more than one thread might claim the same card. The same card may therefore
184 // be processed multiple times, so we might encounter references into old gen
185 // here and need to redo this check.
186 const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
187 // References pushed onto the work stack should never point to a humongous
188 // region, as humongous regions are not added to the collection set due to the precondition above.
189 assert(!region_attr.is_humongous(),
190 "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
191 p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));
192
193 if (!region_attr.is_in_cset()) {
194 // In this case somebody else already did all the work.
195 return;
196 }
197
198 markWord m = obj->mark();
199 if (m.is_marked()) {
200 obj = cast_to_oop(m.decode_pointer());
201 } else {
202 obj = do_copy_to_survivor_space(region_attr, obj, m);
203 }
204 RawAccess<IS_NOT_NULL>::oop_store(p, obj);
205
206 assert(obj != NULL, "Must be");
207 if (HeapRegion::is_in_same_region(p, obj)) {
208 return;
209 }
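  // The reference crosses regions. Cross-region references from young regions
  // never need remembered set entries (young regions are always collected), so
  // only enqueue the card for a non-young source region, and only if the
  // destination region's remembered set is being tracked.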
210 HeapRegion* from = _g1h->heap_region_containing(p);
211 if (!from->is_young()) {
212 enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
213 }
214 }
215
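// Process one claimed chunk of an object array that is being evacuated in
// pieces: push follow-up chunk tasks so other workers can steal them, then
// scan this chunk of the already-forwarded destination array.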
216 MAYBE_INLINE_EVACUATION
217 void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
218 oop from_obj = task.to_source_array();
219
220 assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
221 assert(from_obj->is_objArray(), "must be obj array");
222 assert(from_obj->is_forwarded(), "must be forwarded");
223
224 oop to_obj = from_obj->forwardee();
225 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
226 assert(to_obj->is_objArray(), "must be obj array");
227 objArrayOop to_array = objArrayOop(to_obj);
228
229 PartialArrayTaskStepper::Step step
230 = _partial_array_stepper.next(objArrayOop(from_obj),
231 to_array,
232 _partial_objarray_chunk_size);
233 for (uint i = 0; i < step._ncreate; ++i) {
234 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
235 }
236
237 HeapRegion* hr = _g1h->heap_region_containing(to_array);
238 G1ScanInYoungSetter x(&_scanner, hr->is_young());
239 // Process claimed task. The length of to_array is not correct, but
240 // fortunately the iteration ignores the length field and just relies
241 // on start/end.
242 to_array->oop_iterate_range(&_scanner,
243 step._index,
244 step._index + _partial_objarray_chunk_size);
245 }
246
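// Start chunked scanning of a just-copied object array: push partial-scan
// tasks for the remaining chunks so other workers can help, then scan the
// initial chunk in this thread.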
247 MAYBE_INLINE_EVACUATION
248 void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
249 oop from_obj,
250 oop to_obj) {
251 assert(from_obj->is_objArray(), "precondition");
252 assert(from_obj->is_forwarded(), "precondition");
253 assert(from_obj->forwardee() == to_obj, "precondition");
254 assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
255 assert(to_obj->is_objArray(), "precondition");
256
257 objArrayOop to_array = objArrayOop(to_obj);
258
259 PartialArrayTaskStepper::Step step
260 = _partial_array_stepper.start(objArrayOop(from_obj),
261 to_array,
262 _partial_objarray_chunk_size);
263
264 // Push any needed partial scan tasks. Pushed before processing the
265 // initial chunk to allow other workers to steal while we're processing.
266 for (uint i = 0; i < step._ncreate; ++i) {
267 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
268 }
269
270 G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
271 // Process the initial chunk. No need to process the type in the
350 } else {
351 _old_gen_is_full = previous_plab_refill_failed;
352 assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
353 // no other space to try.
354 return NULL;
355 }
356 }
357
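// Determine the copy destination. An object from a young region stays in the
// young (survivor) destination while its age, read from the mark word (or the
// displaced mark when the header is displaced), is below the tenuring
// threshold; otherwise fall through to the next destination via dest().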
358 G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
359 if (region_attr.is_young()) {
360 age = !m.has_displaced_mark_helper() ? m.age()
361 : m.displaced_mark_helper().age();
362 if (age < _tenuring_threshold) {
363 return region_attr;
364 }
365 }
366 return dest(region_attr);
367 }
368
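// Report a promotion tracing event for the copied object, distinguishing
// allocations that landed inside the current PLAB from direct,
// outside-of-PLAB allocations.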
369 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
370 oop const old, size_t word_sz, uint age,
371 HeapWord * const obj_ptr, uint node_index) const {
372 PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
373 if (alloc_buf->contains(obj_ptr)) {
374 _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
375 dest_attr.type() == G1HeapRegionAttr::Old,
376 alloc_buf->word_sz() * HeapWordSize);
377 } else {
378 _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
379 dest_attr.type() == G1HeapRegionAttr::Old);
380 }
381 }
382
383 NOINLINE
384 HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
385 oop old,
386 size_t word_sz,
387 uint age,
388 uint node_index) {
389 HeapWord* obj_ptr = NULL;
390 // Try slow-path allocation unless we're allocating old and old is already full.
391 if (!(dest_attr->is_old() && _old_gen_is_full)) {
392 bool plab_refill_failed = false;
393 obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
394 word_sz,
395 &plab_refill_failed,
396 node_index);
397 if (obj_ptr == NULL) {
398 obj_ptr = allocate_in_next_plab(dest_attr,
399 word_sz,
400 plab_refill_failed,
401 node_index);
402 }
403 }
404 if (obj_ptr != NULL) {
405 update_numa_stats(node_index);
406 if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
407 // The events are checked individually as part of the actual commit
408 report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
409 }
410 }
411 return obj_ptr;
412 }
413
414 NOINLINE
415 void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
416 HeapWord* obj_ptr,
417 size_t word_sz,
418 uint node_index) {
419 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
420 }
421
422 // Private inline function, for direct internal use and for providing the
423 // implementation of the public, non-inlined function.
424 MAYBE_INLINE_EVACUATION
425 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
426 oop const old,
427 markWord const old_mark) {
428 assert(region_attr.is_in_cset(),
429 "Unexpected region attr type: %s", region_attr.get_type_str());
430
431 // Get the klass once. We'll need it again later, and this avoids
432 // re-decoding when it's compressed.
433 Klass* klass = old->klass();
434 const size_t word_sz = old->size_given_klass(klass);
435
436 uint age = 0;
437 G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
438 HeapRegion* const from_region = _g1h->heap_region_containing(old);
439 uint node_index = from_region->node_index();
440
441 HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
442
443 // PLAB allocations should succeed most of the time, so we'll
444 // normally check against NULL once and that's it.
445 if (obj_ptr == NULL) {
446 obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
447 if (obj_ptr == NULL) {
448 // This will either forward-to-self, or detect that someone else has
449 // installed a forwarding pointer.
450 return handle_evacuation_failure_par(old, old_mark);
451 }
452 }
453
454 assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
455 assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
456
457 #ifndef PRODUCT
458 // Should this evacuation fail?
459 if (_g1h->evacuation_should_fail()) {
460     // Doing this after all the allocation attempts also exercises the
461     // undo_allocation() method.
462 undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
463 return handle_evacuation_failure_par(old, old_mark);
464 }
465 #endif // !PRODUCT
466
583 _flushed = true;
584 }
585
586 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
587 for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
588 G1ParScanThreadState* pss = _states[worker_index];
589
590 if (pss == NULL) {
591 continue;
592 }
593
594 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
595 _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
596 }
597 }
598
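// Evacuation of 'old' failed. Attempt to install a self-forwarding pointer;
// the thread that wins the race keeps the object in place, preserves its mark
// and scans it, otherwise the forwarding installed by the winner is used.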
599 NOINLINE
600 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
601 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
602
603 oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
604 if (forward_ptr == NULL) {
605 // Forward-to-self succeeded. We are the "owner" of the object.
606 HeapRegion* r = _g1h->heap_region_containing(old);
607
608 if (_g1h->notify_region_failed_evacuation(r->hrm_index())) {
609 _g1h->hr_printer()->evac_failure(r);
610 }
611
612 _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
613
614 G1ScanInYoungSetter x(&_scanner, r->is_young());
615 old->oop_iterate_backwards(&_scanner);
616
617 return old;
618 } else {
619 // Forward-to-self failed. Either someone else managed to allocate
620 // space for this object (old != forward_ptr) or they beat us in
621 // self-forwarding it (old == forward_ptr).
622 assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
623 "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
|
180
181 // Although we never intentionally push references outside of the collection
182 // set, benign races in the claim mechanism during RSet scanning mean that
183 // more than one thread might claim the same card. The same card may therefore
184 // be processed multiple times, so we might encounter references into old gen
185 // here and need to redo this check.
186 const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
187 // References pushed onto the work stack should never point to a humongous
188 // region, as humongous regions are not added to the collection set due to the precondition above.
189 assert(!region_attr.is_humongous(),
190 "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
191 p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));
192
193 if (!region_attr.is_in_cset()) {
194 // In this case somebody else already did all the work.
195 return;
196 }
197
198 markWord m = obj->mark();
199 if (m.is_marked()) {
200 obj = obj->forwardee(m);
201 } else {
202 obj = do_copy_to_survivor_space(region_attr, obj, m);
203 }
204 RawAccess<IS_NOT_NULL>::oop_store(p, obj);
205
206 assert(obj != NULL, "Must be");
207 if (HeapRegion::is_in_same_region(p, obj)) {
208 return;
209 }
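  // The reference crosses regions. Cross-region references from young regions
  // never need remembered set entries (young regions are always collected), so
  // only enqueue the card for a non-young source region, and only if the
  // destination region's remembered set is being tracked.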
210 HeapRegion* from = _g1h->heap_region_containing(p);
211 if (!from->is_young()) {
212 enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
213 }
214 }
215
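// Process one claimed chunk of an object array that is being evacuated in
// pieces: push follow-up chunk tasks so other workers can steal them, then
// scan this chunk of the already-forwarded destination array.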
216 MAYBE_INLINE_EVACUATION
217 void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
218 oop from_obj = task.to_source_array();
219
220 assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
221 assert(from_obj->is_forwarded(), "must be forwarded");
222
223 oop to_obj = from_obj->forwardee();
224 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
225 assert(to_obj->is_objArray(), "must be obj array");
226 objArrayOop to_array = objArrayOop(to_obj);
227
228 PartialArrayTaskStepper::Step step
229 = _partial_array_stepper.next(objArrayOop(from_obj),
230 to_array,
231 _partial_objarray_chunk_size);
232 for (uint i = 0; i < step._ncreate; ++i) {
233 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
234 }
235
236 HeapRegion* hr = _g1h->heap_region_containing(to_array);
237 G1ScanInYoungSetter x(&_scanner, hr->is_young());
238 // Process claimed task. The length of to_array is not correct, but
239 // fortunately the iteration ignores the length field and just relies
240 // on start/end.
241 to_array->oop_iterate_range(&_scanner,
242 step._index,
243 step._index + _partial_objarray_chunk_size);
244 }
245
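// Start chunked scanning of a just-copied object array: push partial-scan
// tasks for the remaining chunks so other workers can help, then scan the
// initial chunk in this thread.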
246 MAYBE_INLINE_EVACUATION
247 void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
248 oop from_obj,
249 oop to_obj) {
250 assert(from_obj->is_forwarded(), "precondition");
251 assert(from_obj->forwardee() == to_obj, "precondition");
252 assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
253 assert(to_obj->is_objArray(), "precondition");
254
255 objArrayOop to_array = objArrayOop(to_obj);
256
257 PartialArrayTaskStepper::Step step
258 = _partial_array_stepper.start(objArrayOop(from_obj),
259 to_array,
260 _partial_objarray_chunk_size);
261
262 // Push any needed partial scan tasks. Pushed before processing the
263 // initial chunk to allow other workers to steal while we're processing.
264 for (uint i = 0; i < step._ncreate; ++i) {
265 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
266 }
267
268 G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
269 // Process the initial chunk. No need to process the type in the
348 } else {
349 _old_gen_is_full = previous_plab_refill_failed;
350 assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
351 // no other space to try.
352 return NULL;
353 }
354 }
355
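// Determine the copy destination. An object from a young region stays in the
// young (survivor) destination while its age, read from the mark word (or the
// displaced mark when the header is displaced), is below the tenuring
// threshold; otherwise fall through to the next destination via dest().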
356 G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
357 if (region_attr.is_young()) {
358 age = !m.has_displaced_mark_helper() ? m.age()
359 : m.displaced_mark_helper().age();
360 if (age < _tenuring_threshold) {
361 return region_attr;
362 }
363 }
364 return dest(region_attr);
365 }
366
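// Report a promotion tracing event for the copied object, distinguishing
// allocations that landed inside the current PLAB from direct,
// outside-of-PLAB allocations.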
367 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
368 oop const old, Klass* klass, size_t word_sz, uint age,
369 HeapWord * const obj_ptr, uint node_index) const {
370 PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
371 if (alloc_buf->contains(obj_ptr)) {
372 _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
373 dest_attr.type() == G1HeapRegionAttr::Old,
374 alloc_buf->word_sz() * HeapWordSize);
375 } else {
376 _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
377 dest_attr.type() == G1HeapRegionAttr::Old);
378 }
379 }
380
381 NOINLINE
382 HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
383 oop old,
384 Klass* klass,
385 size_t word_sz,
386 uint age,
387 uint node_index) {
388 HeapWord* obj_ptr = NULL;
389 // Try slow-path allocation unless we're allocating old and old is already full.
390 if (!(dest_attr->is_old() && _old_gen_is_full)) {
391 bool plab_refill_failed = false;
392 obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
393 word_sz,
394 &plab_refill_failed,
395 node_index);
396 if (obj_ptr == NULL) {
397 obj_ptr = allocate_in_next_plab(dest_attr,
398 word_sz,
399 plab_refill_failed,
400 node_index);
401 }
402 }
403 if (obj_ptr != NULL) {
404 update_numa_stats(node_index);
405 if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
406 // The events are checked individually as part of the actual commit
407 report_promotion_event(*dest_attr, old, klass, word_sz, age, obj_ptr, node_index);
408 }
409 }
410 return obj_ptr;
411 }
412
413 NOINLINE
414 void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
415 HeapWord* obj_ptr,
416 size_t word_sz,
417 uint node_index) {
418 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
419 }
420
421 // Private inline function, for direct internal use and for providing the
422 // implementation of the public, non-inlined function.
423 MAYBE_INLINE_EVACUATION
424 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
425 oop const old,
426 markWord const old_mark) {
427 assert(region_attr.is_in_cset(),
428 "Unexpected region attr type: %s", region_attr.get_type_str());
429
430 if (old_mark.is_marked()) {
431 // Already forwarded by somebody else, return forwardee.
432 return old->forwardee(old_mark);
433 }
434 // Get the klass once. We'll need it again later, and this avoids
435 // re-decoding when it's compressed.
436 Klass* klass;
437 #ifdef _LP64
438 if (UseCompactObjectHeaders) {
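    // With compact object headers the klass is encoded in the mark word, so
    // it can be read from the mark we already have instead of from the object.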
439 klass = old_mark.safe_klass();
440 } else
441 #endif
442 {
443 klass = old->klass();
444 }
445 const size_t word_sz = old->size_given_klass(klass);
446
447 uint age = 0;
448 G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
449 HeapRegion* const from_region = _g1h->heap_region_containing(old);
450 uint node_index = from_region->node_index();
451
452 HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
453
454 // PLAB allocations should succeed most of the time, so we'll
455 // normally check against NULL once and that's it.
456 if (obj_ptr == NULL) {
457 obj_ptr = allocate_copy_slow(&dest_attr, old, klass, word_sz, age, node_index);
458 if (obj_ptr == NULL) {
459 // This will either forward-to-self, or detect that someone else has
460 // installed a forwarding pointer.
461 return handle_evacuation_failure_par(old, old_mark);
462 }
463 }
464
465 assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
466 assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
467
468 #ifndef PRODUCT
469 // Should this evacuation fail?
470 if (_g1h->evacuation_should_fail()) {
471     // Doing this after all the allocation attempts also exercises the
472     // undo_allocation() method.
473 undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
474 return handle_evacuation_failure_par(old, old_mark);
475 }
476 #endif // !PRODUCT
477
594 _flushed = true;
595 }
596
597 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
598 for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
599 G1ParScanThreadState* pss = _states[worker_index];
600
601 if (pss == NULL) {
602 continue;
603 }
604
605 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
606 _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
607 }
608 }
609
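// Evacuation of 'old' failed. Attempt to install a self-forwarding pointer;
// the thread that wins the race keeps the object in place, preserves its mark
// and scans it, otherwise the forwarding installed by the winner is used.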
610 NOINLINE
611 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
612 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
613
614 oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
615 if (forward_ptr == NULL) {
616 // Forward-to-self succeeded. We are the "owner" of the object.
617 HeapRegion* r = _g1h->heap_region_containing(old);
618
619 if (_g1h->notify_region_failed_evacuation(r->hrm_index())) {
620 _g1h->hr_printer()->evac_failure(r);
621 }
622
623 _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
624
625 G1ScanInYoungSetter x(&_scanner, r->is_young());
626 old->oop_iterate_backwards(&_scanner);
627
628 return old;
629 } else {
630 // Forward-to-self failed. Either someone else managed to allocate
631 // space for this object (old != forward_ptr) or they beat us in
632 // self-forwarding it (old == forward_ptr).
633 assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
634 "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
|