279 // No GCLABs in this thread, fall back to shared allocation
280 return nullptr;
281 }
282 HeapWord* obj = gclab->allocate(size);
283 if (obj != nullptr) {
284 return obj;
285 }
286 // Otherwise, take the slow path: retire the current GCLAB and allocate a new one.
287 return allocate_from_gclab_slow(thread, size);
288 }
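// A minimal sketch of what the slow path is assumed to do (not the actual
// implementation of allocate_from_gclab_slow): retire the exhausted GCLAB,
// obtain a fresh one, and retry the bump-pointer allocation.
//
//   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
//   gclab->retire();                       // flush the old buffer back to the heap
//   // ... request a new GCLAB large enough for 'size' words ...
//   return gclab->allocate(size);          // nullptr if no new GCLAB was available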
289
290 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
291 if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
292 // This thread went through the OOM-during-evac protocol, so it is safe to return
293 // the forwarding pointer. It must not attempt to evacuate any more.
294 return ShenandoahBarrierSet::resolve_forwarded(p);
295 }
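// Callers establish the OOM-during-evac protocol around evacuation; a typical
// shape looks roughly like this (illustrative sketch; 'heap' and 'obj' are
// placeholder names):
//
//   {
//     ShenandoahEvacOOMScope oom_evac_scope;                 // enter the protocol
//     oop fwd = heap->evacuate_object(obj, Thread::current());
//     // ... use fwd ...
//   }                                                        // leave the protocol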
296
297 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
298
299 size_t size = p->size();
300
301 assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
302
303 bool alloc_from_gclab = true;
304 HeapWord* copy = nullptr;
305
306 #ifdef ASSERT
307 if (ShenandoahOOMDuringEvacALot &&
308 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
309 copy = nullptr;
310 } else {
311 #endif
312 if (UseTLAB) {
313 copy = allocate_from_gclab(thread, size);
314 }
315 if (copy == nullptr) {
316 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
317 copy = allocate_memory(req);
318 alloc_from_gclab = false;
319 }
320 #ifdef ASSERT
321 }
322 #endif
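// ShenandoahOOMDuringEvacALot is a debug-build knob: it randomly pretends that
// the copy allocation failed, so the OOM-during-evac handling below is exercised
// even on heaps that never actually run out of space during evacuation.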
323
324 if (copy == nullptr) {
325 control_thread()->handle_alloc_failure_evac(size);
326
327 _oom_evac_handler.handle_out_of_memory_during_evacuation();
328
329 return ShenandoahBarrierSet::resolve_forwarded(p);
330 }
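// Allocation for the copy failed entirely: the control thread is notified of the
// evacuation failure (which typically ends in a degenerated or full GC), the
// handler above waits until it is safe for this thread to stop evacuating, and
// the caller receives whatever forwardee is currently published for p (possibly
// p itself, if no other thread managed to evacuate it).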
331
332 // Copy the object:
333 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
334
335 // Try to install the new forwarding pointer.
336 oop copy_val = cast_to_oop(copy);
337 ContinuationGCSupport::relativize_stack_chunk(copy_val);
338
339 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
340 if (result == copy_val) {
341 // Successfully evacuated. Our copy is now the public one!
342 shenandoah_assert_correct(nullptr, copy_val);
343 return copy_val;
344 } else {
345 // Failed to evacuate. We need to deal with the object that is left behind. Since this
346 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
347 // But if it happens to contain references to evacuated regions, those references would
348 // not get updated for this stale copy during this cycle, and we will crash while scanning
349 // it in the next cycle.
350 //
351 // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
352 // object will overwrite this stale copy, or the filler object on LAB retirement will
353 // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
354 // have to explicitly overwrite the copy with the filler object. With that overwrite,
355 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
356 if (alloc_from_gclab) {
357 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
358 } else {
494 assert(oopDesc::is_oop(obj), "sanity");
495 assert(ctx->is_marked(obj), "object expected to be marked");
496 cl->do_object(obj);
497 cb += skip_bitmap_delta;
498 if (cb < limit_bitmap) {
499 cb = ctx->get_next_marked_addr(cb, limit_bitmap);
500 }
501 }
502 }
503
504 // Step 2. Accurate size-based traversal, happens past the TAMS.
505 // This restarts the scan at TAMS, which makes sure we traverse all objects,
506 // regardless of what happened at Step 1.
507 HeapWord* cs = tams;
508 while (cs < limit) {
509 assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
510 assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
511 oop obj = cast_to_oop(cs);
512 assert(oopDesc::is_oop(obj), "sanity");
513 assert(ctx->is_marked(obj), "object expected to be marked");
514 size_t size = obj->size();
515 cl->do_object(obj);
516 cs += size;
517 }
518 }
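// Note on the two traversal modes above: below TAMS the region may still contain
// unmarked (dead) objects whose contents cannot be parsed, so the walk has to hop
// between marked addresses via the mark bitmap; at and above TAMS every object was
// allocated after mark start and is implicitly live, which makes the plain
// size-based walk both safe and exact.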
519
520 template <class T>
521 class ShenandoahObjectToOopClosure : public ObjectClosure {
522 T* _cl;
523 public:
524 ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
525
526 void do_object(oop obj) {
527 obj->oop_iterate(_cl);
528 }
529 };
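// Illustrative use (a sketch; 'MyOopClosure', 'heap' and 'region' are placeholder
// names): adapt an oop closure so it can be handed to an object-walking API such
// as marked_object_iterate(), which expects an ObjectClosure.
//
//   MyOopClosure oops;
//   ShenandoahObjectToOopClosure<MyOopClosure> adapter(&oops);
//   heap->marked_object_iterate(region, &adapter);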
530
531 template <class T>
532 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
533 T* _cl;
534 MemRegion _bounds;
279 // No GCLABs in this thread, fall back to shared allocation
280 return nullptr;
281 }
282 HeapWord* obj = gclab->allocate(size);
283 if (obj != nullptr) {
284 return obj;
285 }
286 // Otherwise, take the slow path: retire the current GCLAB and allocate a new one.
287 return allocate_from_gclab_slow(thread, size);
288 }
289
290 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
291 if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
292 // This thread went through the OOM-during-evac protocol, so it is safe to return
293 // the forwarding pointer. It must not attempt to evacuate any more.
294 return ShenandoahBarrierSet::resolve_forwarded(p);
295 }
296
297 assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
298
299 size_t size = p->forward_safe_size();
300
301 assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
302
303 bool alloc_from_gclab = true;
304 HeapWord* copy = nullptr;
305
306 #ifdef ASSERT
307 if (ShenandoahOOMDuringEvacALot &&
308 (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
309 copy = nullptr;
310 } else {
311 #endif
312 if (UseTLAB) {
313 copy = allocate_from_gclab(thread, size);
314 }
315 if (copy == nullptr) {
316 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
317 copy = allocate_memory(req);
318 alloc_from_gclab = false;
319 }
320 #ifdef ASSERT
321 }
322 #endif
323
324 if (copy == nullptr) {
325 control_thread()->handle_alloc_failure_evac(size);
326
327 _oom_evac_handler.handle_out_of_memory_during_evacuation();
328
329 return ShenandoahBarrierSet::resolve_forwarded(p);
330 }
331
332 // Copy the object:
333 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
334 oop copy_val = cast_to_oop(copy);
335 if (UseCompactObjectHeaders) {
336 // The copy above is not atomic. Make sure we have seen the proper mark
337 // and re-install it into the copy, so that Klass* is guaranteed to be correct.
338 markWord mark = copy_val->mark();
339 if (!mark.is_marked()) {
340 copy_val->set_mark(mark);
341 ContinuationGCSupport::relativize_stack_chunk(copy_val);
342 } else {
343 // If we copied a mark-word that indicates 'forwarded' state, the object
344 // installation would not succeed. We cannot access Klass* anymore either.
345 // Skip the transformation.
346 }
347 } else {
348 ContinuationGCSupport::relativize_stack_chunk(copy_val);
349 }
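// Background for the branch above: with compact object headers the narrow Klass*
// is encoded in the mark word, so a mark that already carries a forwarding pointer
// no longer identifies the object's class. In that case the forwarding CAS below is
// bound to lose anyway and the stale copy gets cleaned up on the failure path, so
// skipping the mark re-install and the stack-chunk relativization here is safe.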
350
351 // Try to install the new forwarding pointer.
352 oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
353 if (result == copy_val) {
354 // Successfully evacuated. Our copy is now the public one!
355 shenandoah_assert_correct(nullptr, copy_val);
356 return copy_val;
357 } else {
358 // Failed to evacuate. We need to deal with the object that is left behind. Since this
359 // new allocation is certainly after TAMS, it will be considered live in the next cycle.
360 // But if it happens to contain references to evacuated regions, those references would
361 // not get updated for this stale copy during this cycle, and we will crash while scanning
362 // it in the next cycle.
363 //
364 // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
365 // object will overwrite this stale copy, or the filler object on LAB retirement will
366 // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
367 // have to explicitly overwrite the copy with the filler object. With that overwrite,
368 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
369 if (alloc_from_gclab) {
370 ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
371 } else {
507 assert(oopDesc::is_oop(obj), "sanity");
508 assert(ctx->is_marked(obj), "object expected to be marked");
509 cl->do_object(obj);
510 cb += skip_bitmap_delta;
511 if (cb < limit_bitmap) {
512 cb = ctx->get_next_marked_addr(cb, limit_bitmap);
513 }
514 }
515 }
516
517 // Step 2. Accurate size-based traversal, happens past the TAMS.
518 // This restarts the scan at TAMS, which makes sure we traverse all objects,
519 // regardless of what happened at Step 1.
520 HeapWord* cs = tams;
521 while (cs < limit) {
522 assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
523 assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
524 oop obj = cast_to_oop(cs);
525 assert(oopDesc::is_oop(obj), "sanity");
526 assert(ctx->is_marked(obj), "object expected to be marked");
527 size_t size = obj->forward_safe_size();
528 cl->do_object(obj);
529 cs += size;
530 }
531 }
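// Note on forward_safe_size() above (an assumption based on its name and on the
// compact-headers changes in this version): obj->size() needs the Klass*, which may
// be unreachable once the mark word holds a forwarding pointer; forward_safe_size()
// is expected to obtain the size via the forwardee in that case, keeping the walk
// correct even for objects that have already been forwarded.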
532
533 template <class T>
534 class ShenandoahObjectToOopClosure : public ObjectClosure {
535 T* _cl;
536 public:
537 ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
538
539 void do_object(oop obj) {
540 obj->oop_iterate(_cl);
541 }
542 };
543
544 template <class T>
545 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
546 T* _cl;
547 MemRegion _bounds;