
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

Old version (before the change):

279     // No GCLABs in this thread, fall back to shared allocation
280     return nullptr;
281   }
282   HeapWord* obj = gclab->allocate(size);
283   if (obj != nullptr) {
284     return obj;
285   }
286   // Otherwise...
287   return allocate_from_gclab_slow(thread, size);
288 }
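
Note: the fast path above is plain bump-pointer allocation in the thread-local
GCLAB; only when the buffer is exhausted does allocate_from_gclab_slow() refill
it or give up. A minimal sketch of that pattern, with illustrative names
(BumpPointerBuffer is hypothetical, not the real PLAB class):

  // Illustrative LAB fast path: thread-local, so no synchronization needed.
  struct BumpPointerBuffer {
    HeapWord* _top;
    HeapWord* _end;
    HeapWord* allocate(size_t size) {
      if ((size_t)(_end - _top) < size) return nullptr;  // exhausted -> slow path
      HeapWord* obj = _top;
      _top += size;
      return obj;
    }
  };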
289 
290 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
291   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
292     // This thread went through the OOM-during-evac protocol; it is safe to return
293     // the forwarding pointer. It must not attempt to evacuate any more.
294     return ShenandoahBarrierSet::resolve_forwarded(p);
295   }
296 
297   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
298 
299   size_t size = p->size();
300 
301   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
302 
303   bool alloc_from_gclab = true;
304   HeapWord* copy = nullptr;
305 
306 #ifdef ASSERT
307   if (ShenandoahOOMDuringEvacALot &&
308       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
309         copy = nullptr;
310   } else {
311 #endif
312     if (UseTLAB) {
313       copy = allocate_from_gclab(thread, size);
314     }
315     if (copy == nullptr) {
316       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
317       copy = allocate_memory(req);
318       alloc_from_gclab = false;
319     }
320 #ifdef ASSERT
321   }
322 #endif
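
Note: in debug builds the ShenandoahOOMDuringEvacALot flag makes the allocation
fail on roughly every second call, so the evacuation-OOM path below is exercised
regularly without needing a genuinely full heap. The same fault-injection shape
in isolation (the flag and function names here are hypothetical):

  // Hypothetical fault-injection wrapper: fail ~50% of allocations on purpose.
  HeapWord* allocate_with_injection(size_t size) {
    if (InjectEvacFailureALot && (os::random() & 1) == 0) {
      return nullptr;            // pretend the heap is out of memory
    }
    return real_allocate(size);  // hypothetical normal allocation path
  }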
323 
324   if (copy == nullptr) {
325     control_thread()->handle_alloc_failure_evac(size);
326 
327     _oom_evac_handler.handle_out_of_memory_during_evacuation();
328 
329     return ShenandoahBarrierSet::resolve_forwarded(p);
330   }
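
Note: this failure path is what justifies the early return at the top of
evacuate_object(): handle_out_of_memory_during_evacuation() waits until no
other thread has an evacuation in flight, after which the forwarding pointer
is stable and safe to resolve. Evacuating code is expected to run inside the
OOM-evac scope the assert above checks for; a sketch of the intended usage,
assuming the ShenandoahEvacOOMScope RAII helper:

  // Sketch of the expected call pattern (assuming ShenandoahEvacOOMScope):
  {
    ShenandoahEvacOOMScope scope;  // enter the oom-evac protocol region
    oop fwd = heap->evacuate_object(obj, Thread::current());
    // fwd is our fresh copy, a copy installed by a racing thread, or, after
    // an evacuation OOM, the already-forwarded object.
  }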
331 
332   // Copy the object:
333   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
334 
335   // Try to install the new forwarding pointer.
336   oop copy_val = cast_to_oop(copy);
337   ContinuationGCSupport::relativize_stack_chunk(copy_val);
338 
339   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
340   if (result == copy_val) {
341     // Successfully evacuated. Our copy is now the public one!
342     shenandoah_assert_correct(nullptr, copy_val);
343     return copy_val;
344   } else {
345     // Failed to evacuate. We need to deal with the object that is left behind. Since this
346     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
347     // But if it happens to contain references to evacuated regions, those references would
348     // not get updated for this stale copy during this cycle, and we will crash while scanning
349     // it in the next cycle.
350     //
351     // For GCLAB allocations, it is enough to roll back the allocation pointer. Either the next
352     // object will overwrite this stale copy, or the filler object on LAB retirement will
353     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
354     // have to explicitly overwrite the copy with the filler object. With that overwrite,
355     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
356     if (alloc_from_gclab) {
357       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
358     } else {
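
Note: the asymmetry described in the comment above comes from how the two
allocation paths work. A GCLAB is a thread-local bump-pointer buffer, so the
most recent allocation can simply be retracted; a shared allocation cannot,
which is why the non-GCLAB branch (continuing past this excerpt) must
overwrite the stale copy with a filler object instead. A minimal sketch of
the rollback, with illustrative names:

  // Why undo_allocation() works for LABs (illustrative sketch):
  struct Lab {
    HeapWord* _top;
    void undo_allocation(HeapWord* obj, size_t size) {
      assert(_top == obj + size, "can only undo the most recent allocation");
      _top = obj;  // the stale copy is overwritten by the next allocation,
                   // or by the filler object when the LAB retires
    }
  };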

494       assert(oopDesc::is_oop(obj), "sanity");
495       assert(ctx->is_marked(obj), "object expected to be marked");
496       cl->do_object(obj);
497       cb += skip_bitmap_delta;
498       if (cb < limit_bitmap) {
499         cb = ctx->get_next_marked_addr(cb, limit_bitmap);
500       }
501     }
502   }
503 
504   // Step 2. Accurate size-based traversal, which happens past TAMS.
505   // This restarts the scan at TAMS, which makes sure we traverse all objects,
506   // regardless of what happened at Step 1.
507   HeapWord* cs = tams;
508   while (cs < limit) {
509     assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
510     assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
511     oop obj = cast_to_oop(cs);
512     assert(oopDesc::is_oop(obj), "sanity");
513     assert(ctx->is_marked(obj), "object expected to be marked");
514     size_t size = obj->size();
515     cl->do_object(obj);
516     cs += size;
517   }
518 }
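
Note: Step 2 may use a plain size-based walk because of the TAMS invariant:
objects allocated at or above TAMS during the cycle are implicitly live and
contiguous, so adding the object size always lands on the next object header.
Below TAMS dead objects may be unparsable, which is why Step 1 hops between
mark bits instead. A simplified sketch of the Step 1 shape, using the same
bitmap API as the code above (bottom stands for the region's first word):

  // Simplified shape of the bitmap-driven walk below TAMS (Step 1):
  HeapWord* cb = ctx->get_next_marked_addr(bottom, tams);
  while (cb < tams) {
    oop obj = cast_to_oop(cb);
    cl->do_object(obj);                            // visit marked objects only
    cb = ctx->get_next_marked_addr(cb + 1, tams);  // hop to the next mark bit
  }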
519 
520 template <class T>
521 class ShenandoahObjectToOopClosure : public ObjectClosure {
522   T* _cl;
523 public:
524   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
525 
526   void do_object(oop obj) {
527     obj->oop_iterate(_cl);
528   }
529 };
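
Note: this adapter lets a closure that visits individual oops be driven by an
API that hands out whole objects: each object is expanded into its reference
fields via oop_iterate(). Hypothetical usage (MyOopClosure and the iteration
entry point are assumed for illustration):

  // Hypothetical usage of the adapter:
  MyOopClosure oop_cl;                                 // visits individual oops
  ShenandoahObjectToOopClosure<MyOopClosure> obj_cl(&oop_cl);
  heap->marked_object_iterate(region, &obj_cl);        // assumed entry point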
530 
531 template <class T>
532 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
533   T* _cl;
534   MemRegion _bounds;

New version (after the change):

279     // No GCLABs in this thread, fall back to shared allocation
280     return nullptr;
281   }
282   HeapWord* obj = gclab->allocate(size);
283   if (obj != nullptr) {
284     return obj;
285   }
286   // Otherwise...
287   return allocate_from_gclab_slow(thread, size);
288 }
289 
290 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
291   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
292     // This thread went through the OOM-during-evac protocol; it is safe to return
293     // the forwarding pointer. It must not attempt to evacuate any more.
294     return ShenandoahBarrierSet::resolve_forwarded(p);
295   }
296 
297   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
298 
299   size_t size = p->forward_safe_size();
300 
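
Note: the switch from p->size() to p->forward_safe_size() is one of the
substantive changes in this patch. Computing an object's size requires its
Klass*, and with compact object headers the Klass* is encoded in the mark
word, which a racing evacuation may overwrite with a forwarding pointer at
any time. A simplified sketch of the idea behind forward_safe_size() (the
real implementation lives in oopDesc):

  // Simplified sketch: read the size through the forwardee when the mark
  // word has already been replaced by a forwarding pointer.
  size_t forward_safe_size(oop obj) {
    markWord m = obj->mark();
    if (m.is_marked()) {                         // mark is a forwarding pointer
      oop fwd = cast_to_oop(m.decode_pointer()); // forwardee header is stable
      return fwd->size();
    }
    return obj->size();                          // header intact, use directly
  }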
301   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
302 
303   bool alloc_from_gclab = true;
304   HeapWord* copy = nullptr;
305 
306 #ifdef ASSERT
307   if (ShenandoahOOMDuringEvacALot &&
308       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
309         copy = nullptr;
310   } else {
311 #endif
312     if (UseTLAB) {
313       copy = allocate_from_gclab(thread, size);
314     }
315     if (copy == nullptr) {
316       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
317       copy = allocate_memory(req);
318       alloc_from_gclab = false;
319     }
320 #ifdef ASSERT
321   }
322 #endif
323 
324   if (copy == nullptr) {
325     control_thread()->handle_alloc_failure_evac(size);
326 
327     _oom_evac_handler.handle_out_of_memory_during_evacuation();
328 
329     return ShenandoahBarrierSet::resolve_forwarded(p);
330   }
331 
332   // Copy the object:
333   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
334   oop copy_val = cast_to_oop(copy);
335 
336   if (UseCompactObjectHeaders) {
337     // The copy above is not atomic. Make sure we have seen the proper mark
338     // and re-install it into the copy, so that Klass* is guaranteed to be correct.
339     markWord mark = copy_val->mark();
340     if (!mark.is_marked()) {
341       copy_val->set_mark(mark);
342       ContinuationGCSupport::relativize_stack_chunk(copy_val);
343     } else {
344       // If we copied a mark-word that indicates 'forwarded' state, the object
345       // installation would not succeed. We cannot access Klass* anymore either.
346       // Skip the transformation.
347     }
348   } else {
349     ContinuationGCSupport::relativize_stack_chunk(copy_val);
350   }
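
Note: this block exists because with compact object headers the Klass* lives
in the mark word. relativize_stack_chunk() needs a valid Klass, but the
non-atomic word copy above may have captured a mark that a racing thread had
already replaced with a forwarding pointer; in that case the Klass bits are
gone, and the try_update_forwardee() below is bound to fail anyway, so the
transformation is safely skipped. The guard in isolation (sketch):

  // Sketch of the guard: Klass-dependent fixups only on an intact mark word.
  markWord m = copy_val->mark();
  if (!m.is_marked()) {
    Klass* k = copy_val->klass();  // decoded from the mark word's Klass bits
    // safe to run Klass-dependent transformations here
  } else {
    // mark is a forwarding pointer: another thread won; skip the fixups
  }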
351 
352   // Try to install the new forwarding pointer.
353   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
354   if (result == copy_val) {
355     // Successfully evacuated. Our copy is now the public one!
356     shenandoah_assert_correct(nullptr, copy_val);
357     return copy_val;
358   } else {
359     // Failed to evacuate. We need to deal with the object that is left behind. Since this
360     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
361     // But if it happens to contain references to evacuated regions, those references would
362     // not get updated for this stale copy during this cycle, and we will crash while scanning
363     // it in the next cycle.
364     //
365     // For GCLAB allocations, it is enough to roll back the allocation pointer. Either the next
366     // object will overwrite this stale copy, or the filler object on LAB retirement will
367     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
368     // have to explicitly overwrite the copy with the filler object. With that overwrite,
369     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
370     if (alloc_from_gclab) {
371       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
372     } else {

508       assert(oopDesc::is_oop(obj), "sanity");
509       assert(ctx->is_marked(obj), "object expected to be marked");
510       cl->do_object(obj);
511       cb += skip_bitmap_delta;
512       if (cb < limit_bitmap) {
513         cb = ctx->get_next_marked_addr(cb, limit_bitmap);
514       }
515     }
516   }
517 
518   // Step 2. Accurate size-based traversal, which happens past TAMS.
519   // This restarts the scan at TAMS, which makes sure we traverse all objects,
520   // regardless of what happened at Step 1.
521   HeapWord* cs = tams;
522   while (cs < limit) {
523     assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
524     assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
525     oop obj = cast_to_oop(cs);
526     assert(oopDesc::is_oop(obj), "sanity");
527     assert(ctx->is_marked(obj), "object expected to be marked");
528     size_t size = obj->forward_safe_size();
529     cl->do_object(obj);
530     cs += size;
531   }
532 }
533 
534 template <class T>
535 class ShenandoahObjectToOopClosure : public ObjectClosure {
536   T* _cl;
537 public:
538   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
539 
540   void do_object(oop obj) {
541     obj->oop_iterate(_cl);
542   }
543 };
544 
545 template <class T>
546 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
547   T* _cl;
548   MemRegion _bounds;