
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

--- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

267     // No GCLABs in this thread, fall back to shared allocation
268     return nullptr;
269   }
270   HeapWord* obj = gclab->allocate(size);
271   if (obj != nullptr) {
272     return obj;
273   }
274   // Otherwise...
275   return allocate_from_gclab_slow(thread, size);
276 }
277 
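
The fast path above is a thread-local bump-pointer allocation inside the GCLAB, with allocate_from_gclab_slow handling retirement and refill. As a rough illustration of what gclab->allocate(size) amounts to, here is a minimal standalone sketch; the Lab type and byte-based arithmetic are simplifications, not HotSpot's actual PLAB code:

    #include <cstddef>

    // Hypothetical stand-in for a GC-local allocation buffer (GCLAB).
    // Sketch only: HotSpot's PLAB works in HeapWords and carries more state.
    struct Lab {
      char* top;   // next free byte
      char* end;   // one past the last usable byte

      // Bump-pointer fast path; no synchronization is needed, since the
      // buffer is owned by a single thread.
      void* allocate(std::size_t bytes) {
        if (static_cast<std::size_t>(end - top) < bytes) {
          return nullptr;   // exhausted: the caller takes the slow path
        }
        void* obj = top;
        top += bytes;
        return obj;
      }
    };
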
278 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
279   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
280     // This thread went through the OOM-during-evac protocol, so it is safe to return
281     // the forwarding pointer. It must not attempt any further evacuations.
282     return ShenandoahBarrierSet::resolve_forwarded(p);
283   }
284 
285   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
286 
287   size_t size = p->size();
288 
289   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
290 
291   bool alloc_from_gclab = true;
292   HeapWord* copy = nullptr;
293 
294 #ifdef ASSERT
295   if (ShenandoahOOMDuringEvacALot &&
296       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
297         copy = nullptr;
298   } else {
299 #endif
300     if (UseTLAB) {
301       copy = allocate_from_gclab(thread, size);
302     }
303     if (copy == nullptr) {
304       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
305       copy = allocate_memory(req);
306       alloc_from_gclab = false;
307     }
308 #ifdef ASSERT
309   }
310 #endif
311 
312   if (copy == nullptr) {
313     control_thread()->handle_alloc_failure_evac(size);
314 
315     _oom_evac_handler.handle_out_of_memory_during_evacuation();
316 
317     return ShenandoahBarrierSet::resolve_forwarded(p);
318   }
319 
320   // Copy the object:
321   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
322 
323   // Try to install the new forwarding pointer.
324   oop copy_val = cast_to_oop(copy);
325   ContinuationGCSupport::relativize_stack_chunk(copy_val);
326 
327   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
328   if (result == copy_val) {
329     // Successfully evacuated. Our copy is now the public one!
330     shenandoah_assert_correct(nullptr, copy_val);
331     return copy_val;
332   }  else {
333     // Failed to evacuate. We need to deal with the object that is left behind. Since this
334     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
335     // But if it happens to contain references to evacuated regions, those references would
336     // not get updated for this stale copy during this cycle, and we would crash while
337     // scanning it in the next cycle.
338     //
339     // For GCLAB allocations, it is enough to roll back the allocation pointer. Either the next
340     // object will overwrite this stale copy, or the filler object on LAB retirement will
341     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
342     // have to explicitly overwrite the copy with the filler object. With that overwrite,
343     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
344     if (alloc_from_gclab) {
345       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
346     } else {

482       assert(oopDesc::is_oop(obj), "sanity");
483       assert(ctx->is_marked(obj), "object expected to be marked");
484       cl->do_object(obj);
485       cb += skip_bitmap_delta;
486       if (cb < limit_bitmap) {
487         cb = ctx->get_next_marked_addr(cb, limit_bitmap);
488       }
489     }
490   }
491 
492   // Step 2. Accurate size-based traversal, happens past the TAMS.
493   // This restarts the scan at TAMS, which makes sure we traverse all objects,
494   // regardless of what happened in Step 1.
495   HeapWord* cs = tams;
496   while (cs < limit) {
497     assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
498     assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
499     oop obj = cast_to_oop(cs);
500     assert(oopDesc::is_oop(obj), "sanity");
501     assert(ctx->is_marked(obj), "object expected to be marked");
502     size_t size = obj->size();
503     cl->do_object(obj);
504     cs += size;
505   }
506 }
507 
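
Step 1 (its tail is visible above) walks the mark bitmap below TAMS, where the region may contain unparsable dead objects, so only marked objects can be visited; Step 2 walks linearly because everything past TAMS was allocated during the cycle and is implicitly live. A minimal sketch of the bitmap-driven part, with hypothetical names (Bitmap, next_marked) standing in for HotSpot's MarkBitMap API:

    #include <cstddef>
    #include <cstdint>

    // One mark bit per heap word. Sketch only: HotSpot's MarkBitMap skips
    // whole zero words instead of testing bit by bit.
    struct Bitmap {
      const std::uint64_t* bits;

      // Find the index of the next marked word in [idx, limit).
      std::size_t next_marked(std::size_t idx, std::size_t limit) const {
        while (idx < limit && ((bits[idx / 64] >> (idx % 64)) & 1) == 0) {
          ++idx;
        }
        return idx;   // == limit if nothing is marked
      }
    };
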
508 template <class T>
509 class ShenandoahObjectToOopClosure : public ObjectClosure {
510   T* _cl;
511 public:
512   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
513 
514   void do_object(oop obj) {
515     obj->oop_iterate(_cl);
516   }
517 };
518 
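
This adapter lets an oop-visiting closure run under object-based iteration. A hedged usage sketch: MyOopClosure is a hypothetical closure over individual references, and marked_object_iterate is assumed to be the per-region walk whose tail is shown above:

    MyOopClosure oops;                                   // visits each reference
    ShenandoahObjectToOopClosure<MyOopClosure> objs(&oops);
    heap->marked_object_iterate(region, &objs);          // each marked object is
                                                         // passed to do_object(),
                                                         // which iterates its oop fields
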
519 template <class T>
520 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
521   T* _cl;
522   MemRegion _bounds;

+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

267     // No GCLABs in this thread, fall back to shared allocation
268     return nullptr;
269   }
270   HeapWord* obj = gclab->allocate(size);
271   if (obj != nullptr) {
272     return obj;
273   }
274   // Otherwise...
275   return allocate_from_gclab_slow(thread, size);
276 }
277 
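
evacuate_object below asserts that its caller sits inside an oom-evac scope. A minimal caller sketch, assuming the ShenandoahEvacOOMScope RAII helper that enters and leaves the OOM-during-evacuation protocol (obj is some oop in the collection set):

    {
      ShenandoahEvacOOMScope oom_evac_scope;   // enter the protocol
      oop fwd = ShenandoahHeap::heap()->evacuate_object(obj, Thread::current());
      // fwd is the published copy; if this thread hit OOM during evacuation,
      // it is whatever forwardee won the race.
    }                                          // leave the protocol
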
278 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
279   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
280     // This thread went through the OOM-during-evac protocol, so it is safe to return
281     // the forwarding pointer. It must not attempt any further evacuations.
282     return ShenandoahBarrierSet::resolve_forwarded(p);
283   }
284 
285   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
286 
287   size_t size = p->forward_safe_size();
288 
289   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
290 
291   bool alloc_from_gclab = true;
292   HeapWord* copy = nullptr;
293 
294 #ifdef ASSERT
295   if (ShenandoahOOMDuringEvacALot &&
296       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
297         copy = nullptr;
298   } else {
299 #endif
300     if (UseTLAB) {
301       copy = allocate_from_gclab(thread, size);
302     }
303     if (copy == nullptr) {
304       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
305       copy = allocate_memory(req);
306       alloc_from_gclab = false;
307     }
308 #ifdef ASSERT
309   }
310 #endif
311 
312   if (copy == nullptr) {
313     control_thread()->handle_alloc_failure_evac(size);
314 
315     _oom_evac_handler.handle_out_of_memory_during_evacuation();
316 
317     return ShenandoahBarrierSet::resolve_forwarded(p);
318   }
319 
320   // Copy the object:
321   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
322   oop copy_val = cast_to_oop(copy);
323 
324   if (UseCompactObjectHeaders) {
325     // The copy above is not atomic. Re-read the mark we copied and, if it is a proper
326     // (non-forwarded) mark, re-install it so that the Klass* in the copy is correct.
327     markWord mark = copy_val->mark();
328     if (!mark.is_marked()) {
329       copy_val->set_mark(mark);
330       ContinuationGCSupport::relativize_stack_chunk(copy_val);
331     } else {
332     // If we copied a mark word that already indicates 'forwarded' state, another
333     // thread has won the race: the forwardee installation below would fail, and
334     // the Klass* can no longer be read through such a mark word. Skip the transformation.
335     }
336   } else {
337     ContinuationGCSupport::relativize_stack_chunk(copy_val);
338   }
339 
340   // Try to install the new forwarding pointer.
341   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
342   if (result == copy_val) {
343     // Successfully evacuated. Our copy is now the public one!
344     shenandoah_assert_correct(nullptr, copy_val);
345     return copy_val;
346   }  else {
347     // Failed to evacuate. We need to deal with the object that is left behind. Since this
348     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
349     // But if it happens to contain references to evacuated regions, those references would
350     // not get updated for this stale copy during this cycle, and we would crash while
351     // scanning it in the next cycle.
352     //
353     // For GCLAB allocations, it is enough to roll back the allocation pointer. Either the next
354     // object will overwrite this stale copy, or the filler object on LAB retirement will
355     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
356     // have to explicitly overwrite the copy with the filler object. With that overwrite,
357     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
358     if (alloc_from_gclab) {
359       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
360     } else {

496       assert(oopDesc::is_oop(obj), "sanity");
497       assert(ctx->is_marked(obj), "object expected to be marked");
498       cl->do_object(obj);
499       cb += skip_bitmap_delta;
500       if (cb < limit_bitmap) {
501         cb = ctx->get_next_marked_addr(cb, limit_bitmap);
502       }
503     }
504   }
505 
506   // Step 2. Accurate size-based traversal, happens past the TAMS.
507   // This restarts the scan at TAMS, which makes sure we traverse all objects,
508   // regardless of what happened in Step 1.
509   HeapWord* cs = tams;
510   while (cs < limit) {
511     assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
512     assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
513     oop obj = cast_to_oop(cs);
514     assert(oopDesc::is_oop(obj), "sanity");
515     assert(ctx->is_marked(obj), "object expected to be marked");
516     size_t size = obj->forward_safe_size();
517     cl->do_object(obj);
518     cs += size;
519   }
520 }
521 
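
Step 2 now calls forward_safe_size() instead of size(): with compact object headers the Klass* lives in the mark word, and a mark that encodes a forwardee cannot be used to read the size directly. An illustrative sketch of the idea; the name comes from this patch, but the body below is an assumption, not the patch's actual implementation:

    // Sketch: resolve a possibly-forwarded mark before sizing the object.
    size_t forward_safe_size(oop obj) {
      markWord m = obj->mark();
      if (m.is_marked()) {                         // mark encodes a forwarding pointer
        oop fwd = cast_to_oop(m.decode_pointer()); // size the copy instead,
        return fwd->size();                        // whose mark/Klass* is intact
      }
      return obj->size();
    }
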
522 template <class T>
523 class ShenandoahObjectToOopClosure : public ObjectClosure {
524   T* _cl;
525 public:
526   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
527 
528   void do_object(oop obj) {
529     obj->oop_iterate(_cl);
530   }
531 };
532 
533 template <class T>
534 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
535   T* _cl;
536   MemRegion _bounds;
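
The UseCompactObjectHeaders branch above exists because the word-by-word copy races with competing evacuations: another thread can install a forwarding pointer into p's mark word mid-copy, and with compact headers that same word carries the narrow Klass*. A standalone sketch of the layout assumption; bit positions are illustrative, not HotSpot's exact encoding:

    #include <cstdint>

    // Illustrative 64-bit mark word under compact object headers: the low two
    // bits set to 0b11 denote 'marked/forwarded', and the upper bits hold the
    // narrow Klass pointer. Field widths here are assumptions.
    struct Mark {
      std::uint64_t value;
      bool          is_forwarded() const { return (value & 0x3) == 0x3; }
      std::uint32_t narrow_klass() const { return static_cast<std::uint32_t>(value >> 42); }
    };
    // When is_forwarded() is true, the non-tag bits are a forwarding pointer,
    // so narrow_klass() would decode garbage: hence the mark check before
    // relativize_stack_chunk in the new code above.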