    // No GCLABs in this thread; fall back to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise, retire the current GCLAB and refill it in the slow path
  return allocate_from_gclab_slow(thread, size);
}

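// Evacuate a single object out of the collection set and return its forwardee.
// A hedged usage sketch (illustrative only, not part of this file): call sites
// wrap evacuation in an OOM-during-evac scope so a thread that cannot allocate
// can back out safely, e.g.
//
//   {
//     ShenandoahEvacOOMScope oom_evac_scope;
//     oop fwd = ShenandoahHeap::heap()->evacuate_object(obj, Thread::current());
//   }
//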
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }

  // [... excerpt gap: the remainder of evacuate_object and some intervening
  //      code are elided here ...]
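// (Hedged note on the elided code, following upstream Shenandoah: once a copy
// is allocated, evacuate_object copies the object into it and CAS-installs the
// forwarding pointer so that exactly one competing evacuator wins. The excerpt
// resumes below inside ShenandoahHeap::marked_object_iterate, in the Step 1
// bitmap-driven walk of objects below the TAMS.)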
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}
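// Why two steps: below the TAMS (top-at-mark-start) a region may contain
// unmarked dead objects that are not safely parsable, so Step 1 hops between
// marked addresses using the mark bitmap. At and above the TAMS every object
// was allocated after marking started and is implicitly live, so Step 2 can
// use an exact, size-based linear walk.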

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};
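// Hedged usage sketch (the closure name below is a hypothetical stand-in):
// this adapter lets an oop closure drive an object-based iterator such as
// marked_object_iterate above, e.g.
//
//   MyOopClosure oops;                                  // hypothetical
//   ShenandoahObjectToOopClosure<MyOopClosure> objs(&oops);
//   heap->marked_object_iterate(region, &objs);
//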

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) : _cl(cl), _bounds(bottom, top) {}
  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};
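// (Hedged design note: the bounded variant passes a MemRegion to
// oopDesc::oop_iterate(cl, mr) so that only oop fields falling inside the
// bounds are visited, confining the scan to a slice of the heap.)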