
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

Old version:

1286     // This thread went through the OOM during evac protocol. It is safe to return
1287     // the forward pointer. It must not attempt to evacuate any other objects.
1288     return ShenandoahBarrierSet::resolve_forwarded(p);
1289   }
1290 
1291   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1292 
1293   ShenandoahHeapRegion* r = heap_region_containing(p);
1294   assert(!r->is_humongous(), "never evacuate humongous objects");
1295 
1296   ShenandoahAffiliation target_gen = r->affiliation();
1297   return try_evacuate_object(p, thread, r, target_gen);
1298 }
1299 
1300 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1301                                                ShenandoahAffiliation target_gen) {
1302   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1303   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1304   bool alloc_from_lab = true;
1305   HeapWord* copy = nullptr;
1306   size_t size = ShenandoahForwarding::size(p);
1307 
1308 #ifdef ASSERT
1309   if (ShenandoahOOMDuringEvacALot &&
1310       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1311     copy = nullptr;
1312   } else {
1313 #endif
1314     if (UseTLAB) {
1315       copy = allocate_from_gclab(thread, size);
1316     }
1317     if (copy == nullptr) {
1318       // If we failed to allocate in LAB, we'll try a shared allocation.
1319       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1320       copy = allocate_memory(req);
1321       alloc_from_lab = false;
1322     }
1323 #ifdef ASSERT
1324   }
1325 #endif
1326 
1327   if (copy == nullptr) {
1328     control_thread()->handle_alloc_failure_evac(size);
1329 
1330     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1331 
1332     return ShenandoahBarrierSet::resolve_forwarded(p);
1333   }
1334 
1335   // Copy the object:
1336   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1337 
1338   // Try to install the new forwarding pointer.
1339   oop copy_val = cast_to_oop(copy);
1340   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1341   if (result == copy_val) {
1342     // Successfully evacuated. Our copy is now the public one!
1343     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1344     shenandoah_assert_correct(nullptr, copy_val);
1345     return copy_val;
1346   }  else {
1347     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1348     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1349     // But if it happens to contain references to evacuated regions, those references would
1350     // not get updated for this stale copy during this cycle, and we will crash while scanning
1351     // it the next cycle.
1352     if (alloc_from_lab) {
1353       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1354       // object will overwrite this stale copy, or the filler object on LAB retirement will
1355       // do this.
1356       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1357     } else {
1358       // For non-LAB allocations, we have no way to retract the allocation, and
1359       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1360       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1361       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1362       fill_with_object(copy, size);

New version:

1286     // This thread went through the OOM during evac protocol. It is safe to return
1287     // the forward pointer. It must not attempt to evacuate any other objects.
1288     return ShenandoahBarrierSet::resolve_forwarded(p);
1289   }
1290 
1291   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1292 
1293   ShenandoahHeapRegion* r = heap_region_containing(p);
1294   assert(!r->is_humongous(), "never evacuate humongous objects");
1295 
1296   ShenandoahAffiliation target_gen = r->affiliation();
1297   return try_evacuate_object(p, thread, r, target_gen);
1298 }
1299 
1300 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1301                                                ShenandoahAffiliation target_gen) {
1302   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1303   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1304   bool alloc_from_lab = true;
1305   HeapWord* copy = nullptr;
1306 
1307   markWord mark = p->mark();
1308   if (ShenandoahForwarding::is_forwarded(mark)) {
1309     return ShenandoahForwarding::get_forwardee(p);
1310   }
1311   size_t old_size = ShenandoahForwarding::size(p);
1312   size_t size = p->copy_size(old_size, mark);
1313 
1314 #ifdef ASSERT
1315   if (ShenandoahOOMDuringEvacALot &&
1316       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1317     copy = nullptr;
1318   } else {
1319 #endif
1320     if (UseTLAB) {
1321       copy = allocate_from_gclab(thread, size);
1322     }
1323     if (copy == nullptr) {
1324       // If we failed to allocate in LAB, we'll try a shared allocation.
1325       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1326       copy = allocate_memory(req);
1327       alloc_from_lab = false;
1328     }
1329 #ifdef ASSERT
1330   }
1331 #endif
1332 
1333   if (copy == nullptr) {
1334     control_thread()->handle_alloc_failure_evac(size);
1335 
1336     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1337 
1338     return ShenandoahBarrierSet::resolve_forwarded(p);
1339   }
1340 
1341   // Copy the object:
1342   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
1343 
1344   // Try to install the new forwarding pointer.
1345   oop copy_val = cast_to_oop(copy);
1346   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1347   if (result == copy_val) {
1348     // Successfully evacuated. Our copy is now the public one!
1349     copy_val->initialize_hash_if_necessary(p);
1350     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1351     shenandoah_assert_correct(nullptr, copy_val);
1352     return copy_val;
1353   }  else {
1354     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1355     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1356     // But if it happens to contain references to evacuated regions, those references would
1357     // not get updated for this stale copy during this cycle, and we will crash while scanning
1358     // it the next cycle.
1359     if (alloc_from_lab) {
1360       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1361       // object will overwrite this stale copy, or the filler object on LAB retirement will
1362       // do this.
1363       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1364     } else {
1365       // For non-LAB allocations, we have no way to retract the allocation, and
1366       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1367       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1368       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1369       fill_with_object(copy, size);
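
The shape of the change, as visible in the new version above: the evacuating thread first returns early if the object is already forwarded, then distinguishes the object's current size (old_size) from the size of the copy (size = p->copy_size(old_size, mark)), allocates and rolls back using the copy size, copies only old_size words, and calls copy_val->initialize_hash_if_necessary(p) once its copy wins the forwarding install. The sketch below is a minimal, self-contained illustration of that allocate-larger / copy-smaller / initialize-tail pattern only; it is not HotSpot code, and evacuate_sketch, the word type, and the placeholder hash value 0x1234 are invented for illustration.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical stand-ins for illustration only; this is not HotSpot code.
typedef uintptr_t HeapWord;

// "Evacuate" src into fresh storage: the destination may be larger than the
// source (copy_words >= old_words), only old_words are copied, and the extra
// tail word is initialized afterwards -- mirroring the copy_size() /
// initialize_hash_if_necessary() split in the new version of the patch.
static HeapWord* evacuate_sketch(const HeapWord* src, std::size_t old_words, std::size_t copy_words) {
  HeapWord* copy = static_cast<HeapWord*>(std::calloc(copy_words, sizeof(HeapWord)));
  if (copy == nullptr) {
    return nullptr;                       // corresponds to the OOM-during-evac path
  }
  std::memcpy(copy, src, old_words * sizeof(HeapWord));  // copy only the original payload
  if (copy_words > old_words) {
    copy[old_words] = 0x1234;             // e.g. install an identity hash in the grown slot
  }
  return copy;
}

int main() {
  HeapWord obj[2] = {42, 7};                      // pretend two-word object
  HeapWord* copy = evacuate_sketch(obj, 2, 3);    // the copy grows by one word
  if (copy != nullptr) {
    std::printf("%zu %zu %zu\n",
                static_cast<std::size_t>(copy[0]),
                static_cast<std::size_t>(copy[1]),
                static_cast<std::size_t>(copy[2]));
    std::free(copy);
  }
  return 0;
}

In the patch itself, whether the copy is larger than the original is decided by p->copy_size(old_size, mark); the sketch grows the copy unconditionally purely to show the mechanics.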