
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

1343     // This thread went through the OOM during evac protocol. It is safe to return
1344     // the forward pointer. It must not attempt to evacuate any other objects.
1345     return ShenandoahBarrierSet::resolve_forwarded(p);
1346   }
1347 
1348   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1349 
1350   ShenandoahHeapRegion* r = heap_region_containing(p);
1351   assert(!r->is_humongous(), "never evacuate humongous objects");
1352 
1353   ShenandoahAffiliation target_gen = r->affiliation();
1354   return try_evacuate_object(p, thread, r, target_gen);
1355 }
1356 
1357 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1358                                                ShenandoahAffiliation target_gen) {
1359   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1360   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1361   bool alloc_from_lab = true;
1362   HeapWord* copy = nullptr;
1363   size_t size = ShenandoahForwarding::size(p);
1364 
1365 #ifdef ASSERT
1366   if (ShenandoahOOMDuringEvacALot &&
1367       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1368     copy = nullptr;
1369   } else {
1370 #endif
1371     if (UseTLAB) {
1372       copy = allocate_from_gclab(thread, size);
1373     }
1374     if (copy == nullptr) {
1375       // If we failed to allocate in LAB, we'll try a shared allocation.
1376       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1377       copy = allocate_memory(req);
1378       alloc_from_lab = false;
1379     }
1380 #ifdef ASSERT
1381   }
1382 #endif
1383 
1384   if (copy == nullptr) {
1385     control_thread()->handle_alloc_failure_evac(size);
1386 
1387     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1388 
1389     return ShenandoahBarrierSet::resolve_forwarded(p);
1390   }
1391 
1392   // Copy the object:
1393   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1394 
1395   // Try to install the new forwarding pointer.
1396   oop copy_val = cast_to_oop(copy);
1397   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1398   if (result == copy_val) {
1399     // Successfully evacuated. Our copy is now the public one!
1400     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1401     shenandoah_assert_correct(nullptr, copy_val);
1402     return copy_val;
1403   }  else {
1404     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1405     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1406     // But if it happens to contain references to evacuated regions, those references would
1407     // not get updated for this stale copy during this cycle, and we will crash while scanning
1408     // it the next cycle.
1409     if (alloc_from_lab) {
1410       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1411       // object will overwrite this stale copy, or the filler object on LAB retirement will
1412       // do this.
1413       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1414     } else {
1415       // For non-LAB allocations, we have no way to retract the allocation, and
1416       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1417       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1418       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1419       fill_with_object(copy, size);

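The comments in the listing above describe the copy-then-CAS evacuation protocol: each racing GC thread makes a private copy, exactly one thread publishes its copy by installing the forwarding pointer via ShenandoahForwarding::try_update_forwardee, and every loser has to dispose of its stale copy, either by rolling back its GCLAB allocation pointer or by overwriting a shared allocation with a filler object. As a rough standalone sketch of that pattern, using std::atomic and invented helper names rather than the real HotSpot types:

  #include <atomic>
  #include <algorithm>
  #include <cstddef>
  #include <cstdint>

  // Illustrative stand-ins only; the real code works on oops, mark words,
  // GCLABs and ShenandoahForwarding. None of the names below exist in HotSpot.
  struct Obj {
    std::atomic<Obj*> forwardee{nullptr};  // forwarding pointer (HotSpot encodes it in the mark word)
    std::size_t       words = 0;           // body size in heap words
    std::intptr_t     body[8] = {};        // payload, fixed size to keep the sketch small
  };

  // Copy first, then race to publish the copy with a CAS. Exactly one thread
  // wins; every loser must dispose of its now-stale private copy, either by
  // rolling back its LAB allocation or by overwriting the copy with a filler.
  Obj* try_evacuate(Obj* from,
                    Obj* (*alloc)(),               // stands in for GCLAB / shared allocation
                    void (*undo_or_fill)(Obj*)) {  // stands in for LAB rollback / fill_with_object
    Obj* copy = alloc();
    if (copy == nullptr) {
      // Out of memory: settle for whatever copy (if any) is already published.
      Obj* fwd = from->forwardee.load(std::memory_order_acquire);
      return fwd != nullptr ? fwd : from;
    }

    copy->words = from->words;
    std::copy(from->body, from->body + 8, copy->body);

    Obj* expected = nullptr;
    if (from->forwardee.compare_exchange_strong(expected, copy,
                                                std::memory_order_acq_rel,
                                                std::memory_order_acquire)) {
      return copy;          // we won: our copy is now the public one
    }
    undo_or_fill(copy);     // we lost: retract the allocation or fill it
    return expected;        // expected now holds the winner's copy
  }

The real slow path additionally notifies the control thread and goes through the OOM-during-evacuation handler before falling back to the published forwardee (lines 1384..1390 above).
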
1343     // This thread went through the OOM during evac protocol. It is safe to return
1344     // the forward pointer. It must not attempt to evacuate any other objects.
1345     return ShenandoahBarrierSet::resolve_forwarded(p);
1346   }
1347 
1348   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1349 
1350   ShenandoahHeapRegion* r = heap_region_containing(p);
1351   assert(!r->is_humongous(), "never evacuate humongous objects");
1352 
1353   ShenandoahAffiliation target_gen = r->affiliation();
1354   return try_evacuate_object(p, thread, r, target_gen);
1355 }
1356 
1357 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1358                                                ShenandoahAffiliation target_gen) {
1359   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1360   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1361   bool alloc_from_lab = true;
1362   HeapWord* copy = nullptr;
1363 
1364   markWord mark = p->mark();
1365   if (ShenandoahForwarding::is_forwarded(mark)) {
1366     return ShenandoahForwarding::get_forwardee(p);
1367   }
1368   size_t old_size = ShenandoahForwarding::size(p);
1369   size_t size = p->copy_size(old_size, mark);
1370 
1371 #ifdef ASSERT
1372   if (ShenandoahOOMDuringEvacALot &&
1373       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1374     copy = nullptr;
1375   } else {
1376 #endif
1377     if (UseTLAB) {
1378       copy = allocate_from_gclab(thread, size);
1379     }
1380     if (copy == nullptr) {
1381       // If we failed to allocate in LAB, we'll try a shared allocation.
1382       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1383       copy = allocate_memory(req);
1384       alloc_from_lab = false;
1385     }
1386 #ifdef ASSERT
1387   }
1388 #endif
1389 
1390   if (copy == nullptr) {
1391     control_thread()->handle_alloc_failure_evac(size);
1392 
1393     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1394 
1395     return ShenandoahBarrierSet::resolve_forwarded(p);
1396   }
1397 
1398   // Copy the object:
1399   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
1400 
1401   // Try to install the new forwarding pointer.
1402   oop copy_val = cast_to_oop(copy);
1403   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1404   if (result == copy_val) {
1405     // Successfully evacuated. Our copy is now the public one!
1406     copy_val->initialize_hash_if_necessary(p);
1407     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1408     shenandoah_assert_correct(nullptr, copy_val);
1409     return copy_val;
1410   }  else {
1411     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1412     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1413     // But if it happens to contain references to evacuated regions, those references would
1414     // not get updated for this stale copy during this cycle, and we will crash while scanning
1415     // it the next cycle.
1416     if (alloc_from_lab) {
1417       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1418       // object will overwrite this stale copy, or the filler object on LAB retirement will
1419       // do this.
1420       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1421     } else {
1422       // For non-LAB allocations, we have no way to retract the allocation, and
1423       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1424       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1425       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1426       fill_with_object(copy, size);
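
Relative to the old version, the new code reads the mark word once, returns early for an already-forwarded object, and distinguishes old_size (the object's current size, used for the word copy at line 1399) from size = p->copy_size(old_size, mark) (the size actually allocated); the winning thread then calls copy_val->initialize_hash_if_necessary(p) at line 1406. A plausible reading is that this supports compact object headers, where an object whose identity hash has been computed but not yet stored grows by one word when it is moved, so the hash can be materialized in the copy. A minimal sketch of that sizing idea, with purely hypothetical names (only copy_size and initialize_hash_if_necessary correspond to calls in the hunk):

  #include <cstddef>
  #include <cstdint>

  // Hypothetical model, not the HotSpot API: a "hashed but not yet expanded"
  // object needs one extra trailing word in its new location to hold the
  // identity hash.
  enum class HashState { kUnhashed, kHashed, kHashedAndExpanded };

  struct Mark {
    HashState      hash_state;
    std::uintptr_t hash;   // identity hash value (illustrative)
  };

  // Analogue of p->copy_size(old_size, mark): the copy may be larger than the
  // original object.
  std::size_t copy_size(std::size_t old_size_words, const Mark& mark) {
    return old_size_words + (mark.hash_state == HashState::kHashed ? 1 : 0);
  }

  // Analogue of copy_val->initialize_hash_if_necessary(p): only the thread
  // that won the forwarding CAS writes the hash into the reserved word.
  void initialize_hash_if_necessary(std::uintptr_t* copy_base,
                                    std::size_t old_size_words,
                                    const Mark& original_mark) {
    if (original_mark.hash_state == HashState::kHashed) {
      copy_base[old_size_words] = original_mark.hash;
    }
  }

Note that only old_size words are copied (line 1399), while the loser path still undoes or fills size words (lines 1420 and 1426), since that is what was actually allocated for the copy.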