src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp

  // before moving the allocation to region #N+1.
  //
  // The worst case is realized when the "answer" is "region size", which means the
  // allocator could prematurely retire an entire region. Having smaller TLABs does not
  // fix that completely, but it reduces the probability of overly wasteful region
  // retirement. With the current divisor, we waste no more than 1/8 of the region size
  // in the worst case. This also has a secondary effect on collection set selection:
  // even under the race, retired regions would be at least 7/8 used, which allows
  // relying on "used" - "live" for cset selection. Otherwise, we could get a fragmented
  // region below the garbage threshold that would never be considered for collection.
  //
  // The whole thing is mitigated if Elastic TLABs are enabled.
  //
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert(MaxTLABSizeBytes > MinTLABSize, "should be larger");

  size_t locking_card_size = ((size_t)1) << (ShenandoahEvacLockGranularity + LogHeapWordSize);
  if (locking_card_size > RegionSizeBytes) {
    err_msg message("Evac locking card size (" SIZE_FORMAT "%s) should not be larger than region size (" SIZE_FORMAT "%s).",
                    byte_size_in_proper_unit(locking_card_size), proper_unit_for_byte_size(locking_card_size),
                    byte_size_in_proper_unit(RegionSizeBytes),   proper_unit_for_byte_size(RegionSizeBytes));
    vm_exit_during_initialization("Invalid -XX:ShenandoahEvacLockGranularity option", message);
  }
}
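
For a concrete sense of the arithmetic above, here is a minimal standalone sketch. The region size, heap word size, and -XX:ShenandoahEvacLockGranularity value below are illustrative assumptions, not values taken from this change, and the humongous-threshold clamp is omitted:

// Standalone sketch (illustrative values only), mirroring the sizing math above.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t heap_word_size    = 8;                  // assumed 64-bit HeapWordSize
  const size_t region_size_bytes = 32 * 1024 * 1024;   // hypothetical 32 MB region
  const size_t region_size_words = region_size_bytes / heap_word_size;

  // Without elastic TLABs, cap TLABs at 1/8 of a region: at most 1/8 of the region
  // is wasted to a racy retirement, so a retired region stays at least 7/8 used.
  const size_t max_tlab_words = region_size_words / 8;

  // Evacuation locking card size: one lock covers 2^granularity heap words.
  const unsigned log_heap_word_size    = 3;            // log2(heap_word_size)
  const unsigned evac_lock_granularity = 12;           // hypothetical flag value
  const size_t locking_card_size = (size_t)1 << (evac_lock_granularity + log_heap_word_size);

  printf("max TLAB size:          %zu bytes\n", max_tlab_words * heap_word_size); // 4 MB
  printf("evac locking card size: %zu bytes\n", locking_card_size);               // 32 KB
  printf("card fits in region:    %s\n", locking_card_size <= region_size_bytes ? "yes" : "no");
  return 0;
}

With these assumptions a non-elastic TLAB is capped at 4 MB (1/8 of the 32 MB region), and the evacuation locking card covers 32 KB, well below the region size, so the initialization check above would pass.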

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");

