15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27
28 #include "cds/archiveHeapWriter.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/classUnloadingContext.hpp"
31 #include "gc/shared/fullGCForwarding.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/locationPrinter.inline.hpp"
36 #include "gc/shared/memAllocator.hpp"
37 #include "gc/shared/plab.hpp"
38 #include "gc/shared/tlab_globals.hpp"
39 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
40 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
41 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
42 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
43 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
44 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
45 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
46 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
47 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
48 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
49 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
50 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
51 #include "gc/shenandoah/shenandoahControlThread.hpp"
52 #include "gc/shenandoah/shenandoahFreeSet.hpp"
53 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
54 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
184 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
185
186 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
187 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
188
189 _num_regions = ShenandoahHeapRegion::region_count();
190 assert(_num_regions == (max_byte_size / reg_size_bytes),
191 "Regions should cover entire heap exactly: %zu != %zu/%zu",
192 _num_regions, max_byte_size, reg_size_bytes);
193
194 size_t num_committed_regions = init_byte_size / reg_size_bytes;
195 num_committed_regions = MIN2(num_committed_regions, _num_regions);
196 assert(num_committed_regions <= _num_regions, "sanity");
197 _initial_size = num_committed_regions * reg_size_bytes;
198
199 size_t num_min_regions = min_byte_size / reg_size_bytes;
200 num_min_regions = MIN2(num_min_regions, _num_regions);
201 assert(num_min_regions <= _num_regions, "sanity");
202 _minimum_size = num_min_regions * reg_size_bytes;
203
204 // Default to max heap size.
205 _soft_max_size = _num_regions * reg_size_bytes;
206
207 _committed = _initial_size;
208
209 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
210 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
211 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
212
213 //
214 // Reserve and commit memory for heap
215 //
216
217 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
218 initialize_reserved_region(heap_rs);
219 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
220 _heap_region_special = heap_rs.special();
221
222 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
223 "Misaligned heap: " PTR_FORMAT, p2i(base()));
224 os::trace_page_sizes_for_requested_size("Heap",
225 max_byte_size, heap_alignment,
507 } else {
508 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
509 }
510 } else {
511 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
512 }
513 _gc_mode->initialize_flags();
514 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
515 vm_exit_during_initialization(
516 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
517 _gc_mode->name()));
518 }
519 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
520 vm_exit_during_initialization(
521 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
522 _gc_mode->name()));
523 }
524 }
525
526 void ShenandoahHeap::initialize_heuristics() {
527   // Set up the single global generation spanning the whole heap, then let it
528   // instantiate heuristics matching the active GC mode.
529   ShenandoahMode* const gc_mode = mode();
530   _global_generation = new ShenandoahGlobalGeneration(gc_mode->is_generational(), max_workers(), max_capacity(), max_capacity());
531   _global_generation->initialize_heuristics(gc_mode);
532 }
530
531 #ifdef _MSC_VER
532 #pragma warning( push )
533 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
534 #endif
535
536 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
537 CollectedHeap(),
538 _gc_generation(nullptr),
539 _active_generation(nullptr),
540 _initial_size(0),
541 _committed(0),
542 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
543 _workers(nullptr),
544 _safepoint_workers(nullptr),
545 _heap_region_special(false),
546 _num_regions(0),
547 _regions(nullptr),
1767 ShenandoahScanObjectStack oop_stack;
1768 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1769 // Seed the stack with root scan
1770 scan_roots_for_iteration(&oop_stack, &oops);
1771
1772 // Work through the oop stack to traverse heap
1773 while (! oop_stack.is_empty()) {
1774 oop obj = oop_stack.pop();
1775 assert(oopDesc::is_oop(obj), "must be a valid oop");
1776 cl->do_object(obj);
1777 obj->oop_iterate(&oops);
1778 }
1779
1780 assert(oop_stack.is_empty(), "should be empty");
1781 // Reclaim bitmap
1782 reclaim_aux_bitmap_for_iteration();
1783 }
1784
1785 // Make the auxiliary marking bitmap usable for a safepoint heap iteration:
1786 // commit its backing memory if needed (special regions need no explicit
1787 // commit), then clear stale marks. Returns false if the commit fails.
1788 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1789   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1790
1791   if (!_aux_bitmap_region_special) {
1792     char* const base = (char*) _aux_bitmap_region.start();
1793     if (!os::commit_memory(base, _aux_bitmap_region.byte_size(), false)) {
1794       log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1795       return false;
1796     }
1797   }
1798
1799   // Start from a clean bitmap: previous iterations may have left marks behind.
1800   _aux_bit_map.clear();
1801   return true;
1802 }
1796
1797 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1798   // Seed the iteration work stack with the GC roots of the current GC cycle.
1799   // Root scanning takes its own locks; it is important that they are
1800   // relinquished before the caller dives into the heap dumper.
1801   uint worker_count = 1;
1802   auto* const sp_workers = safepoint_workers();
1803   if (sp_workers != nullptr) {
1804     worker_count = sp_workers->active_workers();
1805   }
1806   ShenandoahHeapIterationRootScanner root_scanner(worker_count);
1807   root_scanner.roots_do(oops);
1808 }
1806
1807 // Give back the memory backing the auxiliary marking bitmap after iteration.
1808 // Special regions are never uncommitted; a failed uncommit only costs memory,
1809 // so it is reported but not fatal.
1810 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1811   if (_aux_bitmap_region_special) {
1812     return;
1813   }
1814   char* const base = (char*) _aux_bitmap_region.start();
1815   if (!os::uncommit_memory(base, _aux_bitmap_region.byte_size())) {
1816     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1817   }
1818 }
1812
1813 // Closure for parallelly iterate objects
1814 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1815 private:
1816 MarkBitMap* _bitmap;
1817 ShenandoahObjToScanQueue* _queue;
1818 ShenandoahHeap* const _heap;
1819 ShenandoahMarkingContext* const _marking_context;
1820
1821 template <class T>
1822 void do_oop_work(T* p) {
1823 T o = RawAccess<>::oop_load(p);
1824 if (!CompressedOops::is_null(o)) {
1825 oop obj = CompressedOops::decode_not_null(o);
1826 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1827 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1828 return;
1829 }
2584 ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2585 old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2586 }
2587 }
2588
2589 // Returns true if any heap region sharing the bitmap slice with r has its
2590 // bitmap committed. With skip_self, r itself is ignored, which answers
2591 // "does any *other* region of the group still need this slice?".
2592 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2593   const size_t slice = r->index() / _bitmap_regions_per_slice;
2594
2595   const size_t first = _bitmap_regions_per_slice * slice;
2596   const size_t limit = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2597   for (size_t idx = first; idx < limit; idx++) {
2598     assert (idx / _bitmap_regions_per_slice == slice, "same slice");
2599     if (skip_self && idx == r->index()) {
2600       continue;
2601     }
2602     if (get_region(idx)->is_committed()) {
2603       return true;
2604     }
2605   }
2606   return false;
2607 }
2603
2604 // Ensure the bitmap slice covering r's group is committed. Returns false
2605 // only when the commit itself fails.
2606 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2607   shenandoah_assert_heaplocked();
2608
2609   // Bitmaps for special regions need no explicit commits.
2610   if (_bitmap_region_special) {
2611     return true;
2612   }
2613
2614   if (is_bitmap_slice_committed(r, true)) {
2615     // Another region of the group already keeps the slice committed;
2616     // nothing left to do.
2617     return true;
2618   }
2619
2620   // Commit the bitmap slice backing this region's group:
2621   const size_t slice = r->index() / _bitmap_regions_per_slice;
2622   const size_t offset = _bitmap_bytes_per_slice * slice;
2623   const size_t length = _bitmap_bytes_per_slice;
2624   char* const start = (char*) _bitmap_region.start() + offset;
2625
2626   if (!os::commit_memory(start, length, false)) {
2627     return false;
2628   }
2629
2630   if (AlwaysPreTouch) {
2631     // Touch the freshly committed pages now, per AlwaysPreTouch policy.
2632     os::pretouch_memory(start, start + length, _pretouch_bitmap_page_size);
2633   }
2634
2635   return true;
2636 }
2634
2635 // Uncommit the bitmap slice covering r's group, unless some other region of
2636 // the group still needs it. Returns false only when the uncommit fails.
2637 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2638   shenandoah_assert_heaplocked();
2639
2640   // Bitmaps in special regions are never uncommitted.
2641   if (_bitmap_region_special) {
2642     return true;
2643   }
2644
2645   if (is_bitmap_slice_committed(r, true)) {
2646     // Another region of the group still uses the slice; keep it committed.
2647     return true;
2648   }
2649
2650   // Uncommit the bitmap slice backing this region's group:
2651   const size_t slice = r->index() / _bitmap_regions_per_slice;
2652   const size_t offset = _bitmap_bytes_per_slice * slice;
2653   char* const start = (char*) _bitmap_region.start() + offset;
2654   return os::uncommit_memory(start, _bitmap_bytes_per_slice);
2655 }
2658
2659 // Forward to the uncommit thread; no-op when it was never started.
2660 void ShenandoahHeap::forbid_uncommit() {
2661   if (_uncommit_thread == nullptr) {
2662     return;
2663   }
2664   _uncommit_thread->forbid_uncommit();
2665 }
2664
2665 // Forward to the uncommit thread; no-op when it was never started.
2666 void ShenandoahHeap::allow_uncommit() {
2667   if (_uncommit_thread == nullptr) {
2668     return;
2669   }
2670   _uncommit_thread->allow_uncommit();
2671 }
2670
2671 #ifdef ASSERT
2672 bool ShenandoahHeap::is_uncommit_in_progress() {
2673 if (_uncommit_thread != nullptr) {
2674 return _uncommit_thread->is_uncommit_in_progress();
2675 }
2676 return false;
|
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27
28 #include "cds/archiveHeapWriter.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/classUnloadingContext.hpp"
31 #include "gc/shared/fullGCForwarding.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/gcTimer.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/gc_globals.hpp"
36 #include "gc/shared/locationPrinter.inline.hpp"
37 #include "gc/shared/memAllocator.hpp"
38 #include "gc/shared/plab.hpp"
39 #include "gc/shared/tlab_globals.hpp"
40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
42 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
43 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
44 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
45 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
46 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
47 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
48 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
49 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
50 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
51 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
52 #include "gc/shenandoah/shenandoahControlThread.hpp"
53 #include "gc/shenandoah/shenandoahFreeSet.hpp"
54 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
55 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
185 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
186
187 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
188 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
189
190 _num_regions = ShenandoahHeapRegion::region_count();
191 assert(_num_regions == (max_byte_size / reg_size_bytes),
192 "Regions should cover entire heap exactly: %zu != %zu/%zu",
193 _num_regions, max_byte_size, reg_size_bytes);
194
195 size_t num_committed_regions = init_byte_size / reg_size_bytes;
196 num_committed_regions = MIN2(num_committed_regions, _num_regions);
197 assert(num_committed_regions <= _num_regions, "sanity");
198 _initial_size = num_committed_regions * reg_size_bytes;
199
200 size_t num_min_regions = min_byte_size / reg_size_bytes;
201 num_min_regions = MIN2(num_min_regions, _num_regions);
202 assert(num_min_regions <= _num_regions, "sanity");
203 _minimum_size = num_min_regions * reg_size_bytes;
204
205 _soft_max_size = SoftMaxHeapSize;
206
207 _committed = _initial_size;
208
209 size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
210 size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
211 size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
212
213 //
214 // Reserve and commit memory for heap
215 //
216
217 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
218 initialize_reserved_region(heap_rs);
219 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
220 _heap_region_special = heap_rs.special();
221
222 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
223 "Misaligned heap: " PTR_FORMAT, p2i(base()));
224 os::trace_page_sizes_for_requested_size("Heap",
225 max_byte_size, heap_alignment,
507 } else {
508 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
509 }
510 } else {
511 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
512 }
513 _gc_mode->initialize_flags();
514 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
515 vm_exit_during_initialization(
516 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
517 _gc_mode->name()));
518 }
519 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
520 vm_exit_during_initialization(
521 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
522 _gc_mode->name()));
523 }
524 }
525
526 void ShenandoahHeap::initialize_heuristics() {
527   // Set up the single global generation spanning the whole heap, then let it
528   // instantiate heuristics matching the active GC mode.
529   ShenandoahMode* const gc_mode = mode();
530   _global_generation = new ShenandoahGlobalGeneration(gc_mode->is_generational(), max_workers(), max_capacity());
531   _global_generation->initialize_heuristics(gc_mode);
532 }
530
531 #ifdef _MSC_VER
532 #pragma warning( push )
533 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
534 #endif
535
536 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
537 CollectedHeap(),
538 _gc_generation(nullptr),
539 _active_generation(nullptr),
540 _initial_size(0),
541 _committed(0),
542 _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
543 _workers(nullptr),
544 _safepoint_workers(nullptr),
545 _heap_region_special(false),
546 _num_regions(0),
547 _regions(nullptr),
1767 ShenandoahScanObjectStack oop_stack;
1768 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1769 // Seed the stack with root scan
1770 scan_roots_for_iteration(&oop_stack, &oops);
1771
1772 // Work through the oop stack to traverse heap
1773 while (! oop_stack.is_empty()) {
1774 oop obj = oop_stack.pop();
1775 assert(oopDesc::is_oop(obj), "must be a valid oop");
1776 cl->do_object(obj);
1777 obj->oop_iterate(&oops);
1778 }
1779
1780 assert(oop_stack.is_empty(), "should be empty");
1781 // Reclaim bitmap
1782 reclaim_aux_bitmap_for_iteration();
1783 }
1784
1785 // Make the auxiliary marking bitmap usable for a safepoint heap iteration:
1786 // commit its backing memory if needed (special regions need no explicit
1787 // commit), then clear stale marks. Returns false if the commit fails.
1788 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1789   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1790   if (!_aux_bitmap_region_special &&
1791       !os::commit_memory((char*) _aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1792     log_warning(gc)("Auxiliary marking bitmap commit failed: " PTR_FORMAT " (%zu bytes)",
1793                     p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1794     return false;
1795   }
1796   // Start from a clean bitmap: previous iterations may have left marks behind.
1797   _aux_bit_map.clear();
1798   return true;
1799 }
1798
1799 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1800   // Seed the iteration work stack with the GC roots of the current GC cycle.
1801   // Root scanning takes its own locks; it is important that they are
1802   // relinquished before the caller dives into the heap dumper.
1803   uint worker_count = 1;
1804   auto* const sp_workers = safepoint_workers();
1805   if (sp_workers != nullptr) {
1806     worker_count = sp_workers->active_workers();
1807   }
1808   ShenandoahHeapIterationRootScanner root_scanner(worker_count);
1809   root_scanner.roots_do(oops);
1810 }
1808
1809 // Give back the memory backing the auxiliary marking bitmap after iteration.
1810 // Special regions are never uncommitted.
1811 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1812   if (_aux_bitmap_region_special) {
1813     return;
1814   }
1815   if (!os::uncommit_memory((char*) _aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1816     log_warning(gc)("Auxiliary marking bitmap uncommit failed: " PTR_FORMAT " (%zu bytes)",
1817                     p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1818     assert(false, "Auxiliary marking bitmap uncommit should always succeed");
1819   }
1820 }
1819
1820 // Closure for parallelly iterate objects
1821 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1822 private:
1823 MarkBitMap* _bitmap;
1824 ShenandoahObjToScanQueue* _queue;
1825 ShenandoahHeap* const _heap;
1826 ShenandoahMarkingContext* const _marking_context;
1827
1828 template <class T>
1829 void do_oop_work(T* p) {
1830 T o = RawAccess<>::oop_load(p);
1831 if (!CompressedOops::is_null(o)) {
1832 oop obj = CompressedOops::decode_not_null(o);
1833 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1834 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1835 return;
1836 }
2591 ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2592 old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2593 }
2594 }
2595
2596 // Returns true if any heap region sharing the bitmap slice with r has its
2597 // bitmap committed. With skip_self, r itself is ignored, which answers
2598 // "does any *other* region of the group still need this slice?".
2599 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2600   const size_t slice = r->index() / _bitmap_regions_per_slice;
2601
2602   const size_t first = _bitmap_regions_per_slice * slice;
2603   const size_t limit = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2604   for (size_t idx = first; idx < limit; idx++) {
2605     assert (idx / _bitmap_regions_per_slice == slice, "same slice");
2606     if (skip_self && idx == r->index()) {
2607       continue;
2608     }
2609     if (get_region(idx)->is_committed()) {
2610       return true;
2611     }
2612   }
2613   return false;
2614 }
2610
2611 // Ensure the bitmap slice covering r's group is committed; exits the VM if
2612 // the commit cannot be satisfied. Must not be used for special bitmap memory.
2613 void ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2614   shenandoah_assert_heaplocked();
2615   assert(!is_bitmap_region_special(), "Not for special memory");
2616
2617   if (is_bitmap_slice_committed(r, true)) {
2618     // Another region of the group already keeps the slice committed;
2619     // nothing left to do.
2620     return;
2621   }
2622
2623   // Commit the bitmap slice backing this region's group:
2624   const size_t slice = r->index() / _bitmap_regions_per_slice;
2625   const size_t offset = _bitmap_bytes_per_slice * slice;
2626   const size_t length = _bitmap_bytes_per_slice;
2627   char* const start = (char*) _bitmap_region.start() + offset;
2628
2629   os::commit_memory_or_exit(start, length, false, "Unable to commit bitmap slice");
2630
2631   if (AlwaysPreTouch) {
2632     // Touch the freshly committed pages now, per AlwaysPreTouch policy.
2633     os::pretouch_memory(start, start + length, _pretouch_bitmap_page_size);
2634   }
2635 }
2633
2634 // Uncommit the bitmap slice covering r's group, unless some other region of
2635 // the group still needs it. Must not be used for special bitmap memory.
2636 void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2637   shenandoah_assert_heaplocked();
2638   assert(!is_bitmap_region_special(), "Not for special memory");
2639
2640   if (is_bitmap_slice_committed(r, true)) {
2641     // Another region of the group still uses the slice; keep it committed.
2642     return;
2643   }
2644
2645   // Uncommit the bitmap slice backing this region's group:
2646   const size_t slice = r->index() / _bitmap_regions_per_slice;
2647   const size_t offset = _bitmap_bytes_per_slice * slice;
2648   const size_t length = _bitmap_bytes_per_slice;
2649   char* const addr = (char*) _bitmap_region.start() + offset;
2650
2651   if (!os::uncommit_memory(addr, length)) {
2652     log_warning(gc)("Bitmap slice uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(addr), length);
2653     assert(false, "Bitmap slice uncommit should always succeed");
2654   }
2655 }
2656
2657 // Forward to the uncommit thread; no-op when it was never started.
2658 void ShenandoahHeap::forbid_uncommit() {
2659   if (_uncommit_thread == nullptr) {
2660     return;
2661   }
2662   _uncommit_thread->forbid_uncommit();
2663 }
2662
2663 // Forward to the uncommit thread; no-op when it was never started.
2664 void ShenandoahHeap::allow_uncommit() {
2665   if (_uncommit_thread == nullptr) {
2666     return;
2667   }
2668   _uncommit_thread->allow_uncommit();
2669 }
2668
2669 #ifdef ASSERT
2670 bool ShenandoahHeap::is_uncommit_in_progress() {
2671 if (_uncommit_thread != nullptr) {
2672 return _uncommit_thread->is_uncommit_in_progress();
2673 }
2674 return false;
|