< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

Print this page

  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "cds/archiveHeapWriter.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/fullGCForwarding.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"

  35 #include "gc/shared/locationPrinter.inline.hpp"
  36 #include "gc/shared/memAllocator.hpp"
  37 #include "gc/shared/plab.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  40 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  41 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  42 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  43 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  44 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  45 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  46 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  47 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  48 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  49 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  50 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  51 #include "gc/shenandoah/shenandoahControlThread.hpp"
  52 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  53 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  54 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"

 184   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 185 
 186   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 187   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 188 
 189   _num_regions = ShenandoahHeapRegion::region_count();
 190   assert(_num_regions == (max_byte_size / reg_size_bytes),
 191          "Regions should cover entire heap exactly: %zu != %zu/%zu",
 192          _num_regions, max_byte_size, reg_size_bytes);
 193 
 194   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 195   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 196   assert(num_committed_regions <= _num_regions, "sanity");
 197   _initial_size = num_committed_regions * reg_size_bytes;
 198 
 199   size_t num_min_regions = min_byte_size / reg_size_bytes;
 200   num_min_regions = MIN2(num_min_regions, _num_regions);
 201   assert(num_min_regions <= _num_regions, "sanity");
 202   _minimum_size = num_min_regions * reg_size_bytes;
 203 
 204   // Default to max heap size.
 205   _soft_max_size = _num_regions * reg_size_bytes;
 206 
 207   _committed = _initial_size;
 208 
 209   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 210   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 211   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 212 
 213   //
 214   // Reserve and commit memory for heap
 215   //
 216 
 217   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 218   initialize_reserved_region(heap_rs);
 219   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 220   _heap_region_special = heap_rs.special();
 221 
 222   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 223          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 224   os::trace_page_sizes_for_requested_size("Heap",
 225                                           max_byte_size, heap_alignment,

 507     } else {
 508       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 509     }
 510   } else {
 511     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 512   }
 513   _gc_mode->initialize_flags();
 514   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 515     vm_exit_during_initialization(
 516             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 517                     _gc_mode->name()));
 518   }
 519   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 520     vm_exit_during_initialization(
 521             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 522                     _gc_mode->name()));
 523   }
 524 }
 525 
 526 void ShenandoahHeap::initialize_heuristics() {
       // Create the single global generation spanning the whole heap and set up
       // its heuristics for the configured GC mode.
       // NOTE(review): capacity is passed twice — presumably a (soft capacity,
       // max capacity) pair; confirm against the ShenandoahGlobalGeneration ctor.
 527   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 528   _global_generation->initialize_heuristics(mode());
 529 }
 530 
 531 #ifdef _MSC_VER
 532 #pragma warning( push )
 533 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 534 #endif
 535 
 536 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 537   CollectedHeap(),
 538   _gc_generation(nullptr),
 539   _active_generation(nullptr),
 540   _initial_size(0),
 541   _committed(0),
 542   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 543   _workers(nullptr),
 544   _safepoint_workers(nullptr),
 545   _heap_region_special(false),
 546   _num_regions(0),
 547   _regions(nullptr),

1221 private:
1222   bool const _resize;
1223 public:
1224   explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1225   void do_thread(Thread* thread) override {
         // Retire this thread's GCLAB: stop allocating from it and make its
         // unused tail parsable.
1226     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1227     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1228     gclab->retire();
         // When resizing was requested, zero the recorded GCLAB size so the
         // next cycle sizes the buffer from scratch.
1229     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1230       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1231     }
1232 
         // In generational mode each thread also carries a PLAB for old-gen
         // (promotion) allocations; retire it as well.
1233     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1234       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1235       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1236 
1237       // There are two reasons to retire all plabs between old-gen evacuation passes.
1238       //  1. We need to make the plab memory parsable by remembered-set scanning.
1239       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1240       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);




         // As with the GCLAB above, optionally reset the recorded PLAB size.
1241       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1242         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1243       }
1244     }
1245   }
1246 };
1247 
1248 class ShenandoahGCStatePropagator : public HandshakeClosure {
1249 public:
1250   explicit ShenandoahGCStatePropagator(char gc_state) :
1251     HandshakeClosure("Shenandoah GC State Change"),
1252     _gc_state(gc_state) {}
1253 
1254   void do_thread(Thread* thread) override {
1255     ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1256   }
1257 private:
1258   char _gc_state;
1259 };
1260 

1762   ShenandoahScanObjectStack oop_stack;
1763   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1764   // Seed the stack with root scan
1765   scan_roots_for_iteration(&oop_stack, &oops);
1766 
1767   // Work through the oop stack to traverse heap
1768   while (! oop_stack.is_empty()) {
1769     oop obj = oop_stack.pop();
1770     assert(oopDesc::is_oop(obj), "must be a valid oop");
1771     cl->do_object(obj);
1772     obj->oop_iterate(&oops);
1773   }
1774 
1775   assert(oop_stack.is_empty(), "should be empty");
1776   // Reclaim bitmap
1777   reclaim_aux_bitmap_for_iteration();
1778 }
1779 
1780 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1781   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1782 
1783   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1784     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1785     return false;



1786   }
1787   // Reset bitmap
1788   _aux_bit_map.clear();
1789   return true;
1790 }
1791 
1792 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1793   // Process GC roots according to current GC cycle
1794   // This populates the work stack with initial objects
1795   // It is important to relinquish the associated locks before diving
1796   // into heap dumper
1797   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1798   ShenandoahHeapIterationRootScanner rp(n_workers);
1799   rp.roots_do(oops);
1800 }
1801 
1802 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1803   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1804     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");





1805   }
1806 }
1807 
1808 // Closure for parallelly iterate objects
1809 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1810 private:
1811   MarkBitMap* _bitmap;
1812   ShenandoahObjToScanQueue* _queue;
1813   ShenandoahHeap* const _heap;
1814   ShenandoahMarkingContext* const _marking_context;
1815 
1816   template <class T>
1817   void do_oop_work(T* p) {
1818     T o = RawAccess<>::oop_load(p);
1819     if (!CompressedOops::is_null(o)) {
1820       oop obj = CompressedOops::decode_not_null(o);
1821       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1822         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1823         return;
1824       }

2579     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2580     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2581   }
2582 }
2583 
2584 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2585   size_t slice = r->index() / _bitmap_regions_per_slice;
2586 
2587   size_t regions_from = _bitmap_regions_per_slice * slice;
2588   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2589   for (size_t g = regions_from; g < regions_to; g++) {
2590     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2591     if (skip_self && g == r->index()) continue;
2592     if (get_region(g)->is_committed()) {
2593       return true;
2594     }
2595   }
2596   return false;
2597 }
2598 
// Commit backing memory for the marking-bitmap slice that covers region r.
// Several regions share one slice, so the slice may already be committed on
// behalf of a sibling region. Returns false only if the OS commit fails.
2599 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2600   shenandoah_assert_heaplocked();
2601 
2602   // Bitmaps in special regions do not need commits
2603   if (_bitmap_region_special) {
2604     return true;
2605   }
2606 
2607   if (is_bitmap_slice_committed(r, true)) {
2608     // Some other region from the group is already committed, meaning the bitmap
2609     // slice is already committed, we exit right away.
2610     return true;
2611   }
2612 
2613   // Commit the bitmap slice:
2614   size_t slice = r->index() / _bitmap_regions_per_slice;
2615   size_t off = _bitmap_bytes_per_slice * slice;
2616   size_t len = _bitmap_bytes_per_slice;
2617   char* start = (char*) _bitmap_region.start() + off;
2618 
2619   if (!os::commit_memory(start, len, false)) {
       // Commit failed; let the caller decide how to react.
2620     return false;
2621   }
2622 
2623   if (AlwaysPreTouch) {
       // Touch the freshly committed pages now so later bitmap accesses do not
       // take page faults.
2624     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2625   }
2626 
2627   return true;
2628 }
2629 
// Uncommit the marking-bitmap slice covering region r, unless another region
// in the same slice group still needs it. Returns false only if the OS
// uncommit fails.
2630 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2631   shenandoah_assert_heaplocked();
2632 
2633   // Bitmaps in special regions do not need uncommits
2634   if (_bitmap_region_special) {
2635     return true;
2636   }
2637 
2638   if (is_bitmap_slice_committed(r, true)) {
2639     // Some other region from the group is still committed, meaning the bitmap
2640     // slice should stay committed, exit right away.
2641     return true;
2642   }
2643 
2644   // Uncommit the bitmap slice:
2645   size_t slice = r->index() / _bitmap_regions_per_slice;
2646   size_t off = _bitmap_bytes_per_slice * slice;
2647   size_t len = _bitmap_bytes_per_slice;
2648   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
       // Uncommit failed; report failure to the caller.
2649     return false;




2650   }
2651   return true;
2652 }
2653 
2654 void ShenandoahHeap::forbid_uncommit() {
2655   if (_uncommit_thread != nullptr) {
2656     _uncommit_thread->forbid_uncommit();
2657   }
2658 }
2659 
2660 void ShenandoahHeap::allow_uncommit() {
2661   if (_uncommit_thread != nullptr) {
2662     _uncommit_thread->allow_uncommit();
2663   }
2664 }
2665 
2666 #ifdef ASSERT
2667 bool ShenandoahHeap::is_uncommit_in_progress() {
2668   if (_uncommit_thread != nullptr) {
2669     return _uncommit_thread->is_uncommit_in_progress();
2670   }
2671   return false;

  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "cds/archiveHeapWriter.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/fullGCForwarding.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/gc_globals.hpp"
  36 #include "gc/shared/locationPrinter.inline.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "gc/shared/plab.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  43 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  44 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  45 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  46 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  47 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  49 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  50 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  51 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  52 #include "gc/shenandoah/shenandoahControlThread.hpp"
  53 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  54 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  55 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"

 185   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 186 
 187   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 188   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 189 
 190   _num_regions = ShenandoahHeapRegion::region_count();
 191   assert(_num_regions == (max_byte_size / reg_size_bytes),
 192          "Regions should cover entire heap exactly: %zu != %zu/%zu",
 193          _num_regions, max_byte_size, reg_size_bytes);
 194 
 195   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 196   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 197   assert(num_committed_regions <= _num_regions, "sanity");
 198   _initial_size = num_committed_regions * reg_size_bytes;
 199 
 200   size_t num_min_regions = min_byte_size / reg_size_bytes;
 201   num_min_regions = MIN2(num_min_regions, _num_regions);
 202   assert(num_min_regions <= _num_regions, "sanity");
 203   _minimum_size = num_min_regions * reg_size_bytes;
 204 
 205   _soft_max_size = SoftMaxHeapSize;

 206 
 207   _committed = _initial_size;
 208 
 209   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 210   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 211   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 212 
 213   //
 214   // Reserve and commit memory for heap
 215   //
 216 
 217   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 218   initialize_reserved_region(heap_rs);
 219   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 220   _heap_region_special = heap_rs.special();
 221 
 222   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 223          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 224   os::trace_page_sizes_for_requested_size("Heap",
 225                                           max_byte_size, heap_alignment,

 507     } else {
 508       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 509     }
 510   } else {
 511     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 512   }
 513   _gc_mode->initialize_flags();
 514   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 515     vm_exit_during_initialization(
 516             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 517                     _gc_mode->name()));
 518   }
 519   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 520     vm_exit_during_initialization(
 521             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 522                     _gc_mode->name()));
 523   }
 524 }
 525 
 526 void ShenandoahHeap::initialize_heuristics() {
       // Create the single global generation spanning the whole heap, then let
       // it choose heuristics appropriate for the current GC mode.
 527   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity());
 528   _global_generation->initialize_heuristics(mode());
 529 }
 530 
 531 #ifdef _MSC_VER
 532 #pragma warning( push )
 533 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 534 #endif
 535 
 536 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 537   CollectedHeap(),
 538   _gc_generation(nullptr),
 539   _active_generation(nullptr),
 540   _initial_size(0),
 541   _committed(0),
 542   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 543   _workers(nullptr),
 544   _safepoint_workers(nullptr),
 545   _heap_region_special(false),
 546   _num_regions(0),
 547   _regions(nullptr),

1221 private:
1222   bool const _resize;
1223 public:
1224   explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1225   void do_thread(Thread* thread) override {
         // Retire this thread's GCLAB: stop allocating from it and make its
         // unused tail parsable.
1226     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1227     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1228     gclab->retire();
         // When resizing was requested, zero the recorded GCLAB size so the
         // next cycle sizes the buffer from scratch.
1229     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1230       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1231     }
1232 
         // In generational mode each thread also carries a PLAB for old-gen
         // (promotion) allocations; retire it as well.
1233     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1234       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1235       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1236 
1237       // There are two reasons to retire all plabs between old-gen evacuation passes.
1238       //  1. We need to make the plab memory parsable by remembered-set scanning.
1239       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1240       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1241 
1242       // Re-enable promotions for the next evacuation phase.
1243       ShenandoahThreadLocalData::enable_plab_promotions(thread);
1244 
1245       // Reset the fill size for next evacuation phase.
1246       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1247         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1248       }
1249     }
1250   }
1251 };
1252 
1253 class ShenandoahGCStatePropagator : public HandshakeClosure {
1254 public:
1255   explicit ShenandoahGCStatePropagator(char gc_state) :
1256     HandshakeClosure("Shenandoah GC State Change"),
1257     _gc_state(gc_state) {}
1258 
1259   void do_thread(Thread* thread) override {
1260     ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1261   }
1262 private:
1263   char _gc_state;
1264 };
1265 

1767   ShenandoahScanObjectStack oop_stack;
1768   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1769   // Seed the stack with root scan
1770   scan_roots_for_iteration(&oop_stack, &oops);
1771 
1772   // Work through the oop stack to traverse heap
1773   while (! oop_stack.is_empty()) {
1774     oop obj = oop_stack.pop();
1775     assert(oopDesc::is_oop(obj), "must be a valid oop");
1776     cl->do_object(obj);
1777     obj->oop_iterate(&oops);
1778   }
1779 
1780   assert(oop_stack.is_empty(), "should be empty");
1781   // Reclaim bitmap
1782   reclaim_aux_bitmap_for_iteration();
1783 }
1784 
1785 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1786   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1787   if (!_aux_bitmap_region_special) {
1788     bool success = os::commit_memory((char *) _aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false);
1789     if (!success) {
1790       log_warning(gc)("Auxiliary marking bitmap commit failed: " PTR_FORMAT " (%zu bytes)",
1791                       p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1792       return false;
1793     }
1794   }

1795   _aux_bit_map.clear();
1796   return true;
1797 }
1798 
1799 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1800   // Process GC roots according to current GC cycle
1801   // This populates the work stack with initial objects
1802   // It is important to relinquish the associated locks before diving
1803   // into heap dumper
1804   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1805   ShenandoahHeapIterationRootScanner rp(n_workers);
1806   rp.roots_do(oops);
1807 }
1808 
1809 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1810   if (!_aux_bitmap_region_special) {
1811     bool success = os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
1812     if (!success) {
1813       log_warning(gc)("Auxiliary marking bitmap uncommit failed: " PTR_FORMAT " (%zu bytes)",
1814                       p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1815       assert(false, "Auxiliary marking bitmap uncommit should always succeed");
1816     }
1817   }
1818 }
1819 
1820 // Closure for parallelly iterate objects
1821 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1822 private:
1823   MarkBitMap* _bitmap;
1824   ShenandoahObjToScanQueue* _queue;
1825   ShenandoahHeap* const _heap;
1826   ShenandoahMarkingContext* const _marking_context;
1827 
1828   template <class T>
1829   void do_oop_work(T* p) {
1830     T o = RawAccess<>::oop_load(p);
1831     if (!CompressedOops::is_null(o)) {
1832       oop obj = CompressedOops::decode_not_null(o);
1833       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1834         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1835         return;
1836       }

2591     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2592     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2593   }
2594 }
2595 
2596 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2597   size_t slice = r->index() / _bitmap_regions_per_slice;
2598 
2599   size_t regions_from = _bitmap_regions_per_slice * slice;
2600   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2601   for (size_t g = regions_from; g < regions_to; g++) {
2602     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2603     if (skip_self && g == r->index()) continue;
2604     if (get_region(g)->is_committed()) {
2605       return true;
2606     }
2607   }
2608   return false;
2609 }
2610 
// Commit backing memory for the marking-bitmap slice that covers region r.
// Several regions share one slice, so the slice may already be committed on
// behalf of a sibling region. Commit failure exits the VM.
2611 void ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2612   shenandoah_assert_heaplocked();
2613   assert(!is_bitmap_region_special(), "Not for special memory");




2614 
2615   if (is_bitmap_slice_committed(r, true)) {
2616     // Some other region from the group is already committed, meaning the bitmap
2617     // slice is already committed, we exit right away.
2618     return;
2619   }
2620 
2621   // Commit the bitmap slice:
2622   size_t slice = r->index() / _bitmap_regions_per_slice;
2623   size_t off = _bitmap_bytes_per_slice * slice;
2624   size_t len = _bitmap_bytes_per_slice;
2625   char* start = (char*) _bitmap_region.start() + off;
2626 
2627   os::commit_memory_or_exit(start, len, false, "Unable to commit bitmap slice");


2628 
2629   if (AlwaysPreTouch) {
       // Touch the freshly committed pages now so later bitmap accesses do not
       // take page faults.
2630     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2631   }


2632 }
2633 
// Uncommit the marking-bitmap slice covering region r, unless another region
// in the same slice group still needs it. Uncommit failure is logged and, in
// debug builds, treated as a hard error.
2634 void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2635   shenandoah_assert_heaplocked();
2636   assert(!is_bitmap_region_special(), "Not for special memory");




2637 
2638   if (is_bitmap_slice_committed(r, true)) {
2639     // Some other region from the group is still committed, meaning the bitmap
2640     // slice should stay committed, exit right away.
2641     return;
2642   }
2643 
2644   // Uncommit the bitmap slice:
2645   size_t slice = r->index() / _bitmap_regions_per_slice;
2646   size_t off = _bitmap_bytes_per_slice * slice;
2647   size_t len = _bitmap_bytes_per_slice;
2648 
2649   char* addr = (char*) _bitmap_region.start() + off;
2650   bool success = os::uncommit_memory(addr, len);
2651   if (!success) {
2652     log_warning(gc)("Bitmap slice uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(addr), len);
2653     assert(false, "Bitmap slice uncommit should always succeed");
2654   }

2655 }
2656 
2657 void ShenandoahHeap::forbid_uncommit() {
2658   if (_uncommit_thread != nullptr) {
2659     _uncommit_thread->forbid_uncommit();
2660   }
2661 }
2662 
2663 void ShenandoahHeap::allow_uncommit() {
2664   if (_uncommit_thread != nullptr) {
2665     _uncommit_thread->allow_uncommit();
2666   }
2667 }
2668 
2669 #ifdef ASSERT
2670 bool ShenandoahHeap::is_uncommit_in_progress() {
2671   if (_uncommit_thread != nullptr) {
2672     return _uncommit_thread->is_uncommit_in_progress();
2673   }
2674   return false;
< prev index next >