
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/universe.hpp"
  28 
  29 #include "gc/shared/gcArguments.hpp"
  30 #include "gc/shared/gcTimer.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/locationPrinter.inline.hpp"
  33 #include "gc/shared/memAllocator.hpp"
  34 #include "gc/shared/plab.hpp"

  35 #include "gc/shared/tlab_globals.hpp"
  36 
  37 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  38 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  39 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  40 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  41 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  42 #include "gc/shenandoah/shenandoahControlThread.hpp"
  43 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  45 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  46 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  47 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  48 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  50 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  51 #include "gc/shenandoah/shenandoahMetrics.hpp"
  52 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  53 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  54 #include "gc/shenandoah/shenandoahPacer.inline.hpp"

 174   _soft_max_size = _num_regions * reg_size_bytes;
 175 
 176   _committed = _initial_size;
 177 
 178   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 179   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 180   size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 181 
 182   //
 183   // Reserve and commit memory for heap
 184   //
 185 
 186   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 187   initialize_reserved_region(heap_rs);
 188   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 189   _heap_region_special = heap_rs.special();
 190 
 191   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 192          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 193 


 194 #if SHENANDOAH_OPTIMIZED_MARKTASK
 195   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 196   // Fail if we ever attempt to address more than we can.
 197   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 198     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 199                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 200                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 201                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 202     vm_exit_during_initialization("Fatal Error", buf);
 203   }
 204 #endif
 205 
 206   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 207   if (!_heap_region_special) {
 208     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 209                               "Cannot commit heap memory");
 210   }
 211 
 212   //
 213   // Reserve and commit memory for bitmap(s)

 934   // Expand and retry allocation
 935   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 936   if (result != NULL) {
 937     return result;
 938   }
 939 
 940   // Out of memory
 941   return NULL;
 942 }
 943 
 944 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 945 private:
 946   ShenandoahHeap* const _heap;
 947   Thread* const _thread;
 948 public:
 949   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 950     _heap(heap), _thread(Thread::current()) {}
 951 
 952   void do_object(oop p) {
 953     shenandoah_assert_marked(NULL, p);
 954     if (!p->is_forwarded()) {
 955       _heap->evacuate_object(p, _thread);
 956     }
 957   }
 958 };
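For orientation, here is a hedged sketch of how a closure like this is typically driven: an evacuation task claims collection-set regions and walks each region's marked objects. The claim_next()/has_live()/marked_object_iterate() shapes below are assumptions about the surrounding Shenandoah code, not part of this page.

// Hedged sketch of the driver loop (assumed shapes, not shown on this page):
ShenandoahConcurrentEvacuateRegionObjectClosure cl(heap);
ShenandoahHeapRegion* r;
while ((r = cs->claim_next()) != NULL) {   // claim one cset region at a time
  if (r->has_live()) {
    heap->marked_object_iterate(r, &cl);   // evacuate every live marked object
  }
}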
 959 
 960 class ShenandoahEvacuationTask : public AbstractGangTask {
 961 private:
 962   ShenandoahHeap* const _sh;
 963   ShenandoahCollectionSet* const _cs;
 964   bool _concurrent;
 965 public:
 966   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 967                            ShenandoahCollectionSet* cs,
 968                            bool concurrent) :
 969     AbstractGangTask("Shenandoah Evacuation"),
 970     _sh(sh),
 971     _cs(cs),
 972     _concurrent(concurrent)
 973   {}
 974 

1222 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1223   return _free_set->capacity();
1224 }
1225 
1226 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1227 private:
1228   MarkBitMap* _bitmap;
1229   ShenandoahScanObjectStack* _oop_stack;
1230   ShenandoahHeap* const _heap;
1231   ShenandoahMarkingContext* const _marking_context;
1232 
1233   template <class T>
1234   void do_oop_work(T* p) {
1235     T o = RawAccess<>::oop_load(p);
1236     if (!CompressedOops::is_null(o)) {
1237       oop obj = CompressedOops::decode_not_null(o);
1238       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
 1239         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1240         return;
1241       }
1242       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1243 
1244       assert(oopDesc::is_oop(obj), "must be a valid oop");
1245       if (!_bitmap->is_marked(obj)) {
1246         _bitmap->mark(obj);
1247         _oop_stack->push(obj);
1248       }
1249     }
1250   }
1251 public:
1252   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1253     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1254     _marking_context(_heap->marking_context()) {}
1255   void do_oop(oop* p)       { do_oop_work(p); }
1256   void do_oop(narrowOop* p) { do_oop_work(p); }
1257 };
1258 
1259 /*
1260  * This is public API, used in preparation of object_iterate().
 1261  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1262  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can

1278  * wiped the bitmap in preparation for next marking).
1279  *
1280  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1281  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1282  * is allowed to report dead objects, but is not required to do so.
1283  */
1284 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1285   // Reset bitmap
1286   if (!prepare_aux_bitmap_for_iteration())
1287     return;
1288 
1289   ShenandoahScanObjectStack oop_stack;
1290   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1291   // Seed the stack with root scan
1292   scan_roots_for_iteration(&oop_stack, &oops);
1293 
1294   // Work through the oop stack to traverse heap
 1295   while (!oop_stack.is_empty()) {
1296     oop obj = oop_stack.pop();
1297     assert(oopDesc::is_oop(obj), "must be a valid oop");

1298     cl->do_object(obj);
1299     obj->oop_iterate(&oops);
1300   }
1301 
1302   assert(oop_stack.is_empty(), "should be empty");
1303   // Reclaim bitmap
1304   reclaim_aux_bitmap_for_iteration();
1305 }
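Since object_iterate() is public API, a minimal caller sketch may help. The counting closure is hypothetical; only ObjectClosure, do_object(oop), and the safepoint requirement come from the code above.

// Hypothetical caller, at a safepoint:
class CountingObjectClosure : public ObjectClosure {
  size_t _count;
public:
  CountingObjectClosure() : _count(0) {}
  void do_object(oop obj) { _count++; }   // invoked once per reachable object
  size_t count() const { return _count; }
};

CountingObjectClosure cnt;
ShenandoahHeap::heap()->object_iterate(&cnt);   // single marking traversal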
1306 
1307 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1308   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1309 
1310   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1311     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1312     return false;
1313   }
1314   // Reset bitmap
1315   _aux_bit_map.clear();
1316   return true;
1317 }

1331   }
1332 }
1333 
 1334 // Closure for iterating objects in parallel
1335 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1336 private:
1337   MarkBitMap* _bitmap;
1338   ShenandoahObjToScanQueue* _queue;
1339   ShenandoahHeap* const _heap;
1340   ShenandoahMarkingContext* const _marking_context;
1341 
1342   template <class T>
1343   void do_oop_work(T* p) {
1344     T o = RawAccess<>::oop_load(p);
1345     if (!CompressedOops::is_null(o)) {
1346       oop obj = CompressedOops::decode_not_null(o);
1347       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
 1348         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1349         return;
1350       }
1351       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1352 
1353       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1354       if (_bitmap->par_mark(obj)) {
1355         _queue->push(ShenandoahMarkTask(obj));
1356       }
1357     }
1358   }
1359 public:
1360   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1361     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1362     _marking_context(_heap->marking_context()) {}
1363   void do_oop(oop* p)       { do_oop_work(p); }
1364   void do_oop(narrowOop* p) { do_oop_work(p); }
1365 };
1366 
 1367 // Object iterator for parallel heap iteration.
 1368 // The root scanning phase happens in the constructor, as preparation of the
 1369 // parallel marking queues.
 1370 // Every worker processes its own marking queue. Work stealing is used
 1371 // to balance the workload.

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/universe.hpp"
  28 
  29 #include "gc/shared/gcArguments.hpp"
  30 #include "gc/shared/gcTimer.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/locationPrinter.inline.hpp"
  33 #include "gc/shared/memAllocator.hpp"
  34 #include "gc/shared/plab.hpp"
  35 #include "gc/shared/slidingForwarding.hpp"
  36 #include "gc/shared/tlab_globals.hpp"
  37 
  38 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  39 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  41 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  42 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  43 #include "gc/shenandoah/shenandoahControlThread.hpp"
  44 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  45 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  46 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  47 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  48 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  49 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  50 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  51 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  52 #include "gc/shenandoah/shenandoahMetrics.hpp"
  53 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  54 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  55 #include "gc/shenandoah/shenandoahPacer.inline.hpp"

 175   _soft_max_size = _num_regions * reg_size_bytes;
 176 
 177   _committed = _initial_size;
 178 
 179   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 180   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 181   size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 182 
 183   //
 184   // Reserve and commit memory for heap
 185   //
 186 
 187   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 188   initialize_reserved_region(heap_rs);
 189   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 190   _heap_region_special = heap_rs.special();
 191 
 192   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 193          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 194 
 195   _forwarding = new SlidingForwarding(_heap_region, ShenandoahHeapRegion::region_size_words_shift());
 196 
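The new SlidingForwarding is constructed over the heap range with the region-size shift. As a rough, self-contained illustration of the underlying idea (a toy model, not the gc/shared/slidingForwarding.hpp API): sliding compaction moves a region's live objects into at most a few target regions, so a forwarding can be stored as a small region selector plus an in-region offset instead of a full pointer.

#include <cstdint>
#include <cassert>

// Toy model only; the two-targets-per-region limit and all names here are
// assumptions about the technique, not the real HotSpot class.
struct ToySlidingForwarding {
  uintptr_t _target_base[2] = {0, 0};   // bases of up to two target regions
  int _used = 0;

  // Record a move to 'to'; return a compact encoding: the low bit selects
  // the target region, the remaining bits are the word offset within it.
  uintptr_t encode(uintptr_t to, uintptr_t region_size) {
    uintptr_t base = to - (to % region_size);
    int idx = -1;
    for (int i = 0; i < _used; i++) {
      if (_target_base[i] == base) idx = i;
    }
    if (idx < 0) {
      assert(_used < 2 && "sliding compaction fills at most two targets");
      idx = _used;
      _target_base[_used++] = base;
    }
    return (((to - base) / sizeof(void*)) << 1) | (uintptr_t)idx;
  }

  uintptr_t decode(uintptr_t enc) const {
    return _target_base[enc & 1] + (enc >> 1) * sizeof(void*);
  }
};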
 197 #if SHENANDOAH_OPTIMIZED_MARKTASK
 198   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 199   // Fail if we ever attempt to address more than we can.
 200   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 201     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 202                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 203                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 204                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 205     vm_exit_during_initialization("Fatal Error", buf);
 206   }
 207 #endif
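The check above exists because the optimized mark task packs chunk metadata into the same 64-bit word as the object pointer, leaving fewer bits for the address. A back-of-the-envelope sketch follows; the bit widths are assumed examples, and the real constants live in the mark task header:

// Assumed example widths, for illustration only:
const int chunk_bits = 10;                          // bits for the chunk index
const int pow_bits   = 5;                           // bits for the chunk size power
const int obj_bits   = 64 - chunk_bits - pow_bits;  // bits left for the pointer
const uintptr_t max_addr = uintptr_t(1) << obj_bits;
// A heap ending at or above max_addr cannot be encoded, hence the
// vm_exit_during_initialization() above.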
 208 
 209   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 210   if (!_heap_region_special) {
 211     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 212                               "Cannot commit heap memory");
 213   }
 214 
 215   //
 216   // Reserve and commit memory for bitmap(s)

 937   // Expand and retry allocation
 938   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 939   if (result != NULL) {
 940     return result;
 941   }
 942 
 943   // Out of memory
 944   return NULL;
 945 }
 946 
 947 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 948 private:
 949   ShenandoahHeap* const _heap;
 950   Thread* const _thread;
 951 public:
 952   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 953     _heap(heap), _thread(Thread::current()) {}
 954 
 955   void do_object(oop p) {
 956     shenandoah_assert_marked(NULL, p);
 957     if (!ShenandoahForwarding::is_forwarded(p)) {
 958       _heap->evacuate_object(p, _thread);
 959     }
 960   }
 961 };
 962 
 963 class ShenandoahEvacuationTask : public AbstractGangTask {
 964 private:
 965   ShenandoahHeap* const _sh;
 966   ShenandoahCollectionSet* const _cs;
 967   bool _concurrent;
 968 public:
 969   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 970                            ShenandoahCollectionSet* cs,
 971                            bool concurrent) :
 972     AbstractGangTask("Shenandoah Evacuation"),
 973     _sh(sh),
 974     _cs(cs),
 975     _concurrent(concurrent)
 976   {}
 977 

1225 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1226   return _free_set->capacity();
1227 }
1228 
1229 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1230 private:
1231   MarkBitMap* _bitmap;
1232   ShenandoahScanObjectStack* _oop_stack;
1233   ShenandoahHeap* const _heap;
1234   ShenandoahMarkingContext* const _marking_context;
1235 
1236   template <class T>
1237   void do_oop_work(T* p) {
1238     T o = RawAccess<>::oop_load(p);
1239     if (!CompressedOops::is_null(o)) {
1240       oop obj = CompressedOops::decode_not_null(o);
1241       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
 1242         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1243         return;
1244       }
1245       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1246 
1247       assert(oopDesc::is_oop(obj), "must be a valid oop");
1248       if (!_bitmap->is_marked(obj)) {
1249         _bitmap->mark(obj);
1250         _oop_stack->push(obj);
1251       }
1252     }
1253   }
1254 public:
1255   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1256     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1257     _marking_context(_heap->marking_context()) {}
1258   void do_oop(oop* p)       { do_oop_work(p); }
1259   void do_oop(narrowOop* p) { do_oop_work(p); }
1260 };
1261 
1262 /*
1263  * This is public API, used in preparation of object_iterate().
 1264  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1265  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can

1281  * wiped the bitmap in preparation for next marking).
1282  *
1283  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1284  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1285  * is allowed to report dead objects, but is not required to do so.
1286  */
1287 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1288   // Reset bitmap
1289   if (!prepare_aux_bitmap_for_iteration())
1290     return;
1291 
1292   ShenandoahScanObjectStack oop_stack;
1293   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1294   // Seed the stack with root scan
1295   scan_roots_for_iteration(&oop_stack, &oops);
1296 
1297   // Work through the oop stack to traverse heap
 1298   while (!oop_stack.is_empty()) {
1299     oop obj = oop_stack.pop();
1300     assert(oopDesc::is_oop(obj), "must be a valid oop");
1301     shenandoah_assert_not_in_cset_except(NULL, obj, cancelled_gc());
1302     cl->do_object(obj);
1303     obj->oop_iterate(&oops);
1304   }
1305 
1306   assert(oop_stack.is_empty(), "should be empty");
1307   // Reclaim bitmap
1308   reclaim_aux_bitmap_for_iteration();
1309 }
1310 
1311 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1312   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1313 
1314   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1315     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1316     return false;
1317   }
1318   // Reset bitmap
1319   _aux_bit_map.clear();
1320   return true;
1321 }
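The matching reclaim path is elided from this page. Mirroring the commit above, its plausible shape is sketched below; the actual body is not shown here, so treat this as an assumption:

// Plausible mirror of prepare_aux_bitmap_for_iteration() (sketch):
void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (!_aux_bitmap_region_special &&
      !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}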

1335   }
1336 }
1337 
 1338 // Closure for iterating objects in parallel
1339 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1340 private:
1341   MarkBitMap* _bitmap;
1342   ShenandoahObjToScanQueue* _queue;
1343   ShenandoahHeap* const _heap;
1344   ShenandoahMarkingContext* const _marking_context;
1345 
1346   template <class T>
1347   void do_oop_work(T* p) {
1348     T o = RawAccess<>::oop_load(p);
1349     if (!CompressedOops::is_null(o)) {
1350       oop obj = CompressedOops::decode_not_null(o);
1351       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
 1352         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1353         return;
1354       }
1355       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1356 
1357       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1358       if (_bitmap->par_mark(obj)) {
1359         _queue->push(ShenandoahMarkTask(obj));
1360       }
1361     }
1362   }
1363 public:
1364   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1365     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1366     _marking_context(_heap->marking_context()) {}
1367   void do_oop(oop* p)       { do_oop_work(p); }
1368   void do_oop(narrowOop* p) { do_oop_work(p); }
1369 };
1370 
 1371 // Object iterator for parallel heap iteration.
 1372 // The root scanning phase happens in the constructor, as preparation of the
 1373 // parallel marking queues.
 1374 // Every worker processes its own marking queue. Work stealing is used
 1375 // to balance the workload.
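To make the queue-plus-stealing scheme concrete, here is a hedged sketch of one worker's drain loop; the pop()/steal() shapes are assumed from HotSpot's generic task queues, and _queue_set, _object_closure, and worker_id are hypothetical names:

// Hedged sketch of a worker's loop (assumed queue API shapes):
ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
ShenandoahObjectIterateParScanClosure cl(&_aux_bit_map, q);
ShenandoahMarkTask task;
while (q->pop(task) || _queue_set->steal(worker_id, task)) {
  oop obj = task.obj();
  _object_closure->do_object(obj);   // report the object to the user closure
  obj->oop_iterate(&cl);             // discover and enqueue its references
}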