15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "memory/allocation.hpp"
27 #include "memory/universe.hpp"
28
29 #include "gc/shared/gcArguments.hpp"
30 #include "gc/shared/gcTimer.hpp"
31 #include "gc/shared/gcTraceTime.inline.hpp"
32 #include "gc/shared/locationPrinter.inline.hpp"
33 #include "gc/shared/memAllocator.hpp"
34 #include "gc/shared/plab.hpp"
35 #include "gc/shared/tlab_globals.hpp"
36
37 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
38 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
39 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
40 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
41 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
42 #include "gc/shenandoah/shenandoahControlThread.hpp"
43 #include "gc/shenandoah/shenandoahFreeSet.hpp"
44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
45 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
46 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
47 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
48 #include "gc/shenandoah/shenandoahInitLogger.hpp"
49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
50 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
51 #include "gc/shenandoah/shenandoahMetrics.hpp"
52 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
53 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
54 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
384 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
385 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
386 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
387 }
388
389 _monitoring_support = new ShenandoahMonitoringSupport(this);
390 _phase_timings = new ShenandoahPhaseTimings(max_workers());
391 ShenandoahCodeRoots::initialize();
392
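// The pacer throttles mutator allocations so they do not outrun concurrent GC progress;
// it is only created when -XX:+ShenandoahPacing is enabled.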
393 if (ShenandoahPacing) {
394 _pacer = new ShenandoahPacer(this);
395 _pacer->setup_for_idle();
396 } else {
397 _pacer = NULL;
398 }
399
400 _control_thread = new ShenandoahControlThread();
401
402 ShenandoahInitLogger::print();
403
404 return JNI_OK;
405 }
406
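// Selects the GC mode from -XX:ShenandoahGCMode (satb, iu, or passive);
// diagnostic modes additionally require -XX:+UnlockDiagnosticVMOptions.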
407 void ShenandoahHeap::initialize_mode() {
408 if (ShenandoahGCMode != NULL) {
409 if (strcmp(ShenandoahGCMode, "satb") == 0) {
410 _gc_mode = new ShenandoahSATBMode();
411 } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
412 _gc_mode = new ShenandoahIUMode();
413 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
414 _gc_mode = new ShenandoahPassiveMode();
415 } else {
416 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
417 }
418 } else {
419 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
420 }
421 _gc_mode->initialize_flags();
422 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
423 vm_exit_during_initialization(
934 // Expand and retry allocation
935 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
936 if (result != NULL) {
937 return result;
938 }
939
940 // Out of memory
941 return NULL;
942 }
943
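// Evacuates each live object of a collection set region, skipping objects
// that have already been forwarded to their new location.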
944 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
945 private:
946 ShenandoahHeap* const _heap;
947 Thread* const _thread;
948 public:
949 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
950 _heap(heap), _thread(Thread::current()) {}
951
952 void do_object(oop p) {
953 shenandoah_assert_marked(NULL, p);
954 if (!p->is_forwarded()) {
955 _heap->evacuate_object(p, _thread);
956 }
957 }
958 };
959
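// Worker gang task that evacuates collection set regions, either concurrently
// or during a pause depending on the 'concurrent' flag.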
960 class ShenandoahEvacuationTask : public AbstractGangTask {
961 private:
962 ShenandoahHeap* const _sh;
963 ShenandoahCollectionSet* const _cs;
964 bool _concurrent;
965 public:
966 ShenandoahEvacuationTask(ShenandoahHeap* sh,
967 ShenandoahCollectionSet* cs,
968 bool concurrent) :
969 AbstractGangTask("Shenandoah Evacuation"),
970 _sh(sh),
971 _cs(cs),
972 _concurrent(concurrent)
973 {}
974
1222 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1223 return _free_set->capacity();
1224 }
1225
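// Root-scanning closure for heap iteration: marks each reachable object in the
// auxiliary bitmap and pushes newly marked objects onto the scan stack.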
1226 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1227 private:
1228 MarkBitMap* _bitmap;
1229 ShenandoahScanObjectStack* _oop_stack;
1230 ShenandoahHeap* const _heap;
1231 ShenandoahMarkingContext* const _marking_context;
1232
1233 template <class T>
1234 void do_oop_work(T* p) {
1235 T o = RawAccess<>::oop_load(p);
1236 if (!CompressedOops::is_null(o)) {
1237 oop obj = CompressedOops::decode_not_null(o);
1238 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1239 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1240 return;
1241 }
1242 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1243
1244 assert(oopDesc::is_oop(obj), "must be a valid oop");
1245 if (!_bitmap->is_marked(obj)) {
1246 _bitmap->mark(obj);
1247 _oop_stack->push(obj);
1248 }
1249 }
1250 }
1251 public:
1252 ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1253 _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1254 _marking_context(_heap->marking_context()) {}
1255 void do_oop(oop* p) { do_oop_work(p); }
1256 void do_oop(narrowOop* p) { do_oop_work(p); }
1257 };
1258
1259 /*
1260 * This is public API, used in preparation of object_iterate().
1261 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1262 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1278 * wiped the bitmap in preparation for next marking).
1279 *
1280 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1281 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1282 * is allowed to report dead objects, but is not required to do so.
1283 */
1284 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1285 // Reset bitmap
1286 if (!prepare_aux_bitmap_for_iteration())
1287 return;
1288
1289 ShenandoahScanObjectStack oop_stack;
1290 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1291 // Seed the stack with root scan
1292 scan_roots_for_iteration(&oop_stack, &oops);
1293
1294 // Work through the oop stack to traverse heap
1295 while (! oop_stack.is_empty()) {
1296 oop obj = oop_stack.pop();
1297 assert(oopDesc::is_oop(obj), "must be a valid oop");
1298 cl->do_object(obj);
1299 obj->oop_iterate(&oops);
1300 }
1301
1302 assert(oop_stack.is_empty(), "should be empty");
1303 // Reclaim bitmap
1304 reclaim_aux_bitmap_for_iteration();
1305 }
1306
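// Commits the auxiliary marking bitmap if it is not special-committed, then clears it;
// returns false (aborting the iteration) if the commit fails.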
1307 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1308 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1309
1310 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1311 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1312 return false;
1313 }
1314 // Reset bitmap
1315 _aux_bit_map.clear();
1316 return true;
1317 }
1331 }
1332 }
1333
1334 // Closure for iterating objects in parallel
1335 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1336 private:
1337 MarkBitMap* _bitmap;
1338 ShenandoahObjToScanQueue* _queue;
1339 ShenandoahHeap* const _heap;
1340 ShenandoahMarkingContext* const _marking_context;
1341
1342 template <class T>
1343 void do_oop_work(T* p) {
1344 T o = RawAccess<>::oop_load(p);
1345 if (!CompressedOops::is_null(o)) {
1346 oop obj = CompressedOops::decode_not_null(o);
1347 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1348 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1349 return;
1350 }
1351 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1352
1353 assert(oopDesc::is_oop(obj), "Must be a valid oop");
1354 if (_bitmap->par_mark(obj)) {
1355 _queue->push(ShenandoahMarkTask(obj));
1356 }
1357 }
1358 }
1359 public:
1360 ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1361 _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1362 _marking_context(_heap->marking_context()) {}
1363 void do_oop(oop* p) { do_oop_work(p); }
1364 void do_oop(narrowOop* p) { do_oop_work(p); }
1365 };
1366
1367 // Object iterator for parallel heap iteration.
1368 // The root scanning phase happens in the constructor, to prepare the
1369 // parallel marking queues.
1370 // Every worker processes its own marking queue; work stealing is used
1371 // to balance the workload.
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "memory/allocation.hpp"
27 #include "memory/universe.hpp"
28
29 #include "gc/shared/gcArguments.hpp"
30 #include "gc/shared/gcTimer.hpp"
31 #include "gc/shared/gcTraceTime.inline.hpp"
32 #include "gc/shared/locationPrinter.inline.hpp"
33 #include "gc/shared/memAllocator.hpp"
34 #include "gc/shared/plab.hpp"
35 #include "gc/shared/slidingForwarding.hpp"
36 #include "gc/shared/tlab_globals.hpp"
37
38 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
39 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
40 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
41 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
42 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
43 #include "gc/shenandoah/shenandoahControlThread.hpp"
44 #include "gc/shenandoah/shenandoahFreeSet.hpp"
45 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
46 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
47 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
48 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
49 #include "gc/shenandoah/shenandoahInitLogger.hpp"
50 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
51 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
52 #include "gc/shenandoah/shenandoahMetrics.hpp"
53 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
54 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
55 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
385 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
386 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
387 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
388 }
389
390 _monitoring_support = new ShenandoahMonitoringSupport(this);
391 _phase_timings = new ShenandoahPhaseTimings(max_workers());
392 ShenandoahCodeRoots::initialize();
393
394 if (ShenandoahPacing) {
395 _pacer = new ShenandoahPacer(this);
396 _pacer->setup_for_idle();
397 } else {
398 _pacer = NULL;
399 }
400
401 _control_thread = new ShenandoahControlThread();
402
403 ShenandoahInitLogger::print();
404
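// Assumption: SlidingForwarding is not part of upstream Shenandoah initialization; inferred from the
// name and arguments, this presumably sets up a per-region table for compactly encoding forwarding
// information, sized by the heap range and region size.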
405 SlidingForwarding::initialize(_heap_region, ShenandoahHeapRegion::region_size_words());
406
407 return JNI_OK;
408 }
409
410 void ShenandoahHeap::initialize_mode() {
411 if (ShenandoahGCMode != NULL) {
412 if (strcmp(ShenandoahGCMode, "satb") == 0) {
413 _gc_mode = new ShenandoahSATBMode();
414 } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
415 _gc_mode = new ShenandoahIUMode();
416 } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
417 _gc_mode = new ShenandoahPassiveMode();
418 } else {
419 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
420 }
421 } else {
422 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
423 }
424 _gc_mode->initialize_flags();
425 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
426 vm_exit_during_initialization(
937 // Expand and retry allocation
938 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
939 if (result != NULL) {
940 return result;
941 }
942
943 // Out of memory
944 return NULL;
945 }
946
947 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
948 private:
949 ShenandoahHeap* const _heap;
950 Thread* const _thread;
951 public:
952 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
953 _heap(heap), _thread(Thread::current()) {}
954
955 void do_object(oop p) {
956 shenandoah_assert_marked(NULL, p);
957 if (!ShenandoahForwarding::is_forwarded(p)) {
958 _heap->evacuate_object(p, _thread);
959 }
960 }
961 };
962
963 class ShenandoahEvacuationTask : public AbstractGangTask {
964 private:
965 ShenandoahHeap* const _sh;
966 ShenandoahCollectionSet* const _cs;
967 bool _concurrent;
968 public:
969 ShenandoahEvacuationTask(ShenandoahHeap* sh,
970 ShenandoahCollectionSet* cs,
971 bool concurrent) :
972 AbstractGangTask("Shenandoah Evacuation"),
973 _sh(sh),
974 _cs(cs),
975 _concurrent(concurrent)
976 {}
977
1225 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1226 return _free_set->capacity();
1227 }
1228
1229 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1230 private:
1231 MarkBitMap* _bitmap;
1232 ShenandoahScanObjectStack* _oop_stack;
1233 ShenandoahHeap* const _heap;
1234 ShenandoahMarkingContext* const _marking_context;
1235
1236 template <class T>
1237 void do_oop_work(T* p) {
1238 T o = RawAccess<>::oop_load(p);
1239 if (!CompressedOops::is_null(o)) {
1240 oop obj = CompressedOops::decode_not_null(o);
1241 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1242 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1243 return;
1244 }
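// Resolve through the load reference barrier so the closure sees the canonical (to-space) copy of the object.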
1245 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1246
1247 assert(oopDesc::is_oop(obj), "must be a valid oop");
1248 if (!_bitmap->is_marked(obj)) {
1249 _bitmap->mark(obj);
1250 _oop_stack->push(obj);
1251 }
1252 }
1253 }
1254 public:
1255 ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1256 _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1257 _marking_context(_heap->marking_context()) {}
1258 void do_oop(oop* p) { do_oop_work(p); }
1259 void do_oop(narrowOop* p) { do_oop_work(p); }
1260 };
1261
1262 /*
1263 * This is public API, used in preparation of object_iterate().
1264 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1265 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1281 * wiped the bitmap in preparation for next marking).
1282 *
1283 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1284 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1285 * is allowed to report dead objects, but is not required to do so.
1286 */
1287 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1288 // Reset bitmap
1289 if (!prepare_aux_bitmap_for_iteration())
1290 return;
1291
1292 ShenandoahScanObjectStack oop_stack;
1293 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1294 // Seed the stack with root scan
1295 scan_roots_for_iteration(&oop_stack, &oops);
1296
1297 // Work through the oop stack to traverse heap
1298 while (! oop_stack.is_empty()) {
1299 oop obj = oop_stack.pop();
1300 assert(oopDesc::is_oop(obj), "must be a valid oop");
1301 shenandoah_assert_not_in_cset_except(NULL, obj, cancelled_gc());
1302 cl->do_object(obj);
1303 obj->oop_iterate(&oops);
1304 }
1305
1306 assert(oop_stack.is_empty(), "should be empty");
1307 // Reclaim bitmap
1308 reclaim_aux_bitmap_for_iteration();
1309 }
1310
1311 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1312 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1313
1314 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1315 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1316 return false;
1317 }
1318 // Reset bitmap
1319 _aux_bit_map.clear();
1320 return true;
1321 }
1335 }
1336 }
1337
1338 // Closure for iterating objects in parallel
1339 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1340 private:
1341 MarkBitMap* _bitmap;
1342 ShenandoahObjToScanQueue* _queue;
1343 ShenandoahHeap* const _heap;
1344 ShenandoahMarkingContext* const _marking_context;
1345
1346 template <class T>
1347 void do_oop_work(T* p) {
1348 T o = RawAccess<>::oop_load(p);
1349 if (!CompressedOops::is_null(o)) {
1350 oop obj = CompressedOops::decode_not_null(o);
1351 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1352 // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1353 return;
1354 }
1355 obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1356
1357 assert(oopDesc::is_oop(obj), "Must be a valid oop");
1358 if (_bitmap->par_mark(obj)) {
1359 _queue->push(ShenandoahMarkTask(obj));
1360 }
1361 }
1362 }
1363 public:
1364 ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1365 _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1366 _marking_context(_heap->marking_context()) {}
1367 void do_oop(oop* p) { do_oop_work(p); }
1368 void do_oop(narrowOop* p) { do_oop_work(p); }
1369 };
1370
1371 // Object iterator for parallel heap iteration.
1372 // The root scanning phase happens in the constructor, to prepare the
1373 // parallel marking queues.
1374 // Every worker processes its own marking queue; work stealing is used
1375 // to balance the workload.