 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
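  // Note: _max_tlab_size is in heap words, since EpsilonMaxTLABSize is specified
  // in bytes; the counter update and heap printing steps below are thresholds in
  // bytes of heap usage.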
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);

  // ... (lines elided)

  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != nullptr) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try to fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
  // Cannot use verbose=true because Metaspace is not initialized
  return allocate_work(size, /* verbose = */false);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  _virtual_space.print_on(st);

  if (_space != nullptr) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }
  // ... (lines elided)
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved = stats.reserved();
  size_t committed = stats.committed();
  size_t used = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/thread.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/stack.inline.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
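  // Note: _max_tlab_size is in heap words, since EpsilonMaxTLABSize is specified
  // in bytes; the counter update and heap printing steps below are thresholds in
  // bytes of heap usage.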
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

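  // Size the marking bitmap for the sliding GC: MarkBitMap needs one bit per heap
  // word (about 1/64th of the heap), rounded up to page size so the bitmap can be
  // committed before a GC cycle and uncommitted after it.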
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  bitmap_size = align_up(bitmap_size, bitmap_page_size);

  // Initialize the marking bitmap, but do not commit it yet
  if (EpsilonSlidingGC) {
    ReservedSpace bitmap(bitmap_size, bitmap_page_size);
    MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
    _bitmap_region = MemRegion((HeapWord *) bitmap.base(), bitmap.size() / HeapWordSize);
    MemRegion heap_region = MemRegion((HeapWord *) heap_rs.base(), heap_rs.size() / HeapWordSize);
    _bitmap.initialize(heap_region, _bitmap_region);
  }

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);

  // ... (lines elided)

  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_or_collect_work(size);

  if (res != nullptr) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try to fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_or_collect_work(size);
}

HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
  // Cannot use verbose=true because Metaspace is not initialized
  return allocate_work(size, /* verbose = */false);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
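      // Other GC causes either trigger the sliding collection (when enabled), or are
      // ignored. If we are already at a safepoint (e.g. invoked from another VM
      // operation), collect in place; otherwise submit a VM operation to get to one.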
      if (EpsilonSlidingGC) {
        if (SafepointSynchronize::is_at_safepoint()) {
          entry_collect(cause);
        } else {
          vmentry_collect(cause);
        }
      } else {
        log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
      }
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  _virtual_space.print_on(st);

  if (_space != nullptr) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }
  // ... (lines elided)
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 percent_of(committed, reserved),
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 percent_of(used, reserved)
    );
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved = stats.reserved();
  size_t committed = stats.committed();
  size_t used = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            percent_of(committed, reserved),
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            percent_of(used, reserved)
    );
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}

// ------------------ EXPERIMENTAL MARK-COMPACT -------------------------------
//
// This implements a trivial Lisp2-style sliding collector:
//   https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
//
// The goal for this implementation is to be as simple as possible, ignoring
// non-trivial performance optimizations. This collector does not implement
// reference processing: no soft/weak/phantom/finalizable references are ever
// cleared. It also does not implement class unloading and other runtime
// cleanups.
//
// VM operation that executes the collection cycle under a safepoint
class VM_EpsilonCollect: public VM_Operation {
private:
  const GCCause::Cause _cause;
  EpsilonHeap* const _heap;
  static size_t _req_id;
public:
  VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(),
                                            _cause(cause),
                                            _heap(EpsilonHeap::heap()) {}

  VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
  const char* name() const { return "Epsilon Collection"; }

  virtual bool doit_prologue() {
    size_t id = Atomic::load_acquire(&_req_id);

    // Need to take the Heap lock before managing backing storage.
    Heap_lock->lock();

    // The Heap lock also naturally serializes GC requests, and allows us to coalesce
    // back-to-back GC requests from many threads. Avoid consecutive GCs if we started
    // waiting while another GC request was being handled.
    if (id < Atomic::load_acquire(&_req_id)) {
      Heap_lock->unlock();
      return false;
    }

    // No contenders. Start handling a new GC request.
    Atomic::inc(&_req_id);
    return true;
  }

  virtual void doit() {
    _heap->entry_collect(_cause);
  }

  virtual void doit_epilogue() {
    Heap_lock->unlock();
  }
};

size_t VM_EpsilonCollect::_req_id = 0;

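// Entry point for GC requests arriving outside a safepoint: wrap the collection
// into a VM operation that the VM thread executes at a safepoint.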
void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
  VM_EpsilonCollect vmop(cause);
  VMThread::execute(&vmop);
}

HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size, bool verbose) {
  HeapWord* res = allocate_work(size);
  if (res == nullptr && EpsilonSlidingGC && EpsilonImplicitGC) {
    vmentry_collect(GCCause::_allocation_failure);
    // TODO: This looks incorrect
    GCLocker::stall_until_clear();
    res = allocate_work(size, verbose);
  }
  return res;
}

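// Marking uses an explicit stack of oops instead of recursion, so that arbitrarily
// deep object graphs cannot overflow the native stack.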
typedef Stack<oop, mtGC> EpsilonMarkStack;

void EpsilonHeap::process_roots(OopClosure* cl) {
  // Need to tell runtime we are about to walk the roots with 1 thread
  StrongRootsScope scope(1);

  // Need to adapt oop closure for some special root types.
  CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
  MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations, true); // TODO: Keepalive?

  // Strong roots: always reachable roots

  // General strong roots that are registered in OopStorages
  for (auto id : EnumRange<OopStorageSet::StrongId>()) {
    OopStorageSet::storage(id)->oops_do(cl);
  }

  // Subsystems that still have their own root handling
  ClassLoaderDataGraph::cld_do(&clds);
  Threads::possibly_parallel_oops_do(false, cl, &blobs);

  {
    MutexLocker lock(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::blobs_do(&blobs);
  }

  // Weak roots: in an advanced GC these roots would be skipped during
  // the initial scan, and walked again after the marking is complete.
  // Then, we could discover which roots are not actually pointing
  // to surviving Java objects, and either clean the roots, or mark them.
  // The current simple implementation does not handle weak roots specially,
  // and therefore marks through them as if they were strong roots.
  for (auto id : EnumRange<OopStorageSet::WeakId>()) {
    OopStorageSet::storage(id)->oops_do(cl);
  }
}

// Walk the marking bitmap and call the object closure on every marked object.
// This is much faster than walking a (very sparse) parsable heap, but it
// takes up to 1/64th of the heap size for the bitmap.
void EpsilonHeap::walk_bitmap(ObjectClosure* cl) {
  HeapWord* limit = _space->top();
  HeapWord* addr = _bitmap.get_next_marked_addr(_space->bottom(), limit);
  while (addr < limit) {
    oop obj = cast_to_oop(addr);
    assert(_bitmap.is_marked(obj), "sanity");
    cl->do_object(obj);
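    // Step one word past the current mark before searching for the next marked address.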
    addr += 1;
    if (addr < limit) {
      addr = _bitmap.get_next_marked_addr(addr, limit);
    }
  }
}

class EpsilonScanOopClosure : public BasicOopIterateClosure {
private:
  EpsilonMarkStack* const _stack;
  MarkBitMap* const _bitmap;

  template <class T>
  void do_oop_work(T* p) {
    // p is the pointer to the memory location where the oop is; load the value
    // from it, unpacking the compressed reference, if needed:
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);

      // Object is discovered. See if it is marked already. If not,
      // mark and push it on the mark stack for further traversal. A non-atomic
      // check-and-set would do, as this closure is called by a single thread.
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _stack->push(obj);
      }
    }
  }

public:
  EpsilonScanOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
    _stack(stack), _bitmap(bitmap) {}
  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
private:
  HeapWord* _compact_point;
  PreservedMarks* const _preserved_marks;

public:
  EpsilonCalcNewLocationObjectClosure(HeapWord* start, PreservedMarks* pm) :
    _compact_point(start),
    _preserved_marks(pm) {}

  void do_object(oop obj) {
    // Record the new location of the object: it is the current compaction point.
    // If the object stays at the same location (which is true for objects in the
    // dense prefix that we would normally get), do not bother recording the
    // move, letting downstream code ignore it.
    if (obj != cast_to_oop(_compact_point)) {
      markWord mark = obj->mark();
      _preserved_marks->push_if_necessary(obj, mark);
      obj->forward_to(cast_to_oop(_compact_point));
    }
    _compact_point += obj->size();
  }

  HeapWord* compact_point() {
    return _compact_point;
  }
};

class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
private:
  template <class T>
  void do_oop_work(T* p) {
    // p is the pointer to the memory location where the oop is; load the value
    // from it, unpacking the compressed reference, if needed:
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);

      // Rewrite the current pointer to the object with its forwardee.
      // Skip the write if no update is needed.
      if (obj->is_forwarded()) {
        oop fwd = obj->forwardee();
        assert(fwd != nullptr, "just checking");
        RawAccess<>::oop_store(p, fwd);
      }
    }
  }

public:
  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
private:
  EpsilonAdjustPointersOopClosure _cl;
public:
  void do_object(oop obj) {
    // Apply the updates to all references reachable from the current object:
    obj->oop_iterate(&_cl);
  }
};

class EpsilonMoveObjectsObjectClosure : public ObjectClosure {
private:
  size_t _moved;
public:
  EpsilonMoveObjectsObjectClosure() : ObjectClosure(), _moved(0) {}

  void do_object(oop obj) {
    // Copy the object to its new location, if needed. This is the final step,
    // so we have to re-initialize its new mark word, dropping the forwardee
    // data from it.
    if (obj->is_forwarded()) {
      oop fwd = obj->forwardee();
      assert(fwd != nullptr, "just checking");
      Copy::aligned_conjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(fwd), obj->size());
      fwd->init_mark();
      _moved++;
    }
  }

  size_t moved() {
    return _moved;
  }
};

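// Verification closure: starting from a cleared bitmap, re-mark everything reachable
// from the roots, checking that every visited reference points into the heap, at a
// well-formed object whose mark word no longer carries forwarding data.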
class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
private:
  EpsilonHeap* const _heap;
  EpsilonMarkStack* const _stack;
  MarkBitMap* const _bitmap;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);

        guarantee(_heap->is_in(obj), "Is in heap: " PTR_FORMAT, p2i(obj));
        guarantee(oopDesc::is_oop(obj), "Is an object: " PTR_FORMAT, p2i(obj));
        guarantee(!obj->mark().is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));

        _stack->push(obj);
      }
    }
  }

public:
  EpsilonVerifyOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
    _heap(EpsilonHeap::heap()), _stack(stack), _bitmap(bitmap) {}
  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

void EpsilonHeap::entry_collect(GCCause::Cause cause) {
  if (GCLocker::check_active_before_gc()) {
    return;
  }

  GCIdMark mark;
  GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", nullptr, cause, true);

  // Some statistics, for fun and profit:
  size_t stat_reachable_roots = 0;
  size_t stat_reachable_heap = 0;
  size_t stat_moved = 0;
  size_t stat_preserved_marks = 0;

  {
    GCTraceTime(Info, gc) time("Step 0: Prologue", nullptr);

    // Commit the marking bitmap memory. There are several upsides to doing this
    // before the cycle: no memory is taken if GC is not happening, the memory
    // is "cleared" on first touch, and untouched parts of the bitmap are mapped
    // to the zero page, boosting performance on sparse heaps.
    if (!os::commit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size(), false)) {
      log_warning(gc)("Could not commit native memory for marking bitmap, GC failed");
      return;
    }

    // We do not need a parsable heap for this algorithm to work, but we want
    // threads to give up their TLABs.
    ensure_parsability(true);

#if COMPILER2_OR_JVMCI
    // Derived pointers would be re-discovered during the mark.
    // Clear and activate the table for them.
    DerivedPointerTable::clear();
#endif
  }

  {
    GCTraceTime(Info, gc) time("Step 1: Mark", nullptr);

    // Marking stack and the closure that does most of the work. The closure
    // scans the outgoing references, marks them, and pushes newly-marked
    // objects to the stack for further processing.
    EpsilonMarkStack stack;
    EpsilonScanOopClosure cl(&stack, &_bitmap);

    // Seed the marking with roots.
    process_roots(&cl);
    stat_reachable_roots = stack.size();

    // Scan the rest of the heap until we run out of objects. Termination is
    // guaranteed, because all reachable objects would be marked eventually.
    while (!stack.is_empty()) {
      oop obj = stack.pop();
      obj->oop_iterate(&cl);
      stat_reachable_heap++;
    }

#if COMPILER2_OR_JVMCI
    // No more derived pointers are discovered after marking is done.
    DerivedPointerTable::set_active(false);
#endif
  }

  // We are going to store forwarding information (where the new copy resides)
  // in mark words. Some of those mark words need to be carefully preserved.
  // This is a utility that maintains the list of those special mark words.
  PreservedMarks preserved_marks;

  // New top of the allocated space.
  HeapWord* new_top;

  {
    GCTraceTime(Info, gc) time("Step 2: Calculate new locations", nullptr);

    // Walk all alive objects, compute their new addresses and store those
    // addresses in mark words. Optionally preserve some marks.
    EpsilonCalcNewLocationObjectClosure cl(_space->bottom(), &preserved_marks);
    walk_bitmap(&cl);

    // After addresses are calculated, we know the new top for the allocated
    // space. We cannot set it just yet, because some asserts check that objects
    // are "in heap" based on the current "top".
    new_top = cl.compact_point();

    stat_preserved_marks = preserved_marks.size();
  }

  {
    GCTraceTime(Info, gc) time("Step 3: Adjust pointers", nullptr);

    // Walk all alive objects _and their reference fields_, and put "new
    // addresses" there. We know the new addresses from the forwarding data
    // in mark words. Take care of the heap objects first.
    EpsilonAdjustPointersObjectClosure cl;
    walk_bitmap(&cl);

    // Now do the same, but for all VM roots, which reference the objects on
    // their own: their references should also be updated.
    EpsilonAdjustPointersOopClosure cli;
    process_roots(&cli);

    // Finally, make sure preserved marks know the objects are about to move.
    preserved_marks.adjust_during_full_gc();
  }

  {
    GCTraceTime(Info, gc) time("Step 4: Move objects", nullptr);

    // Move all alive objects to their new locations. All the references are
    // already adjusted at the previous step.
    EpsilonMoveObjectsObjectClosure cl;
    walk_bitmap(&cl);
    stat_moved = cl.moved();

    // Now that we have moved all objects to their new locations, we can retract
    // the "top" of the allocation space to the end of the compacted prefix.
    _space->set_top(new_top);
  }

  {
    GCTraceTime(Info, gc) time("Step 5: Epilogue", nullptr);

    // Restore all special mark words.
    preserved_marks.restore();

#if COMPILER2_OR_JVMCI
    // Tell the rest of runtime we have finished the GC.
    DerivedPointerTable::update_pointers();
#endif

    // Verification code walks the entire heap and verifies nothing is broken.
    if (EpsilonVerify) {
      // The basic implementation turns the heap into an entirely parsable one with
      // only alive objects, which means we could just walk the heap object
      // by object and verify it. But it would be inconvenient for verification
      // to assume the heap has only alive objects: any future change that leaves
      // at least one dead object with dead outgoing references would fail the
      // verification. Therefore, it makes more sense to mark through the heap
      // again, not assuming all objects are alive.
      EpsilonMarkStack stack;
      EpsilonVerifyOopClosure cl(&stack, &_bitmap);

      _bitmap.clear();

      // Verify all roots are correct, and that we have the same number of
      // objects reachable from the roots.
      process_roots(&cl);

      size_t verified_roots = stack.size();
      guarantee(verified_roots == stat_reachable_roots,
                "Verification discovered " SIZE_FORMAT " roots out of " SIZE_FORMAT,
                verified_roots, stat_reachable_roots);

      // Verify the rest of the heap is correct, and that we have the same
      // number of objects reachable from the heap.
      size_t verified_heap = 0;
      while (!stack.is_empty()) {
        oop obj = stack.pop();
        obj->oop_iterate(&cl);
        verified_heap++;
      }

      guarantee(verified_heap == stat_reachable_heap,
                "Verification discovered " SIZE_FORMAT " heap objects out of " SIZE_FORMAT,
                verified_heap, stat_reachable_heap);

      // Ask parts of the runtime to verify themselves too
      Universe::verify("Epsilon");
    }

    // The marking bitmap is not needed anymore
    if (!os::uncommit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size())) {
      log_warning(gc)("Could not uncommit native memory for marking bitmap");
    }

    // Return all memory back if so requested. On large heaps, this would
    // take a while.
    if (EpsilonUncommit) {
      _virtual_space.shrink_by((_space->end() - new_top) * HeapWordSize);
      _space->set_end((HeapWord*)_virtual_space.high());
    }
  }

  size_t stat_reachable = stat_reachable_roots + stat_reachable_heap;
  log_info(gc)("GC Stats: " SIZE_FORMAT " (%.2f%%) reachable from roots, " SIZE_FORMAT " (%.2f%%) reachable from heap, "
               SIZE_FORMAT " (%.2f%%) moved, " SIZE_FORMAT " (%.2f%%) markwords preserved",
               stat_reachable_roots, percent_of(stat_reachable_roots, stat_reachable),
               stat_reachable_heap, percent_of(stat_reachable_heap, stat_reachable),
               stat_moved, percent_of(stat_moved, stat_reachable),
               stat_preserved_marks, percent_of(stat_preserved_marks, stat_reachable)
  );

  print_heap_info(used());
  print_metaspace_info();
}

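// Object pinning piggybacks on GCLocker critical sections: entry_collect() bails
// out while any thread is inside a critical region, so pinned objects cannot move.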
void EpsilonHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void EpsilonHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}
|