1 /*
  2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "classfile/classLoaderDataGraph.hpp"
 28 #include "code/codeCache.hpp"
 29 #include "gc/epsilon/epsilonHeap.hpp"
 30 #include "gc/epsilon/epsilonInitLogger.hpp"
 31 #include "gc/epsilon/epsilonMemoryPool.hpp"
 32 #include "gc/epsilon/epsilonThreadLocalData.hpp"
 33 #include "gc/shared/gcArguments.hpp"
 34 #include "gc/shared/gcLocker.inline.hpp"
 35 #include "gc/shared/gcTraceTime.inline.hpp"
 36 #include "gc/shared/locationPrinter.inline.hpp"
 37 #include "gc/shared/markBitMap.inline.hpp"
 38 #include "gc/shared/strongRootsScope.hpp"
 39 #include "gc/shared/preservedMarks.inline.hpp"
 40 #include "gc/shared/oopStorageSet.inline.hpp"
 41 #include "logging/log.hpp"
 42 #include "nmt/memTracker.hpp"
 43 #include "memory/allocation.hpp"
 44 #include "memory/iterator.inline.hpp"
 45 #include "memory/metaspaceUtils.hpp"
 46 #include "memory/resourceArea.hpp"
 47 #include "memory/universe.hpp"
 48 #include "oops/compressedOops.inline.hpp"
 49 #include "runtime/atomic.hpp"
 50 #include "runtime/globals.hpp"
 51 #include "runtime/thread.hpp"
 52 #include "runtime/threads.hpp"
 53 #include "runtime/vmOperations.hpp"
 54 #include "runtime/vmThread.hpp"
 55 #include "services/management.hpp"
 56 #include "utilities/stack.inline.hpp"
 57 
// Set up the heap: reserve the maximum size up front, commit the initial
// size, and precompute the sizing knobs used by the hot allocation path.
// Returns JNI_OK on success.
jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage: reserve max, commit only the initial part.
  // The rest is committed on demand by allocate_work().
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  // Epsilon serves all allocations from a single contiguous space that
  // covers the committed part of the virtual space.
  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  // SIZE_MAX disables periodic heap printing when steps are set to zero.
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  // NOTE(review): local variable uses member-style "_" prefix; consider renaming.
  size_t _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  // Initialize marking bitmap, but not commit it yet
  if (EpsilonSlidingGC) {
    ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
    MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
    _bitmap_region = MemRegion((HeapWord *) bitmap.base(), bitmap.size() / HeapWordSize);
    MemRegion heap_region = MemRegion((HeapWord *) heap_rs.base(), heap_rs.size() / HeapWordSize);
    _bitmap.initialize(heap_region, _bitmap_region);
  }

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}
106 
107 void EpsilonHeap::initialize_serviceability() {
108   _pool = new EpsilonMemoryPool(this);
109   _memory_manager.add_pool(_pool);
110 }
111 
112 GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
113   GrowableArray<GCMemoryManager*> memory_managers(1);
114   memory_managers.append(&_memory_manager);
115   return memory_managers;
116 }
117 
118 GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
119   GrowableArray<MemoryPool*> memory_pools(1);
120   memory_pools.append(_pool);
121   return memory_pools;
122 }
123 
124 size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
125   // Return max allocatable TLAB size, and let allocation path figure out
126   // the actual allocation size. Note: result should be in bytes.
127   return _max_tlab_size * HeapWordSize;
128 }
129 
// Singleton accessor: returns the active heap downcast to EpsilonHeap.
EpsilonHeap* EpsilonHeap::heap() {
  return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
}
133 
// Common allocation path: lock-free bump-pointer allocation from the shared
// space, expanding the committed heap under the Heap_lock on demand.
// Returns nullptr only when the heap is exhausted. When verbose, this also
// periodically publishes monitoring counters and occupancy lines.
HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = nullptr;
  while (true) {
    // Try to allocate, assume space is available
    res = _space->par_allocate(size);
    if (res != nullptr) {
      break;
    }

    // Allocation failed, attempt expansion, and retry:
    {
      MutexLocker ml(Heap_lock);

      // Try to allocate under the lock, assume another thread was able to expand
      res = _space->par_allocate(size);
      if (res != nullptr) {
        break;
      }

      // Expand and loop back if space is available
      size_t space_left = max_capacity() - capacity();
      size_t want_space = MAX2(size, EpsilonMinHeapExpand);

      if (want_space < space_left) {
        // Enough space to expand in bulk:
        bool expand = _virtual_space.expand_by(want_space);
        assert(expand, "Should be able to expand");
      } else if (size < space_left) {
        // No space to expand in bulk, and this allocation is still possible,
        // take all the remaining space:
        bool expand = _virtual_space.expand_by(space_left);
        assert(expand, "Should be able to expand");
      } else {
        // No space left:
        return nullptr;
      }

      _space->set_end((HeapWord *) _virtual_space.high());
    }
  }

  size_t used = _space->used();

  // Allocation successful, update counters
  if (verbose) {
    size_t last = _last_counter_update;
    // CAS elects a single winner per step, so counters are published at most
    // once per _step_counter_update bytes of allocation.
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  if (verbose) {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}
199 
// TLAB refill entry point. With EpsilonElasticTLAB, per-thread TLAB sizes
// start at the recorded ergonomic size and are scaled up (by
// EpsilonTLABElasticity) when the request does not fit; with
// EpsilonElasticTLABDecay, the ergonomic size is retracted for threads that
// have not allocated within the decay window. On success, *actual_size is
// set to the size actually handed out (in words).
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: "  SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                          "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_or_collect_work(size);

  if (res != nullptr) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}
287 
288 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
289   *gc_overhead_limit_was_exceeded = false;
290   return allocate_or_collect_work(size);
291 }
292 
293 HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
294   // Cannot use verbose=true because Metaspace is not initialized
295   return allocate_work(size, /* verbose = */false);
296 }
297 
// Entry point for GC requests. Metadata-threshold causes only resize
// Metaspace; all other causes either run the sliding GC (when
// EpsilonSlidingGC is enabled) or are deliberately ignored.
void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      if (EpsilonSlidingGC) {
        // Run in place if we are already at a safepoint; otherwise submit
        // a VM operation that brings us to one.
        if (SafepointSynchronize::is_at_safepoint()) {
          entry_collect(cause);
        } else {
          vmentry_collect(cause);
        }
      } else {
        log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
      }
  }
  _monitoring_support->update_counters();
}
324 
// Delegate to the common collection entry with the recorded GC cause.
// clear_all_soft_refs is ignored: Epsilon never clears soft references.
void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}
328 
// Apply the closure to every object in the allocation space.
void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}
332 
// Print a human-readable heap description (virtual space, allocation space,
// and Metaspace) to the given stream.
void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  _virtual_space.print_on(st);

  // The allocation space may not be set up yet during early initialization.
  if (_space != nullptr) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }

  MetaspaceUtils::print_on(st);
}
345 
// Describe what lives at the given address; returns true if recognized.
bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}
349 
// Print the current heap and Metaspace occupancy summaries.
void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}
354 
// Log a one-line heap summary: reserved, committed and used sizes, with
// percentages computed relative to the reserved size.
void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            percent_of(committed, reserved),
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            percent_of(used, reserved)
    );
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}
372 
// Log a one-line Metaspace summary, mirroring print_heap_info() format.
void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved  = stats.reserved();
  size_t committed = stats.committed();
  size_t used      = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            percent_of(committed, reserved),
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            percent_of(used, reserved)
    );
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}
392 
393 // ------------------ EXPERIMENTAL MARK-COMPACT -------------------------------
394 //
395 // This implements a trivial Lisp2-style sliding collector:
396 //     https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
397 //
398 // The goal for this implementation is to be as simple as possible, ignoring
399 // non-trivial performance optimizations. This collector does not implement
// reference processing: no soft/weak/phantom/finalizable references are ever
401 // cleared. It also does not implement class unloading and other runtime
402 // cleanups.
403 //
404 
405 // VM operation that executes collection cycle under safepoint
// VM operation that executes collection cycle under safepoint
class VM_EpsilonCollect: public VM_Operation {
private:
  const GCCause::Cause _cause;
  EpsilonHeap* const _heap;
  static size_t _req_id;  // counter of handled GC requests; incremented under Heap_lock
public:
  VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(),
                                            _cause(cause),
                                            _heap(EpsilonHeap::heap()) {};

  VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
  const char* name()             const { return "Epsilon Collection"; }

  virtual bool doit_prologue() {
    // Snapshot the request id before acquiring the lock; if it advanced
    // while we waited, a GC already ran on our behalf.
    size_t id = Atomic::load_acquire(&_req_id);

    // Need to take the Heap lock before managing backing storage.
    Heap_lock->lock();

    // Heap lock also naturally serializes GC requests, and allows us to coalesce
    // back-to-back GC requests from many threads. Avoid the consecutive GCs
    // if we started waiting when other GC request was being handled.
    if (id < Atomic::load_acquire(&_req_id)) {
      Heap_lock->unlock();
      return false;
    }

    // No contenders. Start handling a new GC request.
    Atomic::inc(&_req_id);
    return true;
  }

  virtual void doit() {
    _heap->entry_collect(_cause);
  }

  virtual void doit_epilogue() {
    // Heap_lock was taken in doit_prologue(); release it after the cycle.
    Heap_lock->unlock();
  }
};
446 
size_t VM_EpsilonCollect::_req_id = 0;

// Request a collection via a VM operation; the cycle itself runs in
// entry_collect() on the VM thread at a safepoint.
void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
  VM_EpsilonCollect vmop(cause);
  VMThread::execute(&vmop);
}
453 
454 HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size, bool verbose) {
455   HeapWord* res = allocate_work(size);
456   if (res == NULL && EpsilonSlidingGC && EpsilonImplicitGC) {
457     vmentry_collect(GCCause::_allocation_failure);
458     // TODO: This looks incorrect
459     GCLocker::stall_until_clear();
460     res = allocate_work(size, verbose);
461   }
462   return res;
463 }
464 
// Marking stack for the sliding GC: a plain stack of oops, NMT-tagged as GC memory.
typedef Stack<oop, mtGC> EpsilonMarkStack;
466 
// Apply the given closure to every GC root: OopStorage entries, class
// loader data, thread stacks, and code cache blobs. Single-threaded;
// weak roots are treated as strong (see note below).
void EpsilonHeap::process_roots(OopClosure* cl) {
  // Need to tell runtime we are about to walk the roots with 1 thread
  StrongRootsScope scope(1);

  // Need to adapt oop closure for some special root types.
  CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
  MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations, true); // TODO: Keepalive?

  // Strong roots: always reachable roots

  // General strong roots that are registered in OopStorages
  for (auto id : EnumRange<OopStorageSet::StrongId>()) {
    OopStorageSet::storage(id)->oops_do(cl);
  }

  // Subsystems that still have their own root handling
  ClassLoaderDataGraph::cld_do(&clds);
  Threads::possibly_parallel_oops_do(false, cl, &blobs);

  {
    // Code cache requires its lock even at a safepoint.
    MutexLocker lock(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::blobs_do(&blobs);
  }

  // Weak roots: in an advanced GC these roots would be skipped during
  // the initial scan, and walked again after the marking is complete.
  // Then, we could discover which roots are not actually pointing
  // to surviving Java objects, and either clean the roots, or mark them.
  // Current simple implementation does not handle weak roots specially,
  // and therefore, we mark through them as if they are strong roots.
  for (auto id : EnumRange<OopStorageSet::WeakId>()) {
    OopStorageSet::storage(id)->oops_do(cl);
  }
}
501 
502 // Walk the marking bitmap and call object closure on every marked object.
// This is much faster than walking a (very sparse) parsable heap, but it
504 // takes up to 1/64-th of heap size for the bitmap.
void EpsilonHeap::walk_bitmap(ObjectClosure* cl) {
   HeapWord* limit = _space->top();
   HeapWord* addr = _bitmap.get_next_marked_addr(_space->bottom(), limit);
   while (addr < limit) {
     oop obj = cast_to_oop(addr);
     assert(_bitmap.is_marked(obj), "sanity");
     cl->do_object(obj);
     // Step past the current mark, then search for the next marked address.
     addr += 1;
     if (addr < limit) {
       addr = _bitmap.get_next_marked_addr(addr, limit);
     }
   }
}
518 
519 class EpsilonScanOopClosure : public BasicOopIterateClosure {
520 private:
521   EpsilonMarkStack* const _stack;
522   MarkBitMap* const _bitmap;
523 
524   template <class T>
525   void do_oop_work(T* p) {
526     // p is the pointer to memory location where oop is, load the value
527     // from it, unpack the compressed reference, if needed:
528     T o = RawAccess<>::oop_load(p);
529     if (!CompressedOops::is_null(o)) {
530       oop obj = CompressedOops::decode_not_null(o);
531 
532       // Object is discovered. See if it is marked already. If not,
533       // mark and push it on mark stack for further traversal. Non-atomic
534       // check and set would do, as this closure is called by single thread.
535       if (!_bitmap->is_marked(obj)) {
536         _bitmap->mark(obj);
537         _stack->push(obj);
538       }
539     }
540   }
541 
542 public:
543   EpsilonScanOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
544                         _stack(stack), _bitmap(bitmap) {}
545   virtual void do_oop(oop* p)       { do_oop_work(p); }
546   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
547 };
548 
549 class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
550 private:
551   HeapWord* _compact_point;
552   PreservedMarks* const _preserved_marks;
553 
554 public:
555   EpsilonCalcNewLocationObjectClosure(HeapWord* start, PreservedMarks* pm) :
556                                       _compact_point(start),
557                                       _preserved_marks(pm) {}
558 
559   void do_object(oop obj) {
560     // Record the new location of the object: it is current compaction point.
561     // If object stays at the same location (which is true for objects in
562     // dense prefix, that we would normally get), do not bother recording the
563     // move, letting downstream code ignore it.
564     if (obj != cast_to_oop(_compact_point)) {
565       markWord mark = obj->mark();
566       _preserved_marks->push_if_necessary(obj, mark);
567       obj->forward_to(cast_to_oop(_compact_point));
568     }
569     _compact_point += obj->size();
570   }
571 
572   HeapWord* compact_point() {
573     return _compact_point;
574   }
575 };
576 
577 class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
578 private:
579   template <class T>
580   void do_oop_work(T* p) {
581     // p is the pointer to memory location where oop is, load the value
582     // from it, unpack the compressed reference, if needed:
583     T o = RawAccess<>::oop_load(p);
584     if (!CompressedOops::is_null(o)) {
585       oop obj = CompressedOops::decode_not_null(o);
586 
587       // Rewrite the current pointer to the object with its forwardee.
588       // Skip the write if update is not needed.
589       if (obj->is_forwarded()) {
590         oop fwd = obj->forwardee();
591         assert(fwd != NULL, "just checking");
592         RawAccess<>::oop_store(p, fwd);
593       }
594     }
595   }
596 
597 public:
598   virtual void do_oop(oop* p)       { do_oop_work(p); }
599   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
600 };
601 
// Object-level wrapper: applies the pointer-adjustment oop closure to every
// reference field of each visited object.
class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
private:
  EpsilonAdjustPointersOopClosure _cl;
public:
  void do_object(oop obj) {
    // Apply the updates to all references reachable from current object:
    obj->oop_iterate(&_cl);
  }
};
611 
612 class EpsilonMoveObjectsObjectClosure : public ObjectClosure {
613 private:
614   size_t _moved;
615 public:
616   EpsilonMoveObjectsObjectClosure() : ObjectClosure(), _moved(0) {}
617 
618   void do_object(oop obj) {
619     // Copy the object to its new location, if needed. This is final step,
620     // so we have to re-initialize its new mark word, dropping the forwardee
621     // data from it.
622     if (obj->is_forwarded()) {
623       oop fwd = obj->forwardee();
624       assert(fwd != NULL, "just checking");
625       Copy::aligned_conjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(fwd), obj->size());
626       fwd->init_mark();
627       _moved++;
628     }
629   }
630 
631   size_t moved() {
632     return _moved;
633   }
634 };
635 
// Verification closure: re-marks the heap from roots and checks basic
// invariants on every newly discovered object — it lies within the heap,
// looks like a valid object, and its mark word carries no leftover
// forwarding data.
class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
private:
  EpsilonHeap* const _heap;
  EpsilonMarkStack* const _stack;
  MarkBitMap* const _bitmap;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      // Check each object only once; bitmap was cleared before verification.
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);

        guarantee(_heap->is_in(obj),        "Is in heap: "   PTR_FORMAT, p2i(obj));
        guarantee(oopDesc::is_oop(obj),     "Is an object: " PTR_FORMAT, p2i(obj));
        guarantee(!obj->mark().is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));

        _stack->push(obj);
      }
    }
  }

public:
  EpsilonVerifyOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
    _heap(EpsilonHeap::heap()), _stack(stack), _bitmap(bitmap) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
665 
// The collection cycle proper. Runs at a safepoint (on the VM thread via
// VM_EpsilonCollect, or directly when already at one). Implements the
// single-threaded Lisp2 sliding compaction in five ordered steps.
void EpsilonHeap::entry_collect(GCCause::Cause cause) {
  // Bail out if any thread holds a GC-locker critical section; the request
  // would be retried once the last critical section exits.
  if (GCLocker::check_active_before_gc()) {
    return;
  }

  GCIdMark mark;
  GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true);

  // Some statistics, for fun and profit:
  size_t stat_reachable_roots = 0;
  size_t stat_reachable_heap = 0;
  size_t stat_moved = 0;
  size_t stat_preserved_marks = 0;

  {
    GCTraceTime(Info, gc) time("Step 0: Prologue", NULL);

    // Commit marking bitmap memory. There are several upsides of doing this
    // before the cycle: no memory is taken if GC is not happening, the memory
    // is "cleared" on first touch, and untouched parts of bitmap are mapped
    // to zero page, boosting performance on sparse heaps.
    if (!os::commit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size(), false)) {
      log_warning(gc)("Could not commit native memory for marking bitmap, GC failed");
      return;
    }

    // We do not need parsable heap for this algorithm to work, but we want
    // threads to give up their TLABs.
    ensure_parsability(true);

#if COMPILER2_OR_JVMCI
    // Derived pointers would be re-discovered during the mark.
    // Clear and activate the table for them.
    DerivedPointerTable::clear();
#endif
  }

  {
    GCTraceTime(Info, gc) time("Step 1: Mark", NULL);

    // Marking stack and the closure that does most of the work. The closure
    // would scan the outgoing references, mark them, and push newly-marked
    // objects to stack for further processing.
    EpsilonMarkStack stack;
    EpsilonScanOopClosure cl(&stack, &_bitmap);

    // Seed the marking with roots.
    process_roots(&cl);
    stat_reachable_roots = stack.size();

    // Scan the rest of the heap until we run out of objects. Termination is
    // guaranteed, because all reachable objects would be marked eventually.
    while (!stack.is_empty()) {
      oop obj = stack.pop();
      obj->oop_iterate(&cl);
      stat_reachable_heap++;
    }

#if COMPILER2_OR_JVMCI
    // No more derived pointers discovered after marking is done.
    DerivedPointerTable::set_active(false);
#endif
  }

  // We are going to store forwarding information (where the new copy resides)
  // in mark words. Some of those mark words need to be carefully preserved.
  // This is a utility that maintains the list of those special mark words.
  PreservedMarks preserved_marks;

  // New top of the allocated space.
  HeapWord* new_top;

  {
    GCTraceTime(Info, gc) time("Step 2: Calculate new locations", NULL);

    // Walk all alive objects, compute their new addresses and store those
    // addresses in mark words. Optionally preserve some marks.
    EpsilonCalcNewLocationObjectClosure cl(_space->bottom(), &preserved_marks);
    walk_bitmap(&cl);

    // After addresses are calculated, we know the new top for the allocated
    // space. We cannot set it just yet, because some asserts check that objects
    // are "in heap" based on current "top".
    new_top = cl.compact_point();

    stat_preserved_marks = preserved_marks.size();
  }

  {
    GCTraceTime(Info, gc) time("Step 3: Adjust pointers", NULL);

    // Walk all alive objects _and their reference fields_, and put "new
    // addresses" there. We know the new addresses from the forwarding data
    // in mark words. Take care of the heap objects first.
    EpsilonAdjustPointersObjectClosure cl;
    walk_bitmap(&cl);

    // Now do the same, but for all VM roots, which reference the objects on
    // their own: their references should also be updated.
    EpsilonAdjustPointersOopClosure cli;
    process_roots(&cli);

    // Finally, make sure preserved marks know the objects are about to move.
    preserved_marks.adjust_during_full_gc();
  }

  {
    GCTraceTime(Info, gc) time("Step 4: Move objects", NULL);

    // Move all alive objects to their new locations. All the references are
    // already adjusted at previous step.
    EpsilonMoveObjectsObjectClosure cl;
    walk_bitmap(&cl);
    stat_moved = cl.moved();

    // Now we moved all objects to their relevant locations, we can retract
    // the "top" of the allocation space to the end of the compacted prefix.
    _space->set_top(new_top);
  }

  {
    GCTraceTime(Info, gc) time("Step 5: Epilogue", NULL);

    // Restore all special mark words.
    preserved_marks.restore();

#if COMPILER2_OR_JVMCI
    // Tell the rest of runtime we have finished the GC.
    DerivedPointerTable::update_pointers();
#endif

    // Verification code walks entire heap and verifies nothing is broken.
    if (EpsilonVerify) {
      // The basic implementation turns heap into entirely parsable one with
      // only alive objects, which means we could just walk the heap object
      // by object and verify it. But, it would be inconvenient for verification
      // to assume heap has only alive objects. Any future change that leaves
      // at least one dead object with dead outgoing references would fail the
      // verification. Therefore, it makes more sense to mark through the heap
      // again, not assuming objects are all alive.
      EpsilonMarkStack stack;
      EpsilonVerifyOopClosure cl(&stack, &_bitmap);

      // Clear the marking bitmap so verification can reuse it.
      _bitmap.clear();

      // Verify all roots are correct, and that we have the same number of
      // object reachable from roots.
      process_roots(&cl);

      size_t verified_roots = stack.size();
      guarantee(verified_roots == stat_reachable_roots,
                "Verification discovered " SIZE_FORMAT " roots out of " SIZE_FORMAT,
                verified_roots, stat_reachable_roots);

      // Verify the rest of the heap is correct, and that we have the same
      // number of objects reachable from heap.
      size_t verified_heap = 0;
      while (!stack.is_empty()) {
        oop obj = stack.pop();
        obj->oop_iterate(&cl);
        verified_heap++;
      }

      guarantee(verified_heap == stat_reachable_heap,
                "Verification discovered " SIZE_FORMAT " heap objects out of " SIZE_FORMAT,
                verified_heap, stat_reachable_heap);

      // Ask parts of runtime to verify themselves too
      Universe::verify("Epsilon");
    }

    // Marking bitmap is not needed anymore
    if (!os::uncommit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size())) {
      log_warning(gc)("Could not uncommit native memory for marking bitmap");
    }

    // Return all memory back if so requested. On large heaps, this would
    // take a while.
    if (EpsilonUncommit) {
      _virtual_space.shrink_by((_space->end() - new_top) * HeapWordSize);
      _space->set_end((HeapWord*)_virtual_space.high());
    }
  }

  size_t stat_reachable = stat_reachable_roots + stat_reachable_heap;
  log_info(gc)("GC Stats: " SIZE_FORMAT " (%.2f%%) reachable from roots, " SIZE_FORMAT " (%.2f%%) reachable from heap, "
               SIZE_FORMAT " (%.2f%%) moved, " SIZE_FORMAT " (%.2f%%) markwords preserved",
               stat_reachable_roots, percent_of(stat_reachable_roots, stat_reachable),
               stat_reachable_heap,  percent_of(stat_reachable_heap,  stat_reachable),
               stat_moved,           percent_of(stat_moved,           stat_reachable),
               stat_preserved_marks, percent_of(stat_preserved_marks, stat_reachable)
  );

  print_heap_info(used());
  print_metaspace_info();
}
862 
// Object pinning piggybacks on GCLocker critical sections: while a thread is
// in one, entry_collect() bails out (see check_active_before_gc() there).
void EpsilonHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}
866 
// Leave the GCLocker critical section entered by pin_object().
void EpsilonHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}