/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_EPSILON_EPSILONHEAP_HPP
#define SHARE_GC_EPSILON_EPSILONHEAP_HPP

#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilonMonitoringSupport.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/space.hpp"
#include "memory/virtualspace.hpp"
#include "services/memoryManager.hpp"

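// EpsilonHeap is the CollectedHeap implementation behind the experimental
// Epsilon GC (JEP 318): allocation is served by bumping a pointer in a single
// contiguous space backed by one virtual memory reservation, and memory is
// normally never reclaimed. This variant additionally carries a marking
// bitmap and collection entry points (entry_collect, walk_bitmap); see
// epsilonHeap.cpp for what a collection cycle actually does.
//
// Epsilon is enabled with:
//   -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC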
class EpsilonHeap : public CollectedHeap {
  friend class VMStructs;
private:
  SoftRefPolicy _soft_ref_policy;
  EpsilonMonitoringSupport* _monitoring_support;
  MemoryPool* _pool;
  GCMemoryManager _memory_manager;
  ContiguousSpace* _space;              // The single allocation space
  VirtualSpace _virtual_space;          // Reserved and committed backing memory
  size_t _max_tlab_size;                // Upper bound on TLAB size, in words
  size_t _step_counter_update;          // Allocation step between monitoring counter updates
  size_t _step_heap_print;              // Allocation step between periodic heap printouts
  int64_t _decay_time_ns;               // TLAB size decay time
  volatile size_t _last_counter_update; // Heap usage at the last counter update
  volatile size_t _last_heap_print;     // Heap usage at the last heap printout
  MemRegion  _bitmap_region;
  MarkBitMap _bitmap;                   // Marking bitmap used by the collection support below

public:
  static EpsilonHeap* heap();

  EpsilonHeap() :
          _memory_manager("Epsilon Heap"),
          _space(nullptr) {}

  Name kind() const override {
    return CollectedHeap::Epsilon;
  }

  const char* name() const override {
    return "Epsilon";
  }

  SoftRefPolicy* soft_ref_policy() override {
    return &_soft_ref_policy;
  }

  jint initialize() override;
  void initialize_serviceability() override;

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;

  size_t max_capacity() const override { return _virtual_space.reserved_size();  }
  size_t capacity()     const override { return _virtual_space.committed_size(); }
  size_t used()         const override { return _space->used(); }

  bool is_in(const void* p) const override {
    return _space->is_in(p);
  }

  bool requires_barriers(stackChunkOop obj) const override { return false; }

  bool is_maximal_no_gc() const override {
    // No GC is going to happen. Report "we are at max" when we are about to fail.
    return used() == capacity();
  }

  // Allocation
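  // A rough sketch of the allocation path, based on the declarations below
  // (details live in epsilonHeap.cpp): allocate_work() hands out memory by
  // bumping the top of the single contiguous space, committing more of the
  // reserved virtual space on demand; allocate_or_collect_work() is assumed
  // to first attempt a collection cycle in this variant before giving up;
  // mem_allocate() and allocate_new_tlab() are the CollectedHeap entry points
  // that funnel into them for out-of-TLAB and TLAB allocations respectively.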
  HeapWord* allocate_work(size_t size, bool verbose = true);
  HeapWord* allocate_or_collect_work(size_t size, bool verbose = true);
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
  HeapWord* allocate_new_tlab(size_t min_size,
                              size_t requested_size,
                              size_t* actual_size) override;

  // TLAB allocation
  size_t tlab_capacity(Thread* thr)         const override { return capacity();     }
  size_t tlab_used(Thread* thr)             const override { return used();         }
  size_t max_tlab_size()                    const override { return _max_tlab_size; }
  size_t unsafe_max_tlab_alloc(Thread* thr) const override;
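  // TLAB sizing note: Epsilon caps TLABs at _max_tlab_size and, judging by the
  // _decay_time_ns field above, shrinks a thread's TLAB size back down if the
  // thread has not allocated for a while; the exact elasticity/decay policy is
  // implemented in epsilonHeap.cpp.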

  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Heap walking support
  void object_iterate(ObjectClosure* cl) override;

  // Object pinning support: every object is implicitly pinned
  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  // No support for block parsing.
  HeapWord* block_start(const void* addr) const { return nullptr; }
  bool block_is_obj(const HeapWord* addr) const { return false; }

  // No GC threads
  void gc_threads_do(ThreadClosure* tc) const override {}

  // No nmethod handling
  void register_nmethod(nmethod* nm) override {}
  void unregister_nmethod(nmethod* nm) override {}
  void verify_nmethod(nmethod* nm) override {}

  // No heap verification
  void prepare_for_verify() override {}
  void verify(VerifyOption option) override {}

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  // Support for loading objects from CDS archive into the heap
  bool can_load_archived_objects() const override { return UseCompressedOops; }
  HeapWord* allocate_loaded_archive_space(size_t size) override;

  void print_on(outputStream* st) const override;
  void print_tracing_info() const override;
  bool print_location(outputStream* st, void* addr) const override;

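  // Collection entry point for this variant. The expected flow (see
  // epsilonHeap.cpp) is: collect()/do_full_collection() above request a cycle,
  // the private vmentry_collect() below wraps it in a safepoint VM operation,
  // and entry_collect() does the actual work using the marking bitmap.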
  void entry_collect(GCCause::Cause cause);

private:
  void print_heap_info(size_t used) const;
  void print_metaspace_info() const;

  void vmentry_collect(GCCause::Cause cause);

  void process_roots(OopClosure* cl);
  void walk_bitmap(ObjectClosure* cl);

};

#endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP