/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_EPSILON_EPSILONHEAP_HPP
#define SHARE_GC_EPSILON_EPSILONHEAP_HPP

#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilonMonitoringSupport.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/space.hpp"
#include "memory/virtualspace.hpp"
#include "services/memoryManager.hpp"

class EpsilonHeap : public CollectedHeap {
  friend class VMStructs;
private:
  SoftRefPolicy _soft_ref_policy;
  EpsilonMonitoringSupport* _monitoring_support;
  MemoryPool* _pool;
  GCMemoryManager _memory_manager;
  ContiguousSpace* _space;              // Single contiguous allocation space
  VirtualSpace _virtual_space;          // Reserved/committed backing storage
  size_t _max_tlab_size;                // Max TLAB size, in words
  size_t _step_counter_update;          // Allocation step between counter updates, in bytes
  size_t _step_heap_print;              // Allocation step between heap occupancy printouts, in bytes
  int64_t _decay_time_ns;               // TLAB sizing decay time, in nanoseconds
  volatile size_t _last_counter_update; // Used-space watermark at the last counter update
  volatile size_t _last_heap_print;     // Used-space watermark at the last heap printout

public:
  static EpsilonHeap* heap();

  EpsilonHeap() :
          _memory_manager("Epsilon Heap"),
          _space(nullptr) {}

  Name kind() const override {
    return CollectedHeap::Epsilon;
  }

  const char* name() const override {
    return "Epsilon";
  }

  SoftRefPolicy* soft_ref_policy() override {
    return &_soft_ref_policy;
  }

  jint initialize() override;
  void initialize_serviceability() override;

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;

  size_t max_capacity() const override { return _virtual_space.reserved_size();  }
  size_t capacity()     const override { return _virtual_space.committed_size(); }
  size_t used()         const override { return _space->used(); }

  bool is_in(const void* p) const override {
    return _space->is_in(p);
  }

  bool requires_barriers(stackChunkOop obj) const override { return false; }

  bool is_maximal_no_gc() const override {
    // No GC is going to happen. Return "we are at max" when we are about to fail.
    return used() == capacity();
  }

  // Allocation
  HeapWord* allocate_work(size_t size, bool verbose = true);
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
  HeapWord* allocate_new_tlab(size_t min_size,
                              size_t requested_size,
                              size_t* actual_size) override;

  // TLAB allocation
  size_t tlab_capacity(Thread* thr)         const override { return capacity();     }
  size_t tlab_used(Thread* thr)             const override { return used();         }
  size_t max_tlab_size()                    const override { return _max_tlab_size; }
  size_t unsafe_max_tlab_alloc(Thread* thr) const override;

  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Heap walking support
  void object_iterate(ObjectClosure* cl) override;

  // Object pinning support: every object is implicitly pinned
  void pin_object(JavaThread* thread, oop obj) override { }
  void unpin_object(JavaThread* thread, oop obj) override { }

  // No support for block parsing.
  HeapWord* block_start(const void* addr) const { return nullptr; }
  bool block_is_obj(const HeapWord* addr) const { return false; }

  // No GC threads
  void gc_threads_do(ThreadClosure* tc) const override {}

  // No nmethod handling
  void register_nmethod(nmethod* nm) override {}
  void unregister_nmethod(nmethod* nm) override {}
  void verify_nmethod(nmethod* nm) override {}

  // No heap verification
  void prepare_for_verify() override {}
  void verify(VerifyOption option) override {}

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  // Support for loading objects from the CDS archive into the heap
  bool can_load_archived_objects() const override { return UseCompressedOops; }
  HeapWord* allocate_loaded_archive_space(size_t size) override;

  void print_on(outputStream* st) const override;
  void print_tracing_info() const override;
  bool print_location(outputStream* st, void* addr) const override;

private:
  void print_heap_info(size_t used) const;
  void print_metaspace_info() const;

};

#endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP