 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
25
26 #ifndef SHARE_GC_EPSILON_EPSILONHEAP_HPP
27 #define SHARE_GC_EPSILON_EPSILONHEAP_HPP
28
29 #include "gc/epsilon/epsilonBarrierSet.hpp"
30 #include "gc/epsilon/epsilonMonitoringSupport.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/softRefPolicy.hpp"
33 #include "gc/shared/space.hpp"
34 #include "memory/virtualspace.hpp"
35 #include "services/memoryManager.hpp"
36
37 class EpsilonHeap : public CollectedHeap {
38 friend class VMStructs;
39 private:
40 SoftRefPolicy _soft_ref_policy;
41 EpsilonMonitoringSupport* _monitoring_support;
42 MemoryPool* _pool;
43 GCMemoryManager _memory_manager;
44 ContiguousSpace* _space;
45 VirtualSpace _virtual_space;
46 size_t _max_tlab_size;
47 size_t _step_counter_update;
48 size_t _step_heap_print;
49 int64_t _decay_time_ns;
50 volatile size_t _last_counter_update;
51 volatile size_t _last_heap_print;
52
53 public:
54 static EpsilonHeap* heap();
55
56 EpsilonHeap() :
57 _memory_manager("Epsilon Heap"),
58 _space(nullptr) {};
59
60 Name kind() const override {
61 return CollectedHeap::Epsilon;
62 }
63
64 const char* name() const override {
65 return "Epsilon";
66 }
67
68 SoftRefPolicy* soft_ref_policy() override {
69 return &_soft_ref_policy;
70 }
71
75 GrowableArray<GCMemoryManager*> memory_managers() override;
76 GrowableArray<MemoryPool*> memory_pools() override;
77
78 size_t max_capacity() const override { return _virtual_space.reserved_size(); }
79 size_t capacity() const override { return _virtual_space.committed_size(); }
80 size_t used() const override { return _space->used(); }
81
82 bool is_in(const void* p) const override {
83 return _space->is_in(p);
84 }
85
86 bool requires_barriers(stackChunkOop obj) const override { return false; }
87
88 bool is_maximal_no_gc() const override {
89 // No GC is going to happen. Return "we are at max", when we are about to fail.
90 return used() == capacity();
91 }
92
93 // Allocation
94 HeapWord* allocate_work(size_t size, bool verbose = true);
95 HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
96 HeapWord* allocate_new_tlab(size_t min_size,
97 size_t requested_size,
98 size_t* actual_size) override;
99
100 // TLAB allocation
101 size_t tlab_capacity(Thread* thr) const override { return capacity(); }
102 size_t tlab_used(Thread* thr) const override { return used(); }
103 size_t max_tlab_size() const override { return _max_tlab_size; }
104 size_t unsafe_max_tlab_alloc(Thread* thr) const override;
105
106 void collect(GCCause::Cause cause) override;
107 void do_full_collection(bool clear_all_soft_refs) override;
108
109 // Heap walking support
110 void object_iterate(ObjectClosure* cl) override;
111
112 // Object pinning support: every object is implicitly pinned
113 void pin_object(JavaThread* thread, oop obj) override { }
114 void unpin_object(JavaThread* thread, oop obj) override { }
115
116 // No support for block parsing.
117 HeapWord* block_start(const void* addr) const { return nullptr; }
118 bool block_is_obj(const HeapWord* addr) const { return false; }
119
120 // No GC threads
121 void gc_threads_do(ThreadClosure* tc) const override {}
122
123 // No nmethod handling
124 void register_nmethod(nmethod* nm) override {}
125 void unregister_nmethod(nmethod* nm) override {}
126 void verify_nmethod(nmethod* nm) override {}
127
128 // No heap verification
129 void prepare_for_verify() override {}
130 void verify(VerifyOption option) override {}
131
132 MemRegion reserved_region() const { return _reserved; }
133 bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
134
135 // Support for loading objects from CDS archive into the heap
136 bool can_load_archived_objects() const override { return UseCompressedOops; }
137 HeapWord* allocate_loaded_archive_space(size_t size) override;
138
139 void print_on(outputStream* st) const override;
140 void print_tracing_info() const override;
141 bool print_location(outputStream* st, void* addr) const override;
142
143 private:
144 void print_heap_info(size_t used) const;
145 void print_metaspace_info() const;
146
147 };
148
149 #endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP
|
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
25
26 #ifndef SHARE_GC_EPSILON_EPSILONHEAP_HPP
27 #define SHARE_GC_EPSILON_EPSILONHEAP_HPP
28
29 #include "gc/epsilon/epsilonBarrierSet.hpp"
30 #include "gc/epsilon/epsilonMonitoringSupport.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/markBitMap.hpp"
33 #include "gc/shared/softRefPolicy.hpp"
34 #include "gc/shared/space.hpp"
35 #include "memory/virtualspace.hpp"
36 #include "services/memoryManager.hpp"
37
38 class EpsilonHeap : public CollectedHeap {
39 friend class VMStructs;
40 private:
41 SoftRefPolicy _soft_ref_policy;
42 EpsilonMonitoringSupport* _monitoring_support;
43 MemoryPool* _pool;
44 GCMemoryManager _memory_manager;
45 ContiguousSpace* _space;
46 VirtualSpace _virtual_space;
47 size_t _max_tlab_size;
48 size_t _step_counter_update;
49 size_t _step_heap_print;
50 int64_t _decay_time_ns;
51 volatile size_t _last_counter_update;
52 volatile size_t _last_heap_print;
53 MemRegion _bitmap_region;
54 MarkBitMap _bitmap;
55
56 public:
57 static EpsilonHeap* heap();
58
59 EpsilonHeap() :
60 _memory_manager("Epsilon Heap"),
61 _space(nullptr) {};
62
63 Name kind() const override {
64 return CollectedHeap::Epsilon;
65 }
66
67 const char* name() const override {
68 return "Epsilon";
69 }
70
71 SoftRefPolicy* soft_ref_policy() override {
72 return &_soft_ref_policy;
73 }
74
78 GrowableArray<GCMemoryManager*> memory_managers() override;
79 GrowableArray<MemoryPool*> memory_pools() override;
80
81 size_t max_capacity() const override { return _virtual_space.reserved_size(); }
82 size_t capacity() const override { return _virtual_space.committed_size(); }
83 size_t used() const override { return _space->used(); }
84
85 bool is_in(const void* p) const override {
86 return _space->is_in(p);
87 }
88
89 bool requires_barriers(stackChunkOop obj) const override { return false; }
90
91 bool is_maximal_no_gc() const override {
92 // No GC is going to happen. Return "we are at max", when we are about to fail.
93 return used() == capacity();
94 }
95
96 // Allocation
97 HeapWord* allocate_work(size_t size, bool verbose = true);
98 HeapWord* allocate_or_collect_work(size_t size, bool verbose = true);
99 HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
100 HeapWord* allocate_new_tlab(size_t min_size,
101 size_t requested_size,
102 size_t* actual_size) override;
103
104 // TLAB allocation
105 size_t tlab_capacity(Thread* thr) const override { return capacity(); }
106 size_t tlab_used(Thread* thr) const override { return used(); }
107 size_t max_tlab_size() const override { return _max_tlab_size; }
108 size_t unsafe_max_tlab_alloc(Thread* thr) const override;
109
110 void collect(GCCause::Cause cause) override;
111 void do_full_collection(bool clear_all_soft_refs) override;
112
113 // Heap walking support
114 void object_iterate(ObjectClosure* cl) override;
115
116 // Object pinning support: every object is implicitly pinned
117 void pin_object(JavaThread* thread, oop obj) override;
118 void unpin_object(JavaThread* thread, oop obj) override;
119
120 // No support for block parsing.
121 HeapWord* block_start(const void* addr) const { return nullptr; }
122 bool block_is_obj(const HeapWord* addr) const { return false; }
123
124 // No GC threads
125 void gc_threads_do(ThreadClosure* tc) const override {}
126
127 // No nmethod handling
128 void register_nmethod(nmethod* nm) override {}
129 void unregister_nmethod(nmethod* nm) override {}
130 void verify_nmethod(nmethod* nm) override {}
131
132 // No heap verification
133 void prepare_for_verify() override {}
134 void verify(VerifyOption option) override {}
135
136 MemRegion reserved_region() const { return _reserved; }
137 bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
138
139 // Support for loading objects from CDS archive into the heap
140 bool can_load_archived_objects() const override { return UseCompressedOops; }
141 HeapWord* allocate_loaded_archive_space(size_t size) override;
142
143 void print_on(outputStream* st) const override;
144 void print_tracing_info() const override;
145 bool print_location(outputStream* st, void* addr) const override;
146
147 void entry_collect(GCCause::Cause cause);
148
149 private:
150 void print_heap_info(size_t used) const;
151 void print_metaspace_info() const;
152
153 void vmentry_collect(GCCause::Cause cause);
154
155 void process_roots(OopClosure* cl);
156 void walk_bitmap(ObjectClosure* cl);
157
158 };
159
160 #endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP
|