/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zDirector.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zObjArrayAllocator.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "utilities/align.hpp"

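// Returns the global CollectedHeap instance, downcast to ZCollectedHeap.
// named_heap() asserts that the currently active heap is of kind
// CollectedHeap::Z before performing the cast.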
ZCollectedHeap* ZCollectedHeap::heap() {
  return named_heap<ZCollectedHeap>(CollectedHeap::Z);
}

ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _driver(new ZDriver()),
    _director(new ZDirector(_driver)),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

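// The actual heap setup is done by the ZInitialize and ZHeap member
// constructors, which have already run by the time this is called. Here we
// only report whether that setup succeeded. The verify data spans the
// entire address space, since the ZGC heap is not a single contiguous
// reservation.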
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  Universe::calculate_verify_data((HeapWord*)0, (HeapWord*)UINTPTR_MAX);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

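// Closure used at VM shutdown to stop the concurrent GC threads
// enumerated by gc_threads_do().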
class ZStopConcurrentGCThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    if (thread->is_ConcurrentGC_thread()) {
      ConcurrentGCThread::cast(thread)->stop();
    }
  }
};

void ZCollectedHeap::stop() {
  ZStopConcurrentGCThreadClosure cl;
  gc_threads_do(&cl);
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(ZOop::to_address(obj));
}

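// Allocates a new TLAB. The requested size is rounded up to the object
// alignment before the allocation is attempted. An address of 0 means the
// allocation failed, in which case *actual_size is left untouched.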
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

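// Arrays that need zeroing are allocated through ZObjArrayAllocator, which
// zeroes large arrays in segments instead of in one go, to keep
// time-to-safepoint low while the allocating thread is clearing memory.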
oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  if (!do_zero) {
    return CollectedHeap::array_allocate(klass, size, length, false /* do_zero */, THREAD);
  }

  ZObjArrayAllocator allocator(klass, size, length, THREAD);
  return allocator.allocate();
}

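// Out-of-TLAB allocation path. The gc_overhead_limit_was_exceeded flag is
// not used by this implementation and is never set.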
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

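// Called when a Metaspace allocation has failed. Retries with increasing
// aggressiveness: first an asynchronous GC combined with metaspace
// expansion, then a synchronous GC that also clears soft references,
// followed by a final expansion attempt before giving up and returning NULL.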
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

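// External collection requests are forwarded to the ZDriver, which decides
// per GC cause whether to serve the request synchronously (blocking the
// caller until the cycle completes) or asynchronously.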
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_for_codecache() {
  // Start asynchronous GC
  collect(GCCause::_codecache_GC_threshold);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // Neither the heap dumper nor the heap inspector really needs a GC to
  // happen, but the result of their heap iterations might be less accurate,
  // since they might include objects that would otherwise have been
  // collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

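// ZGC processes thread stacks concurrently and therefore relies on the
// stack watermark barrier to keep not-yet-processed frames from being used.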
bool ZCollectedHeap::uses_stack_watermark_barrier() const {
  return true;
}

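// Serviceability support. ZGC exposes two JMX memory managers, one covering
// complete GC cycles and one covering only the GC pauses, plus a single
// memory pool for the heap.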
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_heap.serviceability_cycle_memory_manager());
  memory_managers.append(_heap.serviceability_pause_memory_manager());
  return memory_managers;
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_heap.serviceability_memory_pool());
  return memory_pools;
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

ParallelObjectIterator* ZCollectedHeap::parallel_object_iterator(uint nworkers) {
  return _heap.parallel_object_iterator(nworkers, true /* visit_weaks */);
}

void ZCollectedHeap::keep_alive(oop obj) {
  _heap.keep_alive(obj);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::safepoint_workers() {
  return _runtime_workers.workers();
}

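// Applies the closure to all GC threads: the director, driver and stat
// threads, the threads owned by ZHeap (such as the GC workers), and the
// runtime workers.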
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.threads_do(tc);
  _runtime_workers.threads_do(tc);
}

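// The ZGC heap is not a single contiguous address range, so the reported
// summary is synthetic: a range starting at address 0, sized by the current
// and maximum capacity.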
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  return VirtualSpaceSummary((HeapWord*)0, (HeapWord*)capacity(), (HeapWord*)max_capacity());
}

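// Concurrent GC threads participate in the safepoint protocol through the
// suspendible thread set: they are synchronized when a safepoint starts and
// desynchronized when it ends.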
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("ZGC Globals:");
  st->print_cr(" GlobalPhase:       %u (%s)", ZGlobalPhase, ZGlobalPhaseToString());
  st->print_cr(" GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr(" Offset Max:        " SIZE_FORMAT "%s (" PTR_FORMAT ")",
               byte_size_in_exact_unit(ZAddressOffsetMax),
               exact_unit_for_byte_size(ZAddressOffsetMax),
               ZAddressOffsetMax);
  st->print_cr(" Page Size Small:   " SIZE_FORMAT "M", ZPageSizeSmall / M);
  st->print_cr(" Page Size Medium:  " SIZE_FORMAT "M", ZPageSizeMedium / M);
  st->cr();
  st->print_cr("ZGC Metadata Bits:");
  st->print_cr(" Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr(" Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr(" WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr(" Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr(" Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
  st->cr();
  CollectedHeap::print_on_error(st);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

bool ZCollectedHeap::print_location(outputStream* st, void* addr) const {
  return _heap.print_location(st, (uintptr_t)addr);
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return _heap.is_oop(ZOop::to_address(object));
}

bool ZCollectedHeap::supports_concurrent_gc_breakpoints() const {
  return true;
}