/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zDirector.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zObjArrayAllocator.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "utilities/align.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  return named_heap<ZCollectedHeap>(CollectedHeap::Z);
}

ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _driver(new ZDriver()),
    _director(new ZDirector(_driver)),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

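// Initialization is considered to have failed if the ZHeap instance could
// not set itself up (typically because address space or memory could not
// be reserved). The verify data covers the entire address range, since the
// ZGC heap is not a single contiguous reservation with a fixed base.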
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  Universe::calculate_verify_data((HeapWord*)0, (HeapWord*)UINTPTR_MAX);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

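// Closure used by stop() to shut down the concurrent GC threads
// (director, driver, stat and worker threads) on VM exit.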
class ZStopConcurrentGCThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    if (thread->is_ConcurrentGC_thread()) {
      ConcurrentGCThread::cast(thread)->stop();
    }
  }
};

void ZCollectedHeap::stop() {
  ZStopConcurrentGCThreadClosure cl;
  gc_threads_do(&cl);
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(ZOop::to_address(obj));
}

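// ZGC ignores min_size; a TLAB is either allocated at the requested size or
// not at all. *actual_size is therefore only updated when the allocation
// succeeds (addr != 0).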
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

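// Zeroed array allocations are routed through ZObjArrayAllocator, which can
// clear large arrays in segments rather than in one go, keeping the thread's
// time-to-safepoint bounded. Non-zeroed allocations take the generic path.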
oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  if (!do_zero) {
    return CollectedHeap::array_allocate(klass, size, length, false /* do_zero */, THREAD);
  }

  ZObjArrayAllocator allocator(klass, size, length, THREAD);
  return allocator.allocate();
}

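// Out-of-TLAB object allocation. The size is converted from words to bytes
// before being passed to ZHeap. ZGC does not implement the GC overhead
// limit, so gc_overhead_limit_was_exceeded is left untouched.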
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

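// Called when a metadata allocation has failed. The steps below escalate:
// first request an asynchronous GC and try to expand the metaspace, then
// force a synchronous GC that also clears soft references and retry, then
// try expanding once more before giving up and reporting out of memory.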
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // However, neither the heap dumper nor the heap inspector really needs a GC
  // to happen; the result of their heap iterations might just be less
  // accurate, since they might include objects that would otherwise have
  // been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

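// ZGC scans thread stacks concurrently and therefore relies on the stack
// watermark barrier to keep frames above the watermark safe to use.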
bool ZCollectedHeap::uses_stack_watermark_barrier() const {
  return true;
}

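// Two memory managers are exposed through the serviceability interface:
// one covering full concurrent cycles and one covering only the pauses
// within a cycle.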
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_heap.serviceability_cycle_memory_manager());
  memory_managers.append(_heap.serviceability_pause_memory_manager());
  return memory_managers;
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_heap.serviceability_memory_pool());
  return memory_pools;
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

ParallelObjectIterator* ZCollectedHeap::parallel_object_iterator(uint nworkers) {
  return _heap.parallel_object_iterator(nworkers, true /* visit_weaks */);
}

void ZCollectedHeap::keep_alive(oop obj) {
  _heap.keep_alive(obj);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

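// Expose the runtime workers to safepoint operations that can parallelize
// their work across a work gang.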
WorkGang* ZCollectedHeap::safepoint_workers() {
  return _runtime_workers.workers();
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.threads_do(tc);
  _runtime_workers.threads_do(tc);
}

VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  return VirtualSpaceSummary((HeapWord*)0, (HeapWord*)capacity(), (HeapWord*)max_capacity());
}

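// Safepointing synchronizes with the suspendible thread set, so that
// concurrent GC threads participating in it yield while the safepoint
// is active and resume afterwards.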
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("ZGC Globals:");
  st->print_cr(" GlobalPhase:       %u (%s)", ZGlobalPhase, ZGlobalPhaseToString());
  st->print_cr(" GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr(" Offset Max:        " SIZE_FORMAT "%s (" PTR_FORMAT ")",
               byte_size_in_exact_unit(ZAddressOffsetMax),
               exact_unit_for_byte_size(ZAddressOffsetMax),
               ZAddressOffsetMax);
  st->print_cr(" Page Size Small:   " SIZE_FORMAT "M", ZPageSizeSmall / M);
  st->print_cr(" Page Size Medium:  " SIZE_FORMAT "M", ZPageSizeMedium / M);
  st->cr();
  st->print_cr("ZGC Metadata Bits:");
  st->print_cr(" Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr(" Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr(" WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr(" Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr(" Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
  st->cr();
  CollectedHeap::print_on_error(st);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

bool ZCollectedHeap::print_location(outputStream* st, void* addr) const {
  return _heap.print_location(st, (uintptr_t)addr);
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return _heap.is_oop(ZOop::to_address(object));
}

bool ZCollectedHeap::supports_concurrent_gc_breakpoints() const {
  return true;
}