/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

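// Global heap instance, installed by the ZHeap constructor below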
ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(),
    _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(&_workers),
    _unload(&_workers),
    _serviceability(min_capacity(), max_capacity()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(_page_allocator.stats());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}

uint ZHeap::active_workers() const {
  return _workers.active_workers();
}

void ZHeap::set_active_workers(uint nworkers) {
  _workers.set_active_workers(nworkers);
}

void ZHeap::threads_do(ThreadClosure* tc) const {
  _page_allocator.threads_do(tc);
  _workers.threads_do(tc);
}

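// Record an out-of-memory event. Note that this only logs and updates
// statistics; handling of the failed allocation is left to the caller.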
void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

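// The reclaimed flag tells the page allocator whether the freed memory
// should be accounted as reclaimed by the collector, as opposed to an
// undone allocation (see undo_alloc_page() above)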
void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

void ZHeap::free_pages(const ZArray<ZPage*>* pages, bool reclaimed) {
  // Remove page table entries
  ZArrayIterator<ZPage*> iter(pages);
  for (ZPage* page; iter.next(&page);) {
    _page_table.remove(page);
  }

  // Free pages
  _page_allocator.free_pages(pages, reclaimed);
}

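// Flip the global address view. The ZVerifyViewsFlip guard unmaps all
// pages before the flip and remaps them afterwards, but only when
// running with -XX:+ZVerifyViews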
void ZHeap::flip_to_marked() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_marked();
}

void ZHeap::flip_to_remapped() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_remapped();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(_page_allocator.stats());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatHeap::set_at_mark_end(_page_allocator.stats());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Prepare to unload stale metadata and nmethods
  _unload.prepare();

  // Notify JVMTI that some tagmap entry objects may have died.
  JvmtiTagMap::set_needs_cleaning();

  return true;
}

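// Free resources (e.g. mark stack space) used during marking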
void ZHeap::mark_free() {
  _mark.free();
}

void ZHeap::keep_alive(oop obj) {
  ZBarrier::keep_alive_barrier_on_oop(obj);
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

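// Empty closure. Executing a handshake with it is enough to force each
// Java thread through a thread-local handshake, which is all that the
// rendezvous in process_non_strong_references() below requires.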
class ZRendezvousClosure : public HandshakeClosure {
public:
  ZRendezvousClosure() :
      HandshakeClosure("ZRendezvous") {}

  void do_thread(Thread* thread) {}
};

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Unlink stale metadata and nmethods
  _unload.unlink();

  // Perform a handshake. This is needed 1) to make sure that stale
  // metadata and nmethods are no longer observable, and 2) to prevent
  // the race where a mutator first loads an oop that is logically null
  // but not yet cleared. If that oop is then cleared by the reference
  // processor and resurrection is unblocked, the mutator could see the
  // unblocked state and pass the invalid oop through the normal barrier
  // path, which would incorrectly try to mark the oop.
  ZRendezvousClosure cl;
  Handshake::execute(&cl);

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Purge stale metadata and nmethods that were unlinked
  _unload.purge();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::free_empty_pages(ZRelocationSetSelector* selector, int bulk) {
  // Freeing empty pages in bulk is an optimization that avoids grabbing
  // the page allocator lock and trying to satisfy stalled allocations
  // too frequently.
  if (selector->should_free_empty_pages(bulk)) {
    free_pages(selector->empty_pages(), true /* reclaimed */);
    selector->clear_empty_pages();
  }
}

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register empty page
      selector.register_empty_page(page);

      // Reclaim empty pages in bulk
      free_empty_pages(&selector, 64 /* bulk */);
    }
  }

  // Reclaim remaining empty pages
  free_empty_pages(&selector, 0 /* bulk */);

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select relocation set
  selector.select();

  // Install relocation set
  _relocation_set.install(&selector);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.stats());
  ZStatHeap::set_at_select_relocation_set(selector.stats());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading stale metadata and nmethods
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatHeap::set_at_relocate_start(_page_allocator.stats());

  // Notify JVMTI
  JvmtiTagMap::set_needs_rehashing();
}

void ZHeap::relocate() {
  // Relocate relocation set
  _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated());
}

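// Note: the caller is expected to pass an address that is backed by a
// page, since the page table entry is dereferenced without a NULL check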
bool ZHeap::is_allocating(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->is_allocating();
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  ZHeapIterator iter(1 /* nworkers */, visit_weaks);
  iter.object_iterate(cl, 0 /* worker_id */);
}

ParallelObjectIterator* ZHeap::parallel_object_iterator(uint nworkers, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return new ZHeapIterator(nworkers, visit_weaks);
}

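// Visit all pages in the page table, and then any pages held by the
// page allocator that are not inserted in the page table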
void ZHeap::pages_do(ZPageClosure* cl) {
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    cl->do_page(page);
  }
  _page_allocator.pages_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_cycle_memory_manager() {
  return _serviceability.cycle_memory_manager();
}

GCMemoryManager* ZHeap::serviceability_pause_memory_manager() {
  return _serviceability.pause_memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  st->print_cr("ZGC Page Table:");
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();
}

bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
  if (LocationPrinter::is_valid_obj((void*)addr)) {
    st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad");
    ZOop::from_address(addr)->print_on(st);
    return true;
  }

  return false;
}

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}