/*
 * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
 * Copyright (c) 2023, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceContext.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceCriticalAllocation.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"

using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;

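// The following MetaspaceUtils accessors are thin wrappers around the global
// running counters; all of them report sizes in words.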
size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}

// Helper for get_statistics()
static void get_values_for(Metaspace::MetadataType mdtype, size_t* reserved, size_t* committed, size_t* used) {
#define w2b(x) (x * sizeof(MetaWord))
  if (mdtype == Metaspace::ClassType) {
    *reserved = w2b(RunningCounters::reserved_words_class());
    *committed = w2b(RunningCounters::committed_words_class());
    *used = w2b(RunningCounters::used_words_class());
  } else {
    *reserved = w2b(RunningCounters::reserved_words_nonclass());
    *committed = w2b(RunningCounters::committed_words_nonclass());
    *used = w2b(RunningCounters::used_words_nonclass());
  }
#undef w2b
}

// Retrieve all statistics in one go; make sure the values are consistent.
MetaspaceStats MetaspaceUtils::get_statistics(Metaspace::MetadataType mdtype) {

  // Consistency:
  // This function reads three values (reserved, committed, used) from different counters. These counters
  // may (very rarely) be out of sync. This has been a source for intermittent test errors in the past
  //  (see e.g. JDK-8237872, JDK-8151460).
  // - reserved and committed counters are updated under protection of Metaspace_lock; an inconsistency
  //   between them can be the result of a dirty read.
  // - used is an atomic counter updated outside any lock range; there is no way to guarantee
  //   a clean read wrt the other two values.
  // Reading these values under lock protection would only help for the first case. Therefore
  //   we don't bother and just re-read several times, then give up and correct the values.

  size_t r = 0, c = 0, u = 0; // Note: byte values.
  get_values_for(mdtype, &r, &c, &u);
  int retries = 10;
  // If the first retrieval resulted in inconsistent values, retry a bit...
  while ((r < c || c < u) && --retries >= 0) {
    get_values_for(mdtype, &r, &c, &u);
  }
  if (c < u || r < c) { // still inconsistent.
    // ... but not endlessly. If we don't get consistent values, correct them on the fly.
    // The logic here is that we trust the used counter - it's an atomic counter and whatever we see
    // must have been the truth once - and from that we reconstruct a likely set of committed/reserved
    // values.
    metaspace::InternalStats::inc_num_inconsistent_stats();
    if (c < u) {
      c = align_up(u, Metaspace::commit_alignment());
    }
    if (r < c) {
      r = align_up(c, Metaspace::reserve_alignment());
    }
  }
  return MetaspaceStats(r, c, u);
}

MetaspaceCombinedStats MetaspaceUtils::get_combined_statistics() {
  return MetaspaceCombinedStats(get_statistics(Metaspace::ClassType), get_statistics(Metaspace::NonClassType));
}

void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_meta_values) {
  // Get values now:
  const MetaspaceCombinedStats meta_values = get_combined_statistics();

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  //   unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.
  MetaspaceCombinedStats stats = get_combined_statistics();
  out->print_cr(" Metaspace       "
                "used "      SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved "  SIZE_FORMAT "K",
                stats.used()/K,
                stats.committed()/K,
                stats.reserved()/K);

  if (Metaspace::using_class_space()) {
    out->print_cr("  class space    "
                  "used "      SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved "  SIZE_FORMAT "K",
                  stats.class_space_stats().used()/K,
                  stats.class_space_stats().committed()/K,
                  stats.class_space_stats().reserved()/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif

////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

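// Returns the current metaspace commit high-water mark; committing beyond it
// triggers a GC.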
size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::reserve_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != nullptr) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != nullptr) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != nullptr) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != nullptr) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

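// Decrease the _capacity_until_GC limit counter by v bytes (v must be aligned
// to the commit granule size); returns the new, decremented value.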
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  // capacity_until_GC may have been decreased concurrently and may
  // temporarily be lower than what metaspace has committed. Allow for that.
  size_t left_until_GC = capacity_until_gc > committed_bytes ?
      capacity_until_gc - committed_bytes : 0;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const double used_after_gc = (double)MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
                               (double) minimum_desired_capacity / (double) K,
                               (double) expand_bytes / (double) K,
                               (double) MinMetaspaceExpansion / (double) K,
                               (double) new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             (double) minimum_desired_capacity / (double) K, (double) maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
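      // Take current_shrink_factor percent of the excess (e.g. a factor of 40
      // shrinks by 40% of it); any truncation from the integer division is
      // absorbed by the commit alignment below.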
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               (double) MetaspaceSize / (double) K, (double) maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               (double) shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, (double) MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}


//////  Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = nullptr;

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != nullptr
      LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != nullptr) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: " SIZE_FORMAT,
               p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT,
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);
}

// Returns true if class space has been set up (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != nullptr;
}

// Reserve a range of memory that is to contain narrow Klass IDs. If "optimize_for_zero_base"
// is true, we will attempt to reserve memory suitable for zero-based encoding.
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size, bool optimize_for_zero_base) {
  char* result = nullptr;

  NOT_ZERO(result =
      (char*) CompressedKlassPointers::reserve_address_space_for_compressed_classes(size, RandomizeClassSpaceLocation,
                                                                                    optimize_for_zero_base));

  if (result == nullptr) {
    // Fallback: reserve anywhere
    log_debug(metaspace, map)("Trying anywhere...");
    result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), false);
  }

  // Wrap resulting range in ReservedSpace
  ReservedSpace rs;
  if (result != nullptr) {
    log_debug(metaspace, map)("Mapped at " PTR_FORMAT, p2i(result));
    assert(is_aligned(result, Metaspace::reserve_alignment()), "Alignment too small for metaspace");
    rs = ReservedSpace::space_for_range(result, size, Metaspace::reserve_alignment(),
                                                      os::vm_page_size(), false, false);
  } else {
    log_debug(metaspace, map)("Failed to map.");
    rs = ReservedSpace();
  }
  return rs;
}
#endif // _LP64

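// The alignment, in words, for metaspace address range reservations; this is
// the size of a root chunk.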
size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

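// The granularity, in words, at which metaspace memory is committed (one
// commit granule).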
size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from Settings::---
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  //  to commit for the Metaspace.
  //  It is just a number; a limit we compare against before committing. It
  //  does not have to be aligned to anything.
  //  It gets used as compare value before attempting to increase the metaspace
  //  commit charge. It defaults to max_uintx (unlimited).
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  //  pre-reserve for the compressed class space (if we use class space).
  //  This size has to be aligned to the metaspace reserve alignment (to the
  //  size of a root chunk). It gets aligned up from whatever value the caller
  //  gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  //  very little to do with each other. The notion often encountered:
  //  MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  //  is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  //  in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  //  save on reserved space, and to make ergonomics less confusing.

  MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment());

  if (UseCompressedClassPointers) {
    // Let CCS size not be larger than 80% of MaxMetaspaceSize. Note that this is
    // grossly over-dimensioned for most usage scenarios; the typical ratio of
    // class space : non-class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though, since ccs is only
    // reserved and will be committed on demand only.
    size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10);
    size_t adjusted_ccs_size = MIN2(CompressedClassSpaceSize, max_ccs_size);

    // CCS must be aligned to root chunk size, and be at least the size of one
    //  root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());

    // Note: re-adjusting may have us left with a CompressedClassSpaceSize
    //  larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    //  Let's just live with that; it's not a big deal.

    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to " SIZE_FORMAT ".",
                          CompressedClassSpaceSize);
    }
  }

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());

}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we do not prealloc init chunks anymore is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  if (CDSConfig::is_dumping_static_archive()) {
    if (!CDSConfig::is_dumping_final_static_archive()) {
      assert(!UseSharedSpaces, "sanity");
    }
    MetaspaceShared::initialize_for_static_dump();
  }

  // If UseCompressedClassPointers=1, we have two cases:
  // a) if CDS is active (runtime, Xshare=on), it will create the class space
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
  //    we will create the class space on our own. It will be placed above the java heap,
  //    since we assume the heap has been placed in low
  //    address regions. We may rethink this (see JDK-8244943). Failing that,
  //    it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (UseSharedSpaces) {
    if (!FLAG_IS_DEFAULT(CompressedClassSpaceBaseAddress)) {
      log_warning(metaspace)("CDS active - ignoring CompressedClassSpaceBaseAddress.");
    }
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false.
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!UseSharedSpaces, "CDS archive is not mapped at this point");

    // case (b) (No CDS)
    ReservedSpace rs;
    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());

    // If CompressedClassSpaceBaseAddress is set, we attempt to force-map class space to
    // the given address. This is a debug-only feature aiding tests. Due to the ASLR lottery
    // this may fail, in which case the VM will exit after printing an appropriate message.
    // Tests using this switch should cope with that.
    if (CompressedClassSpaceBaseAddress != 0) {
      const address base = (address)CompressedClassSpaceBaseAddress;
      if (!is_aligned(base, Metaspace::reserve_alignment())) {
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " invalid "
                    "(must be aligned to " SIZE_FORMAT_X ").",
                    CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
      }
      rs = ReservedSpace(size, Metaspace::reserve_alignment(),
                         os::vm_page_size() /* large */, (char*)base);
      if (rs.is_reserved()) {
        log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
      } else {
        LogTarget(Debug, metaspace) lt;
        if (lt.is_enabled()) {
          LogStream ls(lt);
          os::print_memory_mappings((char*)base, size, &ls);
        }
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given, but reserving class space failed.",
                CompressedClassSpaceBaseAddress));
      }
    }

    // ...failing that, reserve anywhere, but let platform do optimized placement:
    if (!rs.is_reserved()) {
      log_info(metaspace)("Reserving compressed class space anywhere");
      rs = Metaspace::reserve_address_space_for_compressed_classes(size, true);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: " SIZE_FORMAT " bytes",
                   CompressedClassSpaceSize));
    }

    // Mark class space as such
    MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());
  }

#endif

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

  // We must prevent the very first address of the ccs from being used to store
  // metadata, since that address would translate to a narrow pointer of 0, and the
  // VM does not distinguish between "narrow 0 as in null" and "narrow 0 as in start
  //  of ccs".
  // Before Elastic Metaspace that did not happen due to the fact that every Metachunk
  // had a header and therefore could not allocate anything at offset 0.
#ifdef _LP64
  if (using_class_space()) {
    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
    // start of ccs and do not use it for anything.
    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
  }
#endif

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif

}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

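// The largest single allocation metaspace can service, in words; this equals
// the word size of the largest possible chunk.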
size_t Metaspace::max_allocation_word_size() {
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE;
}

// This version of Metaspace::allocate does not throw OOM but simply returns null, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (" SIZE_FORMAT ")", word_size);

  assert(loader_data != nullptr, "Should never pass around a null loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Deal with concurrent unloading failed allocation starvation
  MetaspaceCriticalAllocation::block_if_concurrent_purge();

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result != nullptr) {
    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return nullptr;  // caller does a CHECK_NULL too
  }

  MetaWord* result = allocate(loader_data, word_size, type);

  if (result == nullptr) {
    MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }

    if (result == nullptr) {
      report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
      assert(HAS_PENDING_EXCEPTION, "sanity");
      return nullptr;
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != nullptr) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

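// Human-readable name for a MetadataType, used in logging and reports.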
const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return nullptr;
  }
}

void Metaspace::purge(bool classes_unloaded) {
  // The MetaspaceCritical_lock is used by a concurrent GC to block out concurrent metaspace
  // allocations that would starve critical metaspace allocations, which are about to throw
  // OOM if they fail; they need precedence for correctness.
  MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
  if (classes_unloaded) {
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    if (cm != nullptr) {
      cm->purge();
    }
    if (using_class_space()) {
      cm = ChunkManager::chunkmanager_class();
      if (cm != nullptr) {
        cm->purge();
      }
    }
  }

  // Try to satisfy queued metaspace allocation requests.
  //
  // It might seem unnecessary to try to process allocation requests if no
  // classes have been unloaded. However, this call is required for the code
  // in MetaspaceCriticalAllocation::try_allocate_critical to work.
  MetaspaceCriticalAllocation::process();
}

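// Returns true if ptr points into metaspace, including the shared (CDS)
// metaspace.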
bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && VirtualSpaceList::vslist_class()->contains((MetaWord*)ptr)) {
    return true;
  }

  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}