/*
 * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
 * Copyright (c) 2023, 2025, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsConfig.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceContext.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceCriticalAllocation.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}

// Helper for get_statistics()
static void get_values_for(Metaspace::MetadataType mdtype, size_t* reserved, size_t* committed, size_t* used) {
#define w2b(x) (x * sizeof(MetaWord))
  if (mdtype == Metaspace::ClassType) {
    *reserved = w2b(RunningCounters::reserved_words_class());
    *committed = w2b(RunningCounters::committed_words_class());
    *used = w2b(RunningCounters::used_words_class());
  } else {
    *reserved = w2b(RunningCounters::reserved_words_nonclass());
    *committed = w2b(RunningCounters::committed_words_nonclass());
    *used = w2b(RunningCounters::used_words_nonclass());
  }
#undef w2b
}

// Retrieve all statistics in one go; make sure the values are consistent.
MetaspaceStats MetaspaceUtils::get_statistics(Metaspace::MetadataType mdtype) {

  // Consistency:
  // This function reads three values (reserved, committed, used) from different counters. These counters
  // may (very rarely) be out of sync. This has been a source for intermittent test errors in the past
  //  (see e.g. JDK-8237872, JDK-8151460).
  // - reserved and committed counters are updated under protection of Metaspace_lock; an inconsistency
  //   between them can be the result of a dirty read.
  // - used is an atomic counter updated outside any lock range; there is no way to guarantee
  //   a clean read wrt the other two values.
  // Reading these values under lock protection would only help for the first case. Therefore
  //   we don't bother and just re-read several times, then give up and correct the values.
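  //
  // Illustrative example (made-up numbers): if a racy read yields used = 10M but
  //   committed = 8M, we keep used as-is, lift committed to align_up(used, commit
  //   granule) and, if still needed, lift reserved to align_up(committed, reserve granule).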

  size_t r = 0, c = 0, u = 0; // Note: byte values.
  get_values_for(mdtype, &r, &c, &u);
  int retries = 10;
  // If the first retrieval resulted in inconsistent values, retry a bit...
  while ((r < c || c < u) && --retries >= 0) {
    get_values_for(mdtype, &r, &c, &u);
  }
  if (c < u || r < c) { // still inconsistent.
    // ... but not endlessly. If we don't get consistent values, correct them on the fly.
    // The logic here is that we trust the used counter - it's an atomic counter and whatever we see
    // must have been the truth once - and from that we reconstruct a likely set of committed/reserved
    // values.
    metaspace::InternalStats::inc_num_inconsistent_stats();
    if (c < u) {
      c = align_up(u, Metaspace::commit_alignment());
    }
    if (r < c) {
      r = align_up(c, Metaspace::reserve_alignment());
    }
  }
  return MetaspaceStats(r, c, u);
}

MetaspaceCombinedStats MetaspaceUtils::get_combined_statistics() {
  return MetaspaceCombinedStats(get_statistics(Metaspace::ClassType), get_statistics(Metaspace::NonClassType));
}

void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_meta_values) {
  // Get values now:
  const MetaspaceCombinedStats meta_values = get_combined_statistics();

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  //   unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

// This will print out a basic metaspace usage report but,
// unlike print_report(), is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // First prints out totals, then, separately, the class space portion.
  MetaspaceCombinedStats stats = get_combined_statistics();
  out->print("Metaspace ");
  out->fill_to(17);
  out->print_cr("used %zuK, "
                "committed %zuK, "
                "reserved %zuK",
                stats.used()/K,
                stats.committed()/K,
                stats.reserved()/K);

  if (Metaspace::using_class_space()) {
    StreamAutoIndentor indentor(out, 1);
    out->print("class space ");
    out->fill_to(17);
    out->print_cr("used %zuK, "
                  "committed %zuK, "
                  "reserved %zuK",
                  stats.class_space_stats().used()/K,
                  stats.class_space_stats().committed()/K,
                  stats.class_space_stats().reserved()/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif

////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the permanent generation, the increase
// was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
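//
// Illustrative example (the flag values below are made up for this sketch, not
// guaranteed defaults): with MinMetaspaceExpansion = 256K and
// MaxMetaspaceExpansion = 4M, a 100K request yields a 256K delta, a 1M request
// yields 4M, and a 16M request yields 16M plus 256K, everything rounded to the
// commit alignment.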
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
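//
// A typical caller pattern (a sketch only, not a verbatim call site): expand the
// HWM in a CAS-retry loop and back off once can_retry reports that the limit
// cannot be raised any further:
//
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(requested_bytes);
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, nullptr, nullptr, &can_retry)) {
//     if (!can_retry) {
//       break; // MaxMetaspaceSize reached; trigger a GC or report OOM instead.
//     }
//   }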
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::reserve_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != nullptr) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != nullptr) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != nullptr) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != nullptr) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by %zu words (CompressedClassSpaceSize = %zu words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by %zu words (MaxMetaspaceSize = %zu words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  // capacity_until_GC may have been decreased concurrently and may
  // temporarily be lower than what metaspace has committed. Allow for that.
  size_t left_until_GC = capacity_until_gc > committed_bytes ?
      capacity_until_gc - committed_bytes : 0;
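  // Illustrative numbers (made up): committed = 500M, capacity_until_GC = 600M,
  // MaxMetaspaceSize = 1G  =>  left_until_GC = 100M, left_until_max = 524M, so up
  // to 100M more may be committed before a GC is triggered.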
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: %zu"
            " (left_until_max: %zu, left_until_GC: %zu).",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const double used_after_gc = (double)MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
                               (double) minimum_desired_capacity / (double) K,
                               (double) expand_bytes / (double) K,
                               (double) MinMetaspaceExpansion / (double) K,
                               (double) new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         "%zu >= %zu",
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             (double) minimum_desired_capacity / (double) K, (double) maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
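      //
      // Illustrative progression (made-up excess of 100M above the desired
      // capacity): the first GC shrinks by 0, the second by 10M, the third by
      // 40M and from the fourth on by the full 100M, everything rounded down
      // to the commit alignment.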
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size %zu not <= %zu",
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               (double) MetaspaceSize / (double) K, (double) maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               (double) shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, (double) MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

//////  Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = nullptr;
const void* Metaspace::_class_space_start = nullptr;
const void* Metaspace::_class_space_end = nullptr;

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != nullptr
      LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != nullptr) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: %zu",
               p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(rs.size() >= CompressedClassSpaceSize,
         "%zu != %zu", rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, "%zu != %zu",
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);
  _class_space_start = rs.base();
  _class_space_end = rs.end();
}

// Returns true if class space has been set up (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != nullptr;
}

// Reserve a range of memory that is to contain narrow Klass IDs. If "optimize_for_zero_base"
// is true, we will attempt to reserve memory suitable for zero-based encoding.
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size, bool optimize_for_zero_base) {
  char* result = nullptr;

  NOT_ZERO(result =
      (char*) CompressedKlassPointers::reserve_address_space_for_compressed_classes(size, RandomizeClassSpaceLocation,
                                                                                    optimize_for_zero_base));

  if (result == nullptr) {
    // Fallback: reserve anywhere
    log_debug(metaspace, map)("Trying anywhere...");
    result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), mtClass);
  }

  // Wrap resulting range in ReservedSpace
  if (result != nullptr) {
    log_debug(metaspace, map)("Mapped at " PTR_FORMAT, p2i(result));
    assert(is_aligned(result, Metaspace::reserve_alignment()), "Alignment too small for metaspace");

    return ReservedSpace(result,
                         size,
                         Metaspace::reserve_alignment(),
                         os::vm_page_size(),
                         !ExecMem,
                         false /* special */);
  } else {
    log_debug(metaspace, map)("Failed to map.");
    return {};
  }
}
#endif // _LP64

size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from metaspace::Settings.
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  //  to commit for the Metaspace.
  //  It is just a number; a limit we compare against before committing. It
  //  does not have to be aligned to anything.
  //  It gets used as compare value before attempting to increase the metaspace
  //  commit charge. It defaults to max_uintx (unlimited).
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  //  pre-reserve for the compressed class space (if we use class space).
  //  This size has to be aligned to the metaspace reserve alignment (to the
  //  size of a root chunk). It gets aligned up from whatever value the caller
  //  gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  //  very little to do with each other. The notion often encountered:
  //  MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  //  is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  //  in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  //  save on reserved space, and to make ergonomics less confusing.
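  //
  // Illustrative example (made-up flag values): with -XX:MaxMetaspaceSize=100M and
  //  -XX:CompressedClassSpaceSize=1G, the class space is capped at 80M (80% of
  //  MaxMetaspaceSize) and then aligned to the root chunk size; beyond that, the
  //  two flags are independent of each other.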

  MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment());

  if (UseCompressedClassPointers) {
    // Let Class Space not be larger than 80% of MaxMetaspaceSize. Note that this is
    // grossly over-dimensioned for most usage scenarios; typical ratio of
    // class space : non class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though since ccs is only
    // reserved and will be committed on demand only.
    const size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10);

    // Sanity check.
    const size_t max_klass_range = CompressedKlassPointers::max_klass_range_size();
    assert(max_klass_range >= reserve_alignment(),
           "Klass range (%zu) must cover at least a full root chunk (%zu)",
           max_klass_range, reserve_alignment());

    size_t adjusted_ccs_size = MIN3(CompressedClassSpaceSize, max_ccs_size, max_klass_range);

    // CCS must be aligned to root chunk size, and be at least the size of one
    //  root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());

    // Print a warning if the adjusted size differs from the user's input.
    if (CompressedClassSpaceSize != adjusted_ccs_size) {
      #define X "CompressedClassSpaceSize adjusted from user input " \
                "%zu bytes to %zu bytes", CompressedClassSpaceSize, adjusted_ccs_size
      if (FLAG_IS_CMDLINE(CompressedClassSpaceSize)) {
        log_warning(metaspace)(X);
      } else {
        log_info(metaspace)(X);
      }
      #undef X
    }

    // Note: re-adjusting may have us left with a CompressedClassSpaceSize
    //  larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    //  Let's just live with that, it's not a big deal.
    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to %zu.",
                          CompressedClassSpaceSize);
    }
  }

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());

}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we do not prealloc init chunks anymore is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  if (CDSConfig::is_dumping_static_archive()) {
    if (!CDSConfig::is_dumping_final_static_archive()) {
      assert(!CDSConfig::is_using_archive(), "sanity");
    }
    MetaspaceShared::initialize_for_static_dump();
  }

  // If UseCompressedClassPointers=1, we have two cases:
  // a) if CDS is active (runtime, Xshare=on), it will create the class space
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
  //    we will create the class space on our own. It will be placed above the java heap,
  //    since we assume the heap has been placed in low
  //    address regions. We may rethink this (see JDK-8244943). Failing that,
  //    it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (CDSConfig::is_using_archive()) {
    if (!FLAG_IS_DEFAULT(CompressedClassSpaceBaseAddress)) {
      log_warning(metaspace)("CDS active - ignoring CompressedClassSpaceBaseAddress.");
    }
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false.
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!CDSConfig::is_using_archive(), "CDS archive is not mapped at this point");

    // case (b) (No CDS)
    ReservedSpace rs;
    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());

    // If CompressedClassSpaceBaseAddress is set, we attempt to force-map class space to
    // the given address. This is a debug-only feature aiding tests. Due to the ASLR lottery
    // this may fail, in which case the VM will exit after printing an appropriate message.
    // Tests using this switch should cope with that.
    if (CompressedClassSpaceBaseAddress != 0) {
      const address base = (address)CompressedClassSpaceBaseAddress;
      if (!is_aligned(base, Metaspace::reserve_alignment())) {
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " invalid "
                    "(must be aligned to 0x%zx).",
                    CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
      }

      rs = MemoryReserver::reserve((char*)base,
                                   size,
                                   Metaspace::reserve_alignment(),
                                   os::vm_page_size(),
                                   mtClass);

      if (rs.is_reserved()) {
        log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
      } else {
        LogTarget(Debug, metaspace) lt;
        if (lt.is_enabled()) {
          LogStream ls(lt);
          os::print_memory_mappings((char*)base, size, &ls);
        }
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given, but reserving class space failed.",
                CompressedClassSpaceBaseAddress));
      }
    }

    // ...failing that, reserve anywhere, but let platform do optimized placement:
    if (!rs.is_reserved()) {
      log_info(metaspace)("Reserving compressed class space anywhere");
      rs = Metaspace::reserve_address_space_for_compressed_classes(size, true);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: %zu bytes",
                   CompressedClassSpaceSize));
    }

    // Mark class space as such
    MemTracker::record_virtual_memory_tag(rs, mtClass);

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    // In CDS=off mode, we give the JVM some leeway to choose a favorable base/shift combination.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());

    // After narrowKlass encoding scheme is decided: if the encoding base points to class space start,
    // establish a protection zone. Accidentally decoding a zero nKlass ID and then using it will result
    // in an immediate segmentation fault instead of a delayed error much later.
    if (CompressedKlassPointers::base() == (address)rs.base()) {
      // Let the protection zone be a whole commit granule. Otherwise, buddy allocator may later place neighboring
      // chunks in the same granule, see that the granule is not yet committed, and commit it, which would replace
      // the protection mapping and make the zone readable.
      // Alternatively, we could commit the chunk right now, but that is a tiny bit more fiddly, since we are not
      // fully set up yet at this point.
      const size_t protzone_size = metaspace::Settings::commit_granule_bytes(); // granule size >= page size
      const size_t protzone_wordsize = protzone_size / BytesPerWord;
      const metaspace::chunklevel_t lvl = metaspace::chunklevel::level_fitting_word_size(protzone_wordsize);
      metaspace::Metachunk* const chunk = MetaspaceContext::context_class()->cm()->get_chunk(lvl);
      const address protzone = (address) chunk->base();
      assert(protzone == (address)rs.base(), "The very first chunk should be located at the class space start?");
      assert(chunk->word_size() == protzone_wordsize, "Weird chunk size");
      CompressedKlassPointers::establish_protection_zone(protzone, protzone_size);
    } else {
      assert(CompressedKlassPointers::base() == nullptr, "Zero-based encoding expected");
    }

  }

#endif // _LP64

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif

}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

size_t Metaspace::max_allocation_word_size() {
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE;
}

// This version of Metaspace::allocate does not throw OOM but simply returns null, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (%zu)", word_size);

  assert(loader_data != nullptr, "Should never pass around a null loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Deal with concurrent unloading failed allocation starvation
  MetaspaceCriticalAllocation::block_if_concurrent_purge();

  MetadataType mdtype = use_class_space ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result != nullptr) {
#ifdef ASSERT
    if (using_class_space() && mdtype == ClassType) {
      assert(is_in_class_space(result) &&
             is_aligned(result, CompressedKlassPointers::klass_alignment_in_bytes()), "Sanity");
    } else {
      assert((is_in_class_space(result) || is_in_nonclass_metaspace(result)) &&
             is_aligned(result, Metaspace::min_allocation_alignment_bytes), "Sanity");
    }
#endif
    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);
    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space, TRAPS) {

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return nullptr;  // caller does a CHECK_NULL too
  }
  // leyden/premain: temporarily disabled due to JDK-8327737
  // assert(!THREAD->owns_locks(), "allocating metaspace while holding mutex");

  MetaWord* result = allocate(loader_data, word_size, type, use_class_space);

  if (result == nullptr) {
    MetadataType mdtype = use_class_space ? ClassType : NonClassType;
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }

    if (result == nullptr) {
      report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
      assert(HAS_PENDING_EXCEPTION, "sanity");
      return nullptr;
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  {
    LogMessage(gc, metaspace, freelist, oom) log;
    if (log.is_info()) {
      log.info("Metaspace (%s) allocation failed for size %zu",
               is_class_space_allocation(mdtype) ? "class" : "data", word_size);
      ResourceMark rm;
      if (log.is_debug()) {
        if (loader_data->metaspace_or_null() != nullptr) {
          NonInterleavingLogStream ls(LogLevelType::Debug, log);
          loader_data->print_value_on(&ls);
        }
      }
      NonInterleavingLogStream ls(LogLevelType::Info, log);
      // In case of an OOM, log out a short but still useful report.
      MetaspaceUtils::print_basic_report(&ls, 0);
    }
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return nullptr;
  }
}

void Metaspace::purge(bool classes_unloaded) {
  // The MetaspaceCritical_lock is used by a concurrent GC to block out concurrent metaspace
  // allocations that would starve critical metaspace allocations, which are about to throw
  // OOM if they fail; they need precedence for correctness.
  MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
  if (classes_unloaded) {
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    if (cm != nullptr) {
      cm->purge();
    }
    if (using_class_space()) {
      cm = ChunkManager::chunkmanager_class();
      if (cm != nullptr) {
        cm->purge();
      }
    }
  }

  // Try to satisfy queued metaspace allocation requests.
  //
  // It might seem unnecessary to try to process allocation requests if no
  // classes have been unloaded. However, this call is required for the code
  // in MetaspaceCriticalAllocation::try_allocate_critical to work.
  MetaspaceCriticalAllocation::process();
}


// Returns true if the pointer points into the metadata region of the CDS archive
// (the shared metaspace).
bool Metaspace::is_in_shared_metaspace(const void* ptr) {
  return MetaspaceShared::is_in_shared_metaspace(ptr);
}

// Returns true if the pointer points into one of the non-class-space metaspace regions.
bool Metaspace::is_in_nonclass_metaspace(const void* ptr) {
  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}