/*
 * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
 * Copyright (c) 2023, 2025, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsConfig.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceContext.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceCriticalAllocation.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"

using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}

// Helper for get_statistics()
static void get_values_for(Metaspace::MetadataType mdtype, size_t* reserved, size_t* committed, size_t* used) {
#define w2b(x) (x * sizeof(MetaWord))
  if (mdtype == Metaspace::ClassType) {
    *reserved = w2b(RunningCounters::reserved_words_class());
    *committed = w2b(RunningCounters::committed_words_class());
    *used = w2b(RunningCounters::used_words_class());
  } else {
    *reserved = w2b(RunningCounters::reserved_words_nonclass());
    *committed = w2b(RunningCounters::committed_words_nonclass());
    *used = w2b(RunningCounters::used_words_nonclass());
  }
#undef w2b
}

// Retrieve all statistics in one go; make sure the values are consistent.
MetaspaceStats MetaspaceUtils::get_statistics(Metaspace::MetadataType mdtype) {

  // Consistency:
  // This function reads three values (reserved, committed, used) from different counters. These counters
  // may (very rarely) be out of sync. This has been a source of intermittent test errors in the past
  //  (see e.g. JDK-8237872, JDK-8151460).
  // - reserved and committed counters are updated under protection of Metaspace_lock; an inconsistency
  //   between them can be the result of a dirty read.
  // - used is an atomic counter updated outside any lock range; there is no way to guarantee
  //   a clean read wrt the other two values.
  // Reading these values under lock protection would only help for the first case. Therefore
  //   we don't bother and just re-read several times, then give up and correct the values.

  size_t r = 0, c = 0, u = 0; // Note: byte values.
  get_values_for(mdtype, &r, &c, &u);
  int retries = 10;
  // If the first retrieval resulted in inconsistent values, retry a bit...
  while ((r < c || c < u) && --retries >= 0) {
    get_values_for(mdtype, &r, &c, &u);
  }
  if (c < u || r < c) { // still inconsistent.
    // ... but not endlessly. If we don't get consistent values, correct them on the fly.
    // The logic here is that we trust the used counter - it's an atomic counter and whatever we see
    // must have been the truth once - and from that we reconstruct a likely set of committed/reserved
    // values.
    metaspace::InternalStats::inc_num_inconsistent_stats();
    if (c < u) {
      c = align_up(u, Metaspace::commit_alignment());
    }
    if (r < c) {
      r = align_up(c, Metaspace::reserve_alignment());
    }
  }
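  // Worked example (hypothetical numbers): a torn read may yield u = 13*M,
  // c = 12*M, r = 10*M. We trust u, lift c to align_up(u, commit_alignment())
  // and then r to align_up(c, reserve_alignment()), restoring r >= c >= u.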
  return MetaspaceStats(r, c, u);
}

MetaspaceCombinedStats MetaspaceUtils::get_combined_statistics() {
  return MetaspaceCombinedStats(get_statistics(Metaspace::ClassType), get_statistics(Metaspace::NonClassType));
}

void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_meta_values) {
  // Get values now:
  const MetaspaceCombinedStats meta_values = get_combined_statistics();

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  //   unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.
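  //
  // With HEAP_CHANGE_FORMAT the result reads roughly like (hypothetical
  // numbers): "Metaspace: 1250K(1472K)->1250K(1472K)", i.e. used(committed)
  // before and after the GC, repeated for the NonClass and Class portions.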

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}
// This will print out a basic metaspace usage report. Unlike print_report(),
// it is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.
  MetaspaceCombinedStats stats = get_combined_statistics();
  out->print_cr(" Metaspace       "
                "used %zuK, "
                "committed %zuK, "
                "reserved %zuK",
                stats.used()/K,
                stats.committed()/K,
                stats.reserved()/K);

  if (Metaspace::using_class_space()) {
    out->print_cr("  class space    "
                  "used %zuK, "
                  "committed %zuK, "
                  "reserved %zuK",
                  stats.class_space_stats().used()/K,
                  stats.class_space_stats().committed()/K,
                  stats.class_space_stats().reserved()/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif

////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation, after the GC, the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen, the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
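//
// Worked example (assuming hypothetical, commit-granule-aligned flag values
// MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M):
//   bytes = 100K -> delta = 256K (small request: take the small step)
//   bytes = 1M   -> delta = 4M   (medium request: take the large step)
//   bytes = 8M   -> delta = align_up(8M) + 256K (large request: size plus headroom)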
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
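//
// A typical caller retries the CAS-based increment in a loop, re-reading the
// capacity each round. A minimal sketch (not a verbatim caller):
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, nullptr, nullptr, &can_retry)) {
//     if (!can_retry) break; // would exceed MaxMetaspaceSize, give up
//   }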
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::reserve_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != nullptr) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != nullptr) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != nullptr) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != nullptr) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by %zu words (CompressedClassSpaceSize = %zu words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by %zu words (MaxMetaspaceSize = %zu words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  // capacity_until_GC may have been decreased concurrently and may
  // temporarily be lower than what metaspace has committed. Allow for that.
  size_t left_until_GC = capacity_until_gc > committed_bytes ?
      capacity_until_gc - committed_bytes : 0;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: %zu"
            " (left_until_max: %zu, left_until_GC: %zu).",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
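  // Example (hypothetical numbers): committed_bytes = 100*M, MaxMetaspaceSize =
  // 256*M and capacity_until_gc = 120*M give left_until_max = 156*M and
  // left_until_GC = 20*M, so we may commit another 20*M / BytesPerWord words.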

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const double used_after_gc = (double)MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
                               (double) minimum_desired_capacity / (double) K,
                               (double) expand_bytes / (double) K,
                               (double) MinMetaspaceExpansion / (double) K,
                               (double) new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         "%zu >= %zu",
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             (double) minimum_desired_capacity / (double) K, (double) maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
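      // Example (hypothetical numbers): if the uncommitted excess stayed at
      // 100*M across consecutive recomputations, we would shrink by 0*M, then
      // 10*M, then 40*M, then the full 100*M.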
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size %zu not <= %zu",
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               (double) MetaspaceSize / (double) K, (double) maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               (double) shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, (double) MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

//////  Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = nullptr;
const void* Metaspace::_class_space_start = nullptr;
const void* Metaspace::_class_space_end = nullptr;

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != nullptr
      LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != nullptr) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: %zu",
               p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a pre-reserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(rs.size() >= CompressedClassSpaceSize,
         "%zu != %zu", rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, "%zu != %zu",
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);
  _class_space_start = rs.base();
  _class_space_end = rs.end();
}

// Returns true if class space has been set up (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != nullptr;
}

// Reserve a range of memory that is to contain narrow Klass IDs. If "optimize_for_zero_base"
// is true, we will attempt to reserve memory suitable for zero-based encoding.
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size, bool optimize_for_zero_base) {
  char* result = nullptr;

  NOT_ZERO(result =
      (char*) CompressedKlassPointers::reserve_address_space_for_compressed_classes(size, RandomizeClassSpaceLocation,
                                                                                    optimize_for_zero_base));

  if (result == nullptr) {
    // Fallback: reserve anywhere
    log_debug(metaspace, map)("Trying anywhere...");
    result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), false);
  }

  // Wrap resulting range in ReservedSpace
  if (result != nullptr) {
    log_debug(metaspace, map)("Mapped at " PTR_FORMAT, p2i(result));
    assert(is_aligned(result, Metaspace::reserve_alignment()), "Alignment too small for metaspace");

    return ReservedSpace(result,
                         size,
                         Metaspace::reserve_alignment(),
                         os::vm_page_size(),
                         !ExecMem,
                         false /* special */);
  } else {
    log_debug(metaspace, map)("Failed to map.");
    return {};
  }
}
#endif // _LP64

size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from metaspace::Settings.
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  //  to commit for the Metaspace.
  //  It is just a number; a limit we compare against before committing. It
  //  does not have to be aligned to anything.
  //  It gets used as compare value before attempting to increase the metaspace
  //  commit charge. It defaults to max_uintx (unlimited).
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  //  pre-reserve for the compressed class space (if we use class space).
  //  This size has to be aligned to the metaspace reserve alignment (to the
  //  size of a root chunk). It gets aligned up from whatever value the caller
  //  gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  //  very little to do with each other. The notion often encountered:
  //  MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  //  is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  //  in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  //  save on reserved space, and to make ergonomics less confusing.

  MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment());

  if (UseCompressedClassPointers) {
    // Let Class Space not be larger than 80% of MaxMetaspaceSize. Note that this is
    // grossly over-dimensioned for most usage scenarios; the typical ratio of
    // class space : non class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though since ccs is only
    // reserved and will be committed on demand only.
    const size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10);

    // Sanity check.
    const size_t max_klass_range = CompressedKlassPointers::max_klass_range_size();
    assert(max_klass_range >= reserve_alignment(),
           "Klass range (%zu) must cover at least a full root chunk (%zu)",
           max_klass_range, reserve_alignment());

    size_t adjusted_ccs_size = MIN3(CompressedClassSpaceSize, max_ccs_size, max_klass_range);

    // CCS must be aligned to root chunk size, and be at least the size of one
    //  root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());
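
    // Worked example (hypothetical numbers): with MaxMetaspaceSize = 1G,
    // max_ccs_size is ~819M; a user-requested CompressedClassSpaceSize = 2G
    // would be cut down to that value and then aligned to the root chunk size.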

    // Print a warning if the adjusted size differs from the user's input
    if (CompressedClassSpaceSize != adjusted_ccs_size) {
      #define X "CompressedClassSpaceSize adjusted from user input " \
                "%zu bytes to %zu bytes", CompressedClassSpaceSize, adjusted_ccs_size
      if (FLAG_IS_CMDLINE(CompressedClassSpaceSize)) {
        log_warning(metaspace)(X);
      } else {
        log_info(metaspace)(X);
      }
      #undef X
    }

    // Note: re-adjusting may leave us with a CompressedClassSpaceSize
    //  larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    //  Let's just live with that, it's not a big deal.
    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to %zu.",
                          CompressedClassSpaceSize);
    }
  }

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());

}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we no longer preallocate initial chunks, is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  if (CDSConfig::is_dumping_static_archive()) {
    MetaspaceShared::initialize_for_static_dump();
  }

  // If UseCompressedClassPointers is true, we have two cases:
  // a) if CDS is active (runtime, Xshare=on), it will create the class space
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
  //    we will create the class space on our own. It will be placed above the java heap,
  //    since we assume the heap has been placed in low
  //    address regions. We may rethink this (see JDK-8244943). Failing that,
  //    it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (CDSConfig::is_using_archive()) {
    if (!FLAG_IS_DEFAULT(CompressedClassSpaceBaseAddress)) {
      log_warning(metaspace)("CDS active - ignoring CompressedClassSpaceBaseAddress.");
    }
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    // If any of the archived spaces fails to map, UseSharedSpaces
    // is reset to false.
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!CDSConfig::is_using_archive(), "CDS archive is not mapped at this point");

    // case (b) (No CDS)
    ReservedSpace rs;
    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());

    // If CompressedClassSpaceBaseAddress is set, we attempt to force-map class space to
    // the given address. This is a debug-only feature aiding tests. Due to the ASLR lottery
    // this may fail, in which case the VM will exit after printing an appropriate message.
    // Tests using this switch should cope with that.
    if (CompressedClassSpaceBaseAddress != 0) {
      const address base = (address)CompressedClassSpaceBaseAddress;
      if (!is_aligned(base, Metaspace::reserve_alignment())) {
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " invalid "
                    "(must be aligned to 0x%zx).",
                    CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
      }

      rs = MemoryReserver::reserve((char*)base,
                                   size,
                                   Metaspace::reserve_alignment(),
                                   os::vm_page_size());

      if (rs.is_reserved()) {
        log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
      } else {
        LogTarget(Debug, metaspace) lt;
        if (lt.is_enabled()) {
          LogStream ls(lt);
          os::print_memory_mappings((char*)base, size, &ls);
        }
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given, but reserving class space failed.",
                CompressedClassSpaceBaseAddress));
      }
    }

    // ...failing that, reserve anywhere, but let platform do optimized placement:
    if (!rs.is_reserved()) {
      log_info(metaspace)("Reserving compressed class space anywhere");
      rs = Metaspace::reserve_address_space_for_compressed_classes(size, true);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: %zu bytes",
                   CompressedClassSpaceSize));
    }

    // Mark class space as such
    MemTracker::record_virtual_memory_tag(rs, mtClass);

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    // In CDS=off mode, we give the JVM some leeway to choose a favorable base/shift combination.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());

    // After narrowKlass encoding scheme is decided: if the encoding base points to class space start,
    // establish a protection zone. Accidentally decoding a zero nKlass ID and then using it will result
    // in an immediate segmentation fault instead of a delayed error much later.
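    // (With base == class space start, decoding nKlass ID 0 yields exactly the
    // protected first granule, so the bad access traps immediately.)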
    if (CompressedKlassPointers::base() == (address)rs.base()) {
      // Let the protection zone be a whole commit granule. Otherwise, buddy allocator may later place neighboring
      // chunks in the same granule, see that the granule is not yet committed, and commit it, which would replace
      // the protection mapping and make the zone readable.
      // Alternatively, we could commit the chunk right now, but that is a tiny bit more fiddly, since we are not
      // fully set up yet at this point.
      const size_t protzone_size = metaspace::Settings::commit_granule_bytes(); // granule size >= page size
      const size_t protzone_wordsize = protzone_size / BytesPerWord;
      const metaspace::chunklevel_t lvl = metaspace::chunklevel::level_fitting_word_size(protzone_wordsize);
      metaspace::Metachunk* const chunk = MetaspaceContext::context_class()->cm()->get_chunk(lvl);
      const address protzone = (address) chunk->base();
      assert(protzone == (address)rs.base(), "The very first chunk should be located at the class space start?");
      assert(chunk->word_size() == protzone_wordsize, "Weird chunk size");
      CompressedKlassPointers::establish_protection_zone(protzone, protzone_size);
    } else {
      assert(CompressedKlassPointers::base() == nullptr, "Zero-based encoding expected");
    }

  }

#endif // _LP64

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif

}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

size_t Metaspace::max_allocation_word_size() {
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE;
}

// This version of Metaspace::allocate does not throw OOM but simply returns null, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (%zu)", word_size);

  assert(loader_data != nullptr, "Should never pass around a null loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Deal with concurrent unloading failed allocation starvation
  MetaspaceCriticalAllocation::block_if_concurrent_purge();

  MetadataType mdtype = use_class_space ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result != nullptr) {
#ifdef ASSERT
    if (using_class_space() && mdtype == ClassType) {
      assert(is_in_class_space(result) &&
             is_aligned(result, CompressedKlassPointers::klass_alignment_in_bytes()), "Sanity");
    } else {
      assert((is_in_class_space(result) || is_in_nonclass_metaspace(result)) &&
             is_aligned(result, Metaspace::min_allocation_alignment_bytes), "Sanity");
    }
#endif
    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);
    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

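// This variant can trigger a GC and retry the allocation; if it still fails, it
// reports a metadata OutOfMemoryError. It must therefore run in a Java thread
// that can carry a pending exception (TRAPS).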
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space, TRAPS) {

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return nullptr;  // caller does a CHECK_NULL too
  }
  assert(!THREAD->owns_locks(), "allocating metaspace while holding mutex");

  MetaWord* result = allocate(loader_data, word_size, type, use_class_space);

  if (result == nullptr) {
    MetadataType mdtype = use_class_space ? ClassType : NonClassType;
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }

    if (result == nullptr) {
      report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
      assert(HAS_PENDING_EXCEPTION, "sanity");
      return nullptr;
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  {
    LogMessage(gc, metaspace, freelist, oom) log;
    if (log.is_info()) {
      log.info("Metaspace (%s) allocation failed for size %zu",
               is_class_space_allocation(mdtype) ? "class" : "data", word_size);
      ResourceMark rm;
      if (log.is_debug()) {
        if (loader_data->metaspace_or_null() != nullptr) {
          NonInterleavingLogStream ls(LogLevelType::Debug, log);
          loader_data->print_value_on(&ls);
        }
      }
      NonInterleavingLogStream ls(LogLevelType::Info, log);
      // In case of an OOM, log out a short but still useful report.
      MetaspaceUtils::print_basic_report(&ls, 0);
    }
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
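    // Heuristic: blame the class space if the committed class space plus the
    // (generously rounded-up) request would not have fit into CompressedClassSpaceSize.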
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return nullptr;
  }
}

void Metaspace::purge(bool classes_unloaded) {
  // The MetaspaceCritical_lock is used by a concurrent GC to block out concurrent metaspace
  // allocations that would starve critical metaspace allocations, which are about to throw
  // OOM if they fail; those need precedence for correctness.
  MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
  if (classes_unloaded) {
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    if (cm != nullptr) {
      cm->purge();
    }
    if (using_class_space()) {
      cm = ChunkManager::chunkmanager_class();
      if (cm != nullptr) {
        cm->purge();
      }
    }
  }

  // Try to satisfy queued metaspace allocation requests.
  //
  // It might seem unnecessary to try to process allocation requests if no
  // classes have been unloaded. However, this call is required for the code
  // in MetaspaceCriticalAllocation::try_allocate_critical to work.
  MetaspaceCriticalAllocation::process();
}

// Returns true if the pointer points into the shared (CDS) metaspace.
bool Metaspace::is_in_shared_metaspace(const void* ptr) {
  return MetaspaceShared::is_in_shared_metaspace(ptr);
}

// Returns true if pointer points into one of the non-class-space metaspace regions.
bool Metaspace::is_in_nonclass_metaspace(const void* ptr) {
  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}