/*
 * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metaspaceAlignment.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceContext.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceCriticalAllocation.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"

using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}

// Helper for get_statistics()
static void get_values_for(Metaspace::MetadataType mdtype, size_t* reserved, size_t* committed, size_t* used) {
#define w2b(x) ((x) * sizeof(MetaWord))
  if (mdtype == Metaspace::ClassType) {
    *reserved = w2b(RunningCounters::reserved_words_class());
    *committed = w2b(RunningCounters::committed_words_class());
    *used = w2b(RunningCounters::used_words_class());
  } else {
    *reserved = w2b(RunningCounters::reserved_words_nonclass());
    *committed = w2b(RunningCounters::committed_words_nonclass());
    *used = w2b(RunningCounters::used_words_nonclass());
  }
#undef w2b
}

// Retrieve all statistics in one go; make sure the values are consistent.
MetaspaceStats MetaspaceUtils::get_statistics(Metaspace::MetadataType mdtype) {

  // Consistency:
  // This function reads three values (reserved, committed, used) from different counters. These counters
  // may (very rarely) be out of sync. This has been a source of intermittent test errors in the past
  //  (see e.g. JDK-8237872, JDK-8151460).
  // - reserved and committed counters are updated under protection of Metaspace_lock; an inconsistency
  //   between them can be the result of a dirty read.
  // - used is an atomic counter updated outside any lock range; there is no way to guarantee
  //   a clean read wrt the other two values.
  // Reading these values under lock protection would only help for the first case. Therefore
  //   we don't bother and just re-read several times, then give up and correct the values.

  size_t r = 0, c = 0, u = 0; // Note: byte values.
  get_values_for(mdtype, &r, &c, &u);
  int retries = 10;
  // If the first retrieval resulted in inconsistent values, retry a bit...
  while ((r < c || c < u) && --retries >= 0) {
    get_values_for(mdtype, &r, &c, &u);
  }
  if (c < u || r < c) { // still inconsistent.
    // ... but not endlessly. If we don't get consistent values, correct them on the fly.
    // The logic here is that we trust the used counter - it's an atomic counter and whatever we see
    // must have been the truth once - and from that we reconstruct a likely set of committed/reserved
    // values.
    metaspace::InternalStats::inc_num_inconsistent_stats();
    if (c < u) {
      c = align_up(u, Metaspace::commit_alignment());
    }
    if (r < c) {
      r = align_up(c, Metaspace::reserve_alignment());
    }
  }
  return MetaspaceStats(r, c, u);
}
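
// A minimal usage sketch (hypothetical caller, not part of this file). Thanks to
// the on-the-fly correction above, the returned triple always satisfies
// reserved >= committed >= used, even if the counters were caught mid-update:
//
//   MetaspaceStats stats = MetaspaceUtils::get_statistics(Metaspace::NonClassType);
//   assert(stats.reserved() >= stats.committed(), "corrected above if needed");
//   assert(stats.committed() >= stats.used(), "corrected above if needed");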

MetaspaceCombinedStats MetaspaceUtils::get_combined_statistics() {
  return MetaspaceCombinedStats(get_statistics(Metaspace::ClassType), get_statistics(Metaspace::NonClassType));
}

void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_meta_values) {
  // Get values now:
  const MetaspaceCombinedStats meta_values = get_combined_statistics();

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  //   unused parts of committed chunks etc.)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.
  MetaspaceCombinedStats stats = get_combined_statistics();
  out->print_cr(" Metaspace       "
                "used "      SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved "  SIZE_FORMAT "K",
                stats.used()/K,
                stats.committed()/K,
                stats.reserved()/K);

  if (Metaspace::using_class_space()) {
    out->print_cr("  class space    "
                  "used "      SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved "  SIZE_FORMAT "K",
                  stats.class_space_stats().used()/K,
                  stats.class_space_stats().committed()/K,
                  stats.class_space_stats().reserved()/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif

////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the perm gen, the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
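
// Worked example (illustrative values only; assumes MinMetaspaceExpansion=256K,
// MaxMetaspaceExpansion=4M and a 64K commit alignment):
//   bytes = 100K -> align_up = 128K, <= 256K        -> delta = 256K (minimum step)
//   bytes = 1M   -> align_up = 1M, 256K < 1M <= 4M  -> delta = 4M   (large step)
//   bytes = 10M  -> align_up = 10M, > 4M            -> delta = 10M + 256K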

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::reserve_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != nullptr) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != nullptr) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != nullptr) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != nullptr) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}
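
// Hypothetical caller sketch (not part of this file): retry lost CAS races, but
// give up once can_retry reports that MaxMetaspaceSize itself is the problem.
// "delta" is assumed to be commit-aligned:
//
//   size_t new_cap = 0, old_cap = 0;
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap, &can_retry) &&
//          can_retry) {
//     // Raced with another thread; re-attempt with the same delta.
//   }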

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}
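
// Example (illustrative values only): with MaxMetaspaceSize=1G, committed_bytes=200M
// and capacity_until_GC=256M, left_until_max is 824M and left_until_GC is 56M, so
// callers may commit at most 56M worth of words before a GC must be triggered.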

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink below the initial metaspace size (MetaspaceSize).
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink.
  // We would never want to shrink more than this.
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to the initial size if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
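      // Example (illustrative): if each GC keeps finding capacity_until_GC 100M
      // above maximum_desired_capacity, successive calls would shrink by 0M,
      // 10M, 40M and finally 100M as the factor steps 0 -> 10 -> 40 -> 100.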
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

//////  Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = nullptr;

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != nullptr
      LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != nullptr) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: " SIZE_FORMAT,
               p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT,
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);

  // This currently does not work because rs may be the result of a split
  // operation and NMT seems unable to handle splits.
  // Will be fixed with JDK-8243535.
  // MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);
}

// Returns true if class space has been set up (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != nullptr;
}

// Reserve a range of memory at an address suitable for en/decoding narrow
// Klass pointers (see: CompressedClassPointers::is_valid_base()).
// The returned address shall both be suitable as a compressed class pointers
//  base, and aligned to Metaspace::reserve_alignment (which is equal to or a
//  multiple of allocation granularity).
// On error, returns an unreserved space.
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size) {

  // Note: the code below is broken and needs rethinking, since it confuses the
  // encoding base with the compressed class space attach address; the two don't
  // have to be the same. That is also the reason why we currently don't get
  // zero-based encoding for aarch64. The comment is also wrong, at least for a
  // 9-bit shift.

  // Will be fixed. For now it works well enough.

#if defined(AARCH64) || defined(PPC64)
  const size_t alignment = Metaspace::reserve_alignment();

  // AArch64: Try to align metaspace so that we can decode a compressed
  // klass with a single MOVK instruction. We can do this iff the
  // compressed class base is a multiple of 4G.
  // Additionally, above 32G, ensure the lower LogKlassAlignmentInBytes bits
  // of the upper 32-bits of the address are zero so we can handle a shift
  // when decoding.

  // PPC64: smaller heaps up to 2g will be mapped just below 4g. Then the
  // attempt to place the compressed class space just after the heap fails on
  // Linux 4.1.42 and higher because the launcher is loaded at 4g
  // (ELF_ET_DYN_BASE). In that case we reach here and search the address space
  // below 32g to get a zerobased CCS. For simplicity we reuse the search
  // strategy for AARCH64.

  static const struct {
    address from;
    address to;
    size_t increment;
  } search_ranges[] = {
    {  (address)(4*G),   (address)(32*G),   4*G, },
    {  (address)(32*G),  (address)(1024*G), (4 << LogKlassAlignmentInBytes) * G },
    {  nullptr, nullptr, 0 }
  };

  for (int i = 0; search_ranges[i].from != nullptr; i++) {
    address a = search_ranges[i].from;
    if (CompressedKlassPointers::is_valid_base(a)) {
      while (a < search_ranges[i].to) {
        ReservedSpace rs(size, Metaspace::reserve_alignment(),
                         os::vm_page_size(), (char*)a);
        if (rs.is_reserved()) {
          assert(a == (address)rs.base(), "Sanity");
          return rs;
        }
        a += search_ranges[i].increment;
      }
    }
  }
#endif // defined(AARCH64) || defined(PPC64)

#ifdef AARCH64
  // Note: on AARCH64, if the code above does not find any good placement, we
  // have no recourse. We return an empty space and the VM will exit.
  return ReservedSpace();
#else
  // Default implementation: Just reserve anywhere.
  return ReservedSpace(size, Metaspace::reserve_alignment(), os::vm_page_size(), (char*)nullptr);
#endif // AARCH64
}

#endif // _LP64

size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from Settings::---
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  //  to commit for the Metaspace.
  //  It is just a number; a limit we compare against before committing. It
  //  does not have to be aligned to anything.
  //  It gets used as compare value before attempting to increase the metaspace
  //  commit charge. It defaults to max_uintx (unlimited).
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  //  pre-reserve for the compressed class space (if we use class space).
  //  This size has to be aligned to the metaspace reserve alignment (to the
  //  size of a root chunk). It gets aligned up from whatever value the caller
  //  gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  //  very little to do with each other. The notion often encountered:
  //  MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  //  is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  //  in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  //  save on reserved space, and to make ergonomics less confusing.

  MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment());

  if (UseCompressedClassPointers) {
    // Let CCS size not be larger than 80% of MaxMetaspaceSize. Note that this is
    // grossly over-dimensioned for most usage scenarios; the typical ratio of
    // class space : non-class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though, since ccs is only
    // reserved and will be committed on demand only.
    size_t max_ccs_size = MaxMetaspaceSize * 0.8;
    size_t adjusted_ccs_size = MIN2(CompressedClassSpaceSize, max_ccs_size);

    // CCS must be aligned to root chunk size, and be at least the size of one
    //  root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());

    // Note: re-adjusting may have us left with a CompressedClassSpaceSize
    //  larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    //  Let's just live with that, it's not a big deal.

    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to " SIZE_FORMAT ".",
                          CompressedClassSpaceSize);
    }
  }
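
  // Example (illustrative, assuming a 4M root chunk size): with
  // -XX:MaxMetaspaceSize=100M -XX:CompressedClassSpaceSize=1G, max_ccs_size is
  // 80M, so adjusted_ccs_size = MIN2(1G, 80M) = 80M (already 4M-aligned) and
  // CompressedClassSpaceSize is ergonomically lowered from 1G to 80M.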

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());
}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we do not prealloc init chunks anymore is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  if (DumpSharedSpaces) {
    assert(!UseSharedSpaces, "sanity");
    MetaspaceShared::initialize_for_static_dump();
  }

  // If UseCompressedClassPointers=1, we have two cases:
  // a) if CDS is active (runtime, Xshare=on), it will create the class space
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
  //    we will create the class space on our own. It will be placed above the java heap,
  //    since we assume the heap has been placed in low
  //    address regions. We may rethink this (see JDK-8244943). Failing that,
  //    it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (UseSharedSpaces) {
    if (!FLAG_IS_DEFAULT(CompressedClassSpaceBaseAddress)) {
      log_warning(metaspace)("CDS active - ignoring CompressedClassSpaceBaseAddress.");
    }
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    // If any of the archived spaces fail to map, UseSharedSpaces
    // is reset to false.
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!UseSharedSpaces, "CDS archive is not mapped at this point");

    // case (b) (No CDS)
    ReservedSpace rs;
    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());
    address base = nullptr;

    // If CompressedClassSpaceBaseAddress is set, we attempt to force-map class space to
    // the given address. This is a debug-only feature aiding tests. Due to the ASLR lottery
    // this may fail, in which case the VM will exit after printing an appropriate message.
    // Tests using this switch should cope with that.
    if (CompressedClassSpaceBaseAddress != 0) {
      base = (address)CompressedClassSpaceBaseAddress;
      if (!is_aligned(base, Metaspace::reserve_alignment())) {
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " invalid "
                    "(must be aligned to " SIZE_FORMAT_X ").",
                    CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
      }
      rs = ReservedSpace(size, Metaspace::reserve_alignment(),
                         os::vm_page_size(), (char*)base);
      if (rs.is_reserved()) {
        log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
      } else {
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given, but reserving class space failed.",
                CompressedClassSpaceBaseAddress));
      }
    }

    if (!rs.is_reserved()) {
      // If UseCompressedOops=1 and the java heap has been placed in coops-friendly
      //  territory, i.e. its base is under 32G, then we attempt to place ccs
      //  right above the java heap.
      // Otherwise the lower 32G are still free. We try to place ccs at the lowest
      // allowed mapping address.
      base = (UseCompressedOops && (uint64_t)CompressedOops::base() < OopEncodingHeapMax) ?
              CompressedOops::end() : (address)HeapBaseMinAddress;
      base = align_up(base, Metaspace::reserve_alignment());

      if (base != nullptr) {
        if (CompressedKlassPointers::is_valid_base(base)) {
          rs = ReservedSpace(size, Metaspace::reserve_alignment(),
                             os::vm_page_size(), (char*)base);
        }
      }
    }

    // ...failing that, reserve anywhere, but let platform do optimized placement:
    if (!rs.is_reserved()) {
      rs = Metaspace::reserve_address_space_for_compressed_classes(size);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: " SIZE_FORMAT " bytes",
                   CompressedClassSpaceSize));
    }

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());
  }

#endif

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

  // We must prevent the very first address of the ccs from being used to store
  // metadata, since that address would translate to a narrow pointer of 0, and the
  // VM does not distinguish between "narrow 0 as in null" and "narrow 0 as in start
  //  of ccs".
  // Before Elastic Metaspace, this could not happen because every Metachunk had
  // a header, so nothing was ever allocated at offset 0.
#ifdef _LP64
  if (using_class_space()) {
    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
    // start of ccs and not use it for anything.
    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
  }
#endif

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

size_t Metaspace::max_allocation_word_size() {
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE;
}

#ifdef _LP64
// The largest allowed size for class space
size_t Metaspace::max_class_space_size() {
  assert(KlassEncodingMetaspaceMax > 0, "too early.");
  // This is a bit fuzzy. The max value of the class space size depends on the
  // narrow klass pointer encoding range size and on CDS, since class space
  // shares the encoding range with CDS. CDS archives are usually pretty small
  // though, so to keep matters simple, for now we just assume a reasonable
  // default (this is hackish; improve!).
  const size_t slice_for_cds = M * 128;
  assert(KlassEncodingMetaspaceMax >= (slice_for_cds * 2), "rethink this");
  const size_t max_class_space_size = KlassEncodingMetaspaceMax - slice_for_cds;
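  // Example (illustrative): with KlassEncodingMetaspaceMax = 32G and the 128M
  // CDS slice assumed above, this returns 32G - 128M = 32640M.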
  return max_class_space_size;
}
#endif // _LP64

// This version of Metaspace::allocate does not throw OOM but simply returns null, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (" SIZE_FORMAT ")", word_size);

  assert(loader_data != nullptr, "Should never pass around a nullptr loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Deal with concurrent unloading failed allocation starvation
  MetaspaceCriticalAllocation::block_if_concurrent_purge();

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result != nullptr) {
    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}
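
// Hypothetical non-Java-thread caller sketch (cld and word_size assumed given);
// no OOME is thrown here, so the null return must be handled explicitly:
//
//   MetaWord* p = Metaspace::allocate(cld, word_size, MetaspaceObj::SymbolType);
//   if (p == nullptr) {
//     // Back off, queue the request, or fail the operation gracefully.
//   }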

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return nullptr;  // caller does a CHECK_NULL too
  }

  MetaWord* result = allocate(loader_data, word_size, type);

  if (result == nullptr) {
    MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }

    if (result == nullptr) {
      report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
      assert(HAS_PENDING_EXCEPTION, "sanity");
      return nullptr;
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != nullptr) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return nullptr;
  }
}

void Metaspace::purge(bool classes_unloaded) {
  // The MetaspaceCritical_lock is used by a concurrent GC to block out concurrent metaspace
  // allocations that would starve critical metaspace allocations, which are about to throw
  // OOM if they fail; those need precedence for correctness.
  MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
  if (classes_unloaded) {
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    if (cm != nullptr) {
      cm->purge();
    }
    if (using_class_space()) {
      cm = ChunkManager::chunkmanager_class();
      if (cm != nullptr) {
        cm->purge();
      }
    }
  }

  // Try to satisfy queued metaspace allocation requests.
  //
  // It might seem unnecessary to try to process allocation requests if no
  // classes have been unloaded. However, this call is required for the code
  // in MetaspaceCriticalAllocation::try_allocate_critical to work.
  MetaspaceCriticalAllocation::process();
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && VirtualSpaceList::vslist_class()->contains((MetaWord*)ptr)) {
    return true;
  }

  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}