/*
 * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
 * Copyright (c) 2023, 2024, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceContext.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceCriticalAllocation.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"

using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ?
      RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}

// Helper for get_statistics()
static void get_values_for(Metaspace::MetadataType mdtype, size_t* reserved, size_t* committed, size_t* used) {
#define w2b(x) (x * sizeof(MetaWord))
  if (mdtype == Metaspace::ClassType) {
    *reserved = w2b(RunningCounters::reserved_words_class());
    *committed = w2b(RunningCounters::committed_words_class());
    *used = w2b(RunningCounters::used_words_class());
  } else {
    *reserved = w2b(RunningCounters::reserved_words_nonclass());
    *committed = w2b(RunningCounters::committed_words_nonclass());
    *used = w2b(RunningCounters::used_words_nonclass());
  }
#undef w2b
}

// Retrieve all statistics in one go; make sure the values are consistent.
MetaspaceStats MetaspaceUtils::get_statistics(Metaspace::MetadataType mdtype) {

  // Consistency:
  // This function reads three values (reserved, committed, used) from different counters. These counters
  // may (very rarely) be out of sync. This has been a source of intermittent test errors in the past
  // (see e.g. JDK-8237872, JDK-8151460).
  // - The reserved and committed counters are updated under protection of Metaspace_lock; an inconsistency
  //   between them can be the result of a dirty read.
  // - used is an atomic counter updated outside any lock range; there is no way to guarantee
  //   a clean read wrt the other two values.
  // Reading these values under lock protection would only help for the first case. Therefore
  // we don't bother and just re-read several times, then give up and correct the values.

  size_t r = 0, c = 0, u = 0; // Note: byte values.
  get_values_for(mdtype, &r, &c, &u);
  int retries = 10;
  // If the first retrieval resulted in inconsistent values, retry a bit...
  while ((r < c || c < u) && --retries >= 0) {
    get_values_for(mdtype, &r, &c, &u);
  }
  if (c < u || r < c) { // still inconsistent.
    // ... but not endlessly. If we don't get consistent values, correct them on the fly.
    // The logic here is that we trust the used counter - it's an atomic counter and whatever we see
    // must have been the truth once - and from that we reconstruct a likely set of committed/reserved
    // values.
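    // For example (illustrative values, not measured data): a dirty read of
    // r = 10M, c = 8M, u = 9M would keep u, lift c to align_up(9M, commit
    // alignment) and then, if needed, lift r to align_up(c, reserve
    // alignment), restoring the invariant r >= c >= u.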
    metaspace::InternalStats::inc_num_inconsistent_stats();
    if (c < u) {
      c = align_up(u, Metaspace::commit_alignment());
    }
    if (r < c) {
      r = align_up(c, Metaspace::reserve_alignment());
    }
  }
  return MetaspaceStats(r, c, u);
}

MetaspaceCombinedStats MetaspaceUtils::get_combined_statistics() {
  return MetaspaceCombinedStats(get_statistics(Metaspace::ClassType), get_statistics(Metaspace::NonClassType));
}

void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_meta_values) {
  // Get values now:
  const MetaspaceCombinedStats meta_values = get_combined_statistics();

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  // unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.
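  // Sample output, with illustrative numbers (the format follows from the
  // print_cr calls below):
  //  Metaspace used 4251K, committed 4480K, reserved 1114112K
  //  class space used 384K, committed 512K, reserved 1048576K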
  MetaspaceCombinedStats stats = get_combined_statistics();
  out->print_cr(" Metaspace "
                "used " SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved " SIZE_FORMAT "K",
                stats.used()/K,
                stats.committed()/K,
                stats.reserved()/K);

  if (Metaspace::using_class_space()) {
    out->print_cr(" class space "
                  "used " SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved " SIZE_FORMAT "K",
                  stats.class_space_stats().used()/K,
                  stats.class_space_stats().committed()/K,
                  stats.class_space_stats().reserved()/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif

////////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
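    // Illustrative arithmetic (example values, not necessarily the flag
    // defaults): with MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion
    // = 4M, a 100K request becomes 256K, a 1M request becomes 4M, and an 8M
    // request falls through to this branch and becomes 8M (aligned) + 256K.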
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::reserve_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != nullptr) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != nullptr) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != nullptr) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != nullptr) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                                         (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
                                       (is_class ?
"class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord)); 400 return false; 401 } 402 403 return true; 404 } 405 406 size_t MetaspaceGC::allowed_expansion() { 407 size_t committed_bytes = MetaspaceUtils::committed_bytes(); 408 size_t capacity_until_gc = capacity_until_GC(); 409 410 size_t left_until_max = MaxMetaspaceSize - committed_bytes; 411 // capacity_until_GC may have been decreased concurrently and may 412 // temporarily be lower than what metaspace has committed. Allow for that. 413 size_t left_until_GC = capacity_until_gc > committed_bytes ? 414 capacity_until_gc - committed_bytes : 0; 415 size_t left_to_commit = MIN2(left_until_GC, left_until_max); 416 log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT 417 " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".", 418 left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord); 419 420 return left_to_commit / BytesPerWord; 421 } 422 423 void MetaspaceGC::compute_new_size() { 424 assert(_shrink_factor <= 100, "invalid shrink factor"); 425 uint current_shrink_factor = _shrink_factor; 426 _shrink_factor = 0; 427 428 // Using committed_bytes() for used_after_gc is an overestimation, since the 429 // chunk free lists are included in committed_bytes() and the memory in an 430 // un-fragmented chunk free list is available for future allocations. 431 // However, if the chunk free lists becomes fragmented, then the memory may 432 // not be available for future allocations and the memory is therefore "in use". 433 // Including the chunk free lists in the definition of "in use" is therefore 434 // necessary. Not including the chunk free lists can cause capacity_until_GC to 435 // shrink below committed_bytes() and this has caused serious bugs in the past. 436 const double used_after_gc = (double)MetaspaceUtils::committed_bytes(); 437 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); 438 439 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; 440 const double maximum_used_percentage = 1.0 - minimum_free_percentage; 441 442 const double min_tmp = used_after_gc / maximum_used_percentage; 443 size_t minimum_desired_capacity = 444 (size_t)MIN2(min_tmp, double(MaxMetaspaceSize)); 445 // Don't shrink less than the initial generation size 446 minimum_desired_capacity = MAX2(minimum_desired_capacity, 447 MetaspaceSize); 448 449 log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: "); 450 log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f", 451 minimum_free_percentage, maximum_used_percentage); 452 log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K); 453 454 size_t shrink_bytes = 0; 455 if (capacity_until_GC < minimum_desired_capacity) { 456 // If we have less capacity below the metaspace HWM, then 457 // increment the HWM. 
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("  expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
                               (double) minimum_desired_capacity / (double) K,
                               (double) expand_bytes / (double) K,
                               (double) MinMetaspaceExpansion / (double) K,
                               (double) new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("  maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("  minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
                             (double) minimum_desired_capacity / (double) K, (double) maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("  shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK",
                               (double) MetaspaceSize / (double) K, (double) maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("  shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK",
                               (double) shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, (double) MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

////// Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = nullptr;
const void* Metaspace::_class_space_start = nullptr;
const void* Metaspace::_class_space_end = nullptr;

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != nullptr
      LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != nullptr) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: " SIZE_FORMAT,
              p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT,
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);
  _class_space_start = rs.base();
  _class_space_end = rs.end();
}

// Returns true if class space has been setup (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != nullptr;
}

// Reserve a range of memory that is to contain narrow Klass IDs. If "optimize_for_zero_base"
// is true, we will attempt to reserve memory suitable for zero-based encoding.
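// (Sketch of why zero-based encoding helps: decoding a narrow Klass value is,
// roughly, klass = base + ((uintptr_t)narrow << shift); with base == 0 the
// addition disappears, which is why low address ranges are preferred. The
// exact encoding is chosen later by CompressedKlassPointers.)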
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size, bool optimize_for_zero_base) {
  char* result = nullptr;

  NOT_ZERO(result =
      (char*) CompressedKlassPointers::reserve_address_space_for_compressed_classes(size, RandomizeClassSpaceLocation,
                                                                                    optimize_for_zero_base));

  if (result == nullptr) {
    // Fallback: reserve anywhere
    log_debug(metaspace, map)("Trying anywhere...");
    result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), false);
  }

  // Wrap resulting range in ReservedSpace
  if (result != nullptr) {
    log_debug(metaspace, map)("Mapped at " PTR_FORMAT, p2i(result));
    assert(is_aligned(result, Metaspace::reserve_alignment()), "Alignment too small for metaspace");

    return ReservedSpace(result,
                         size,
                         Metaspace::reserve_alignment(),
                         os::vm_page_size(),
                         !ExecMem,
                         false /* special */);
  } else {
    log_debug(metaspace, map)("Failed to map.");
    return {};
  }
}
#endif // _LP64

size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from metaspace::Settings.
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  // to commit for the Metaspace.
  // It is just a number; a limit we compare against before committing. It
  // does not have to be aligned to anything.
  // It gets used as compare value before attempting to increase the metaspace
  // commit charge. It defaults to max_uintx (unlimited).
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  // pre-reserve for the compressed class space (if we use class space).
  // This size has to be aligned to the metaspace reserve alignment (to the
  // size of a root chunk). It gets aligned up from whatever value the caller
  // gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  // very little to do with each other. The notion often encountered:
  // MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  // is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  // in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  // save on reserved space, and to make ergonomics less confusing.

  MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment());

  if (UseCompressedClassPointers) {
    // Let Class Space not be larger than 80% of MaxMetaspaceSize. Note that this is
    // grossly over-dimensioned for most usage scenarios; the typical ratio of
    // class space : non-class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though since ccs is only
    // reserved and will be committed on demand only.
    const size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10);

    // Sanity check.
    const size_t max_klass_range = CompressedKlassPointers::max_klass_range_size();
    assert(max_klass_range >= reserve_alignment(),
           "Klass range (%zu) must cover at least a full root chunk (%zu)",
           max_klass_range, reserve_alignment());

    size_t adjusted_ccs_size = MIN3(CompressedClassSpaceSize, max_ccs_size, max_klass_range);

    // CCS must be aligned to root chunk size, and be at least the size of one
    // root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());

    // Print a warning if the adjusted size differs from the user's input
    if (CompressedClassSpaceSize != adjusted_ccs_size) {
#define X "CompressedClassSpaceSize adjusted from user input " \
          "%zu bytes to %zu bytes", CompressedClassSpaceSize, adjusted_ccs_size
      if (FLAG_IS_CMDLINE(CompressedClassSpaceSize)) {
        log_warning(metaspace)(X);
      } else {
        log_info(metaspace)(X);
      }
#undef X
    }

    // Note: re-adjusting may have us left with a CompressedClassSpaceSize
    // larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    // Let's just live with that; it's not a big deal.
    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to " SIZE_FORMAT ".",
                          CompressedClassSpaceSize);
    }
  }

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());

}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we do not prealloc init chunks anymore is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  if (CDSConfig::is_dumping_static_archive()) {
    assert(!CDSConfig::is_using_archive(), "sanity");
    MetaspaceShared::initialize_for_static_dump();
  }

  // If UseCompressedClassPointers is enabled, we have two cases:
  // a) if CDS is active (runtime, Xshare=on), it will create the class space
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
  //    we will create the class space on our own. It will be placed above the Java heap,
  //    since we assume the heap has been placed in low
  //    address regions. We may rethink this (see JDK-8244943). Failing that,
  //    it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (CDSConfig::is_using_archive()) {
    if (!FLAG_IS_DEFAULT(CompressedClassSpaceBaseAddress)) {
      log_warning(metaspace)("CDS active - ignoring CompressedClassSpaceBaseAddress.");
    }
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false.
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!CDSConfig::is_using_archive(), "CDS archive is not mapped at this point");

    // case (b) (No CDS)
    ReservedSpace rs;
    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());

    // If CompressedClassSpaceBaseAddress is set, we attempt to force-map class space to
    // the given address. This is a debug-only feature aiding tests. Due to the ASLR lottery
    // this may fail, in which case the VM will exit after printing an appropriate message.
    // Tests using this switch should cope with that.
    if (CompressedClassSpaceBaseAddress != 0) {
      const address base = (address)CompressedClassSpaceBaseAddress;
      if (!is_aligned(base, Metaspace::reserve_alignment())) {
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " invalid "
                    "(must be aligned to " SIZE_FORMAT_X ").",
                    CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
      }

      rs = MemoryReserver::reserve((char*)base,
                                   size,
                                   Metaspace::reserve_alignment(),
                                   os::vm_page_size());

      if (rs.is_reserved()) {
        log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
      } else {
        LogTarget(Debug, metaspace) lt;
        if (lt.is_enabled()) {
          LogStream ls(lt);
          os::print_memory_mappings((char*)base, size, &ls);
        }
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given, but reserving class space failed.",
                    CompressedClassSpaceBaseAddress));
      }
    }

    // ...failing that, reserve anywhere, but let platform do optimized placement:
    if (!rs.is_reserved()) {
      log_info(metaspace)("Reserving compressed class space anywhere");
      rs = Metaspace::reserve_address_space_for_compressed_classes(size, true);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: " SIZE_FORMAT " bytes",
                  CompressedClassSpaceSize));
    }

    // Mark class space as such
    MemTracker::record_virtual_memory_tag((address)rs.base(), mtClass);

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    // In CDS=off mode, we give the JVM some leeway to choose a favorable base/shift combination.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());
  }

#endif

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

  // We must prevent the very first address of the ccs from being used to store
  // metadata, since that address would translate to a narrow pointer of 0, and the
  // VM does not distinguish between "narrow 0 as in null" and "narrow 0 as in start
  // of ccs".
  // Before Elastic Metaspace that did not happen due to the fact that every Metachunk
  // had a header and therefore could not allocate anything at offset 0.
#ifdef _LP64
  if (using_class_space()) {
    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
    // start of ccs and do not use it for anything.
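    // (Illustrating the aliasing being avoided: encoding is, roughly,
    // narrow = (uint32_t)((addr - ccs_base) >> shift), so a Klass placed at
    // ccs_base itself would encode to 0 - indistinguishable from null.)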
    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
  }
#endif

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif

}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

size_t Metaspace::max_allocation_word_size() {
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE;
}

// This version of Metaspace::allocate does not throw OOM but simply returns null, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (" SIZE_FORMAT ")", word_size);

  assert(loader_data != nullptr, "Should never pass around a null loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Deal with allocation starvation caused by concurrent unloading after failed allocations
  MetaspaceCriticalAllocation::block_if_concurrent_purge();

  MetadataType mdtype = use_class_space ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result != nullptr) {
#ifdef ASSERT
    if (using_class_space() && mdtype == ClassType) {
      assert(is_in_class_space(result) &&
             is_aligned(result, CompressedKlassPointers::klass_alignment_in_bytes()), "Sanity");
    } else {
      assert((is_in_class_space(result) || is_in_nonclass_metaspace(result)) &&
             is_aligned(result, Metaspace::min_allocation_alignment_bytes), "Sanity");
    }
#endif
    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);
    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space, TRAPS) {

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return nullptr; // caller does a CHECK_NULL too
  }
  assert(!THREAD->owns_locks(), "allocating metaspace while holding mutex");

  MetaWord* result = allocate(loader_data, word_size, type, use_class_space);

  if (result == nullptr) {
    MetadataType mdtype = use_class_space ? ClassType : NonClassType;
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }

    if (result == nullptr) {
      report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
      assert(HAS_PENDING_EXCEPTION, "sanity");
      return nullptr;
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  {
    LogMessage(gc, metaspace, freelist, oom) log;
    if (log.is_info()) {
      log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
               is_class_space_allocation(mdtype) ? "class" : "data", word_size);
      ResourceMark rm;
      if (log.is_debug()) {
        if (loader_data->metaspace_or_null() != nullptr) {
          NonInterleavingLogStream ls(LogLevelType::Debug, log);
          loader_data->print_value_on(&ls);
        }
      }
      NonInterleavingLogStream ls(LogLevelType::Info, log);
      // In case of an OOM, log out a short but still useful report.
      MetaspaceUtils::print_basic_report(&ls, 0);
    }
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
        MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
        align_up(word_size * BytesPerWord, 4 * M) >
        CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
      "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return nullptr;
  }
}

void Metaspace::purge(bool classes_unloaded) {
  // The MetaspaceCritical_lock is used by a concurrent GC to block out concurrent metaspace
  // allocations that would starve critical metaspace allocations, which are about to throw
  // OOM if they fail; they need precedence for correctness.
  MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
  if (classes_unloaded) {
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    if (cm != nullptr) {
      cm->purge();
    }
    if (using_class_space()) {
      cm = ChunkManager::chunkmanager_class();
      if (cm != nullptr) {
        cm->purge();
      }
    }
  }

  // Try to satisfy queued metaspace allocation requests.
  //
  // It might seem unnecessary to try to process allocation requests if no
  // classes have been unloaded. However, this call is required for the code
  // in MetaspaceCriticalAllocation::try_allocate_critical to work.
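  // (In broad strokes: try_allocate_critical enqueues a waiter that blocks
  // until a purge has run; process() retries the queued allocations and wakes
  // the waiters, so it must run on every purge, unloading or not.)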
  MetaspaceCriticalAllocation::process();
}


// Returns true if the pointer points into the shared (CDS) metaspace regions.
bool Metaspace::is_in_shared_metaspace(const void* ptr) {
  return MetaspaceShared::is_in_shared_metaspace(ptr);
}

// Returns true if pointer points into one of the non-class-space metaspace regions.
bool Metaspace::is_in_nonclass_metaspace(const void* ptr) {
  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}