/*
 * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
 * Copyright (c) 2023, 2024, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceContext.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceCriticalAllocation.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"

using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}
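
// Note: the accessors above return sizes in words; get_values_for() below
// converts to bytes, since MetaspaceStats carries byte values.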

// Helper for get_statistics()
static void get_values_for(Metaspace::MetadataType mdtype, size_t* reserved, size_t* committed, size_t* used) {
#define w2b(x) (x * sizeof(MetaWord))
  if (mdtype == Metaspace::ClassType) {
    *reserved = w2b(RunningCounters::reserved_words_class());
    *committed = w2b(RunningCounters::committed_words_class());
    *used = w2b(RunningCounters::used_words_class());
  } else {
    *reserved = w2b(RunningCounters::reserved_words_nonclass());
    *committed = w2b(RunningCounters::committed_words_nonclass());
    *used = w2b(RunningCounters::used_words_nonclass());
  }
#undef w2b
}

// Retrieve all statistics in one go; make sure the values are consistent.
MetaspaceStats MetaspaceUtils::get_statistics(Metaspace::MetadataType mdtype) {

  // Consistency:
  // This function reads three values (reserved, committed, used) from different counters. These counters
  // may (very rarely) be out of sync. This has been a source of intermittent test errors in the past
  // (see e.g. JDK-8237872, JDK-8151460).
  // - reserved and committed counters are updated under protection of Metaspace_lock; an inconsistency
  //   between them can be the result of a dirty read.
  // - used is an atomic counter updated outside any lock range; there is no way to guarantee
  //   a clean read wrt the other two values.
  // Reading these values under lock protection would only help for the first case. Therefore
  // we don't bother and just re-read several times, then give up and correct the values.

  size_t r = 0, c = 0, u = 0; // Note: byte values.
  get_values_for(mdtype, &r, &c, &u);
  int retries = 10;
  // If the first retrieval resulted in inconsistent values, retry a bit...
  while ((r < c || c < u) && --retries >= 0) {
    get_values_for(mdtype, &r, &c, &u);
  }
  if (c < u || r < c) { // still inconsistent.
    // ... but not endlessly. If we don't get consistent values, correct them on the fly.
    // The logic here is that we trust the used counter - it's an atomic counter and whatever we see
    // must have been the truth once - and from that we reconstruct a likely set of committed/reserved
    // values.
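    // Illustrative example (invented numbers): a racy read may yield, in bytes,
    // r=100M, c=50M, u=60M. Committed can never really be smaller than used, so
    // we keep u, raise c to align_up(u, commit granule), and then raise r the
    // same way if it still lags behind c.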
    metaspace::InternalStats::inc_num_inconsistent_stats();
    if (c < u) {
      c = align_up(u, Metaspace::commit_alignment());
    }
    if (r < c) {
      r = align_up(c, Metaspace::reserve_alignment());
    }
  }
  return MetaspaceStats(r, c, u);
}

MetaspaceCombinedStats MetaspaceUtils::get_combined_statistics() {
  return MetaspaceCombinedStats(get_statistics(Metaspace::ClassType), get_statistics(Metaspace::NonClassType));
}

void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_meta_values) {
  // Get values now:
  const MetaspaceCombinedStats meta_values = get_combined_statistics();

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  // unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.
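  //
  // A resulting log line looks roughly like this (numbers invented):
  //   Metaspace: 900K(960K)->900K(960K) NonClass: 800K(832K)->800K(832K) Class: 100K(128K)->100K(128K)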

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.
  MetaspaceCombinedStats stats = get_combined_statistics();
  out->print_cr(" Metaspace "
                "used " SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved " SIZE_FORMAT "K",
                stats.used()/K,
                stats.committed()/K,
                stats.reserved()/K);

  if (Metaspace::using_class_space()) {
    out->print_cr(" class space "
                  "used " SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved " SIZE_FORMAT "K",
                  stats.class_space_stats().used()/K,
                  stats.class_space_stats().committed()/K,
                  stats.class_space_stats().reserved()/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif

/////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the VM operation used to trigger a GC.
// Within the VM operation, after the GC, the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With perm gen, the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, used
// to resize the Java heap by some GCs. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
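// Illustrative (invented values): with MinMetaspaceExpansion=256K,
// MaxMetaspaceExpansion=4M and a commit alignment of 64K, a request for
// 100K grows the HWM by 256K, a request for 1M by 4M, and a request
// for 10M by 10M+256K.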
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::reserve_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != nullptr) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != nullptr) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != nullptr) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != nullptr) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  // capacity_until_GC may have been decreased concurrently and may
  // temporarily be lower than what metaspace has committed. Allow for that.
  size_t left_until_GC = capacity_until_gc > committed_bytes ?
      capacity_until_gc - committed_bytes : 0;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const double used_after_gc = (double)MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink below the initial size (MetaspaceSize).
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If the current capacity (HWM) is below the minimum desired capacity,
    // increase the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
                               (double) minimum_desired_capacity / (double) K,
                               (double) expand_bytes / (double) K,
                               (double) MinMetaspaceExpansion / (double) K,
                               (double) new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink.
  // We would never want to shrink more than this:
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
                             (double) minimum_desired_capacity / (double) K, (double) maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
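      // The damping progression over successive shrink-eligible GCs is thus
      // 0% -> 10% -> 40% -> 100% of the computed excess. Dividing before
      // multiplying avoids overflow for very large values; the truncation it
      // introduces is absorbed by the align_down below.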
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK",
                               (double) MetaspaceSize / (double) K, (double) maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK",
                               (double) shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, (double) MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

////// Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = nullptr;
const void* Metaspace::_class_space_start = nullptr;
const void* Metaspace::_class_space_end = nullptr;

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != nullptr
         LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != nullptr) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: " SIZE_FORMAT,
              p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT,
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);
  _class_space_start = rs.base();
  _class_space_end = rs.end();
}

// Returns true if class space has been setup (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != nullptr;
}

// Reserve a range of memory that is to contain narrow Klass IDs. If "optimize_for_zero_base"
// is true, we will attempt to reserve memory suitable for zero-based encoding.
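// The attempt order below: first let CompressedKlassPointers pick a range
// that allows a favorable narrow-Klass encoding (e.g. zero-based), then fall
// back to an arbitrary, suitably aligned mapping anywhere.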
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size, bool optimize_for_zero_base) {
  char* result = nullptr;

  NOT_ZERO(result =
      (char*) CompressedKlassPointers::reserve_address_space_for_compressed_classes(size, RandomizeClassSpaceLocation,
                                                                                    optimize_for_zero_base));

  if (result == nullptr) {
    // Fallback: reserve anywhere
    log_debug(metaspace, map)("Trying anywhere...");
    result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), false);
  }

  // Wrap resulting range in ReservedSpace
  ReservedSpace rs;
  if (result != nullptr) {
    log_debug(metaspace, map)("Mapped at " PTR_FORMAT, p2i(result));
    assert(is_aligned(result, Metaspace::reserve_alignment()), "Alignment too small for metaspace");
    rs = ReservedSpace::space_for_range(result, size, Metaspace::reserve_alignment(),
                                        os::vm_page_size(), false, false);
  } else {
    log_debug(metaspace, map)("Failed to map.");
    rs = ReservedSpace();
  }
  return rs;
}
#endif // _LP64

size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from metaspace::Settings.
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  // to commit for the Metaspace.
  // It is just a number; a limit we compare against before committing. It
  // does not have to be aligned to anything.
  // It gets used as compare value before attempting to increase the metaspace
  // commit charge. It defaults to max_uintx (unlimited).
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  // pre-reserve for the compressed class space (if we use class space).
  // This size has to be aligned to the metaspace reserve alignment (to the
  // size of a root chunk). It gets aligned up from whatever value the caller
  // gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  // very little to do with each other. The notion often encountered:
  // MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  // is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  // in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  // save on reserved space, and to make ergonomics less confusing.

  MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment());

  if (UseCompressedClassPointers) {
    // Let CCS size not be larger than 80% of MaxMetaspaceSize. Note that this
    // is grossly over-dimensioned for most usage scenarios; the typical ratio of
    // class space : non-class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though, since ccs is only
    // reserved and will be committed on demand only.
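    // Illustrative (invented numbers): MaxMetaspaceSize=1G caps the CCS at
    // 8 * (1G / 10) = ~819M; a CompressedClassSpaceSize of 1G would be clamped
    // to that value, then aligned up to the root chunk size.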
    size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10);
    size_t adjusted_ccs_size = MIN2(CompressedClassSpaceSize, max_ccs_size);

    // CCS must be aligned to root chunk size, and be at least the size of one
    // root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());

    // Note: re-adjusting may leave us with a CompressedClassSpaceSize
    // larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    // Let's just live with that; it's not a big deal.

    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to " SIZE_FORMAT ".",
                          CompressedClassSpaceSize);
    }
  }

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());

}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we do not prealloc init chunks anymore is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  if (CDSConfig::is_dumping_static_archive()) {
    if (!CDSConfig::is_dumping_final_static_archive()) {
      assert(!CDSConfig::is_using_archive(), "sanity");
    }
    MetaspaceShared::initialize_for_static_dump();
  }

  // If UseCompressedClassPointers=1, we have two cases:
  // a) if CDS is active (runtime, Xshare=on), it will create the class space
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
  //    we will create the class space on our own. It will be placed above the java heap,
  //    which we assume has been placed in low address regions. We may rethink
  //    this (see JDK-8244943). Failing that, it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (CDSConfig::is_using_archive()) {
    if (!FLAG_IS_DEFAULT(CompressedClassSpaceBaseAddress)) {
      log_warning(metaspace)("CDS active - ignoring CompressedClassSpaceBaseAddress.");
    }
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false.
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!CDSConfig::is_using_archive(), "CDS archive is not mapped at this point");

    // case (b) (No CDS)
    ReservedSpace rs;
    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());

    // If CompressedClassSpaceBaseAddress is set, we attempt to force-map class space to
    // the given address. This is a debug-only feature aiding tests. Due to the ASLR lottery
    // this may fail, in which case the VM will exit after printing an appropriate message.
    // Tests using this switch should cope with that.
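    // E.g. -XX:CompressedClassSpaceBaseAddress=0x40000000 (example value only;
    // the address must be aligned to the reserve alignment to be accepted).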
    if (CompressedClassSpaceBaseAddress != 0) {
      const address base = (address)CompressedClassSpaceBaseAddress;
      if (!is_aligned(base, Metaspace::reserve_alignment())) {
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " invalid "
                    "(must be aligned to " SIZE_FORMAT_X ").",
                    CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
      }
      rs = ReservedSpace(size, Metaspace::reserve_alignment(),
                         os::vm_page_size() /* large */, (char*)base);
      if (rs.is_reserved()) {
        log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
      } else {
        LogTarget(Debug, metaspace) lt;
        if (lt.is_enabled()) {
          LogStream ls(lt);
          os::print_memory_mappings((char*)base, size, &ls);
        }
        vm_exit_during_initialization(
            err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given, but reserving class space failed.",
                    CompressedClassSpaceBaseAddress));
      }
    }

    // ...failing that, reserve anywhere, but let platform do optimized placement:
    if (!rs.is_reserved()) {
      log_info(metaspace)("Reserving compressed class space anywhere");
      rs = Metaspace::reserve_address_space_for_compressed_classes(size, true);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: " SIZE_FORMAT " bytes",
                  CompressedClassSpaceSize));
    }

    // Mark class space as such
    MemTracker::record_virtual_memory_tag((address)rs.base(), mtClass);

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());
  }

#endif

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

  // We must prevent the very first address of the ccs from being used to store
  // metadata, since that address would translate to a narrow pointer of 0, and the
  // VM does not distinguish between "narrow 0 as in null" and "narrow 0 as in start
  // of ccs".
  // Before Elastic Metaspace this could not happen, because every Metachunk
  // had a header, so nothing could be allocated at offset 0.
#ifdef _LP64
  if (using_class_space()) {
    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
    // start of ccs and do not use it for anything.
    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
  }
#endif

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif

}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

size_t Metaspace::max_allocation_word_size() {
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE;
}

// This version of Metaspace::allocate does not throw OOM but simply returns null, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
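// (The TRAPS variant further below wraps this one, adding a GC retry and
// OOM reporting for Java threads.)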
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (" SIZE_FORMAT ")", word_size);

  assert(loader_data != nullptr, "Should never pass around a null loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Block if a concurrent purge is in progress, to avoid starving allocations
  // that failed during concurrent unloading.
  MetaspaceCriticalAllocation::block_if_concurrent_purge();

  MetadataType mdtype = use_class_space ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result != nullptr) {
    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, bool use_class_space, TRAPS) {

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return nullptr; // caller does a CHECK_NULL too
  }
  // leyden/premain: temporarily disabled due to JDK-8327737
  // assert(!THREAD->owns_locks(), "allocating metaspace while holding mutex");

  MetaWord* result = allocate(loader_data, word_size, type, use_class_space);

  if (result == nullptr) {
    MetadataType mdtype = use_class_space ? ClassType : NonClassType;
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }

    if (result == nullptr) {
      report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
      assert(HAS_PENDING_EXCEPTION, "sanity");
      return nullptr;
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  {
    LogMessage(gc, metaspace, freelist, oom) log;
    if (log.is_info()) {
      log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
               is_class_space_allocation(mdtype) ? "class" : "data", word_size);
      ResourceMark rm;
      if (log.is_debug()) {
        if (loader_data->metaspace_or_null() != nullptr) {
          NonInterleavingLogStream ls(LogLevelType::Debug, log);
          loader_data->print_value_on(&ls);
        }
      }
      NonInterleavingLogStream ls(LogLevelType::Info, log);
      // In case of an OOM, log out a short but still useful report.
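      // (print_basic_report() neither locks nor walks the CLDG, so it is safe
      // on this OOM path.)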
      MetaspaceUtils::print_basic_report(&ls, 0);
    }
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return nullptr;
  }
}

void Metaspace::purge(bool classes_unloaded) {
  // The MetaspaceCritical_lock is used by a concurrent GC to block out concurrent
  // metaspace allocations that would starve critical metaspace allocations, which
  // are about to throw OOM if they fail; the critical allocations need precedence
  // for correctness.
  MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
  if (classes_unloaded) {
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    if (cm != nullptr) {
      cm->purge();
    }
    if (using_class_space()) {
      cm = ChunkManager::chunkmanager_class();
      if (cm != nullptr) {
        cm->purge();
      }
    }
  }

  // Try to satisfy queued metaspace allocation requests.
  //
  // It might seem unnecessary to try to process allocation requests if no
  // classes have been unloaded. However, this call is required for the code
  // in MetaspaceCriticalAllocation::try_allocate_critical to work.
  MetaspaceCriticalAllocation::process();
}

// Returns true if the pointer points into the shared (CDS) metaspace regions.
bool Metaspace::is_in_shared_metaspace(const void* ptr) {
  return MetaspaceShared::is_in_shared_metaspace(ptr);
}

// Returns true if pointer points into one of the non-class-space metaspace regions.
bool Metaspace::is_in_nonclass_metaspace(const void* ptr) {
  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}