1 /* 2 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2020 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/counters.hpp"
#include "memory/metaspace/freeBlocks.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceArena.hpp"
#include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Prefix used by the UL/UL2 logging macros: identifies this arena by address and name.
#define LOGFMT "Arena @" PTR_FORMAT " (%s)"
#define LOGFMT_ARGS p2i(this), this->_name

// Returns the level of the next chunk to be added, acc to growth policy.
// The growth step is simply the number of chunks this arena already holds,
// so the policy sees how far along the arena is in its growth sequence.
chunklevel_t MetaspaceArena::next_chunk_level() const {
  const int growth_step = _chunks.count();
  return _growth_policy->get_level_at_step(growth_step);
}

// Given a chunk, add its remaining free committed space to the free block list.
// Called when a chunk is retired (a new current chunk replaces it) so the
// leftover committed space is not lost.
void MetaspaceArena::salvage_chunk(Metachunk* c) {
  assert_lock_strong(lock());
  size_t remaining_words = c->free_below_committed_words();
  // Only worth salvaging if the remainder is large enough for the free block list.
  if (remaining_words > FreeBlocks::MinWordSize) {

    UL2(trace, "salvaging chunk " METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));

    // Formally allocate the remainder from the chunk (bumping the used counter),
    // then hand it to the free block list. Blocks in the fbl still count as
    // "used" as far as statistics are concerned.
    MetaWord* ptr = c->allocate(remaining_words);
    assert(ptr != NULL, "Should have worked");
    _total_used_words_counter->increment_by(remaining_words);

    add_allocation_to_fbl(ptr, remaining_words);

    // After this operation: the chunk should have no free committed space left.
    assert(c->free_below_committed_words() == 0,
           "Salvaging chunk failed (chunk " METACHUNK_FULL_FORMAT ").",
           METACHUNK_FULL_FORMAT_ARGS(c));
  }
}

// Allocate a new chunk from the underlying chunk manager able to hold at least
// requested word size.
// Returns NULL if the chunk manager cannot provide a chunk (e.g. commit limit hit).
Metachunk* MetaspaceArena::allocate_new_chunk(size_t requested_word_size) {
  assert_lock_strong(lock());

  // Should this ever happen, we need to increase the maximum possible chunk size.
  guarantee(requested_word_size <= chunklevel::MAX_CHUNK_WORD_SIZE,
            "Requested size too large (" SIZE_FORMAT ") - max allowed size per allocation is " SIZE_FORMAT ".",
            requested_word_size, chunklevel::MAX_CHUNK_WORD_SIZE);

  // max_level: the smallest chunk that can still hold the request.
  // preferred_level: what the growth policy suggests, capped at max_level.
  const chunklevel_t max_level = chunklevel::level_fitting_word_size(requested_word_size);
  const chunklevel_t preferred_level = MIN2(max_level, next_chunk_level());

  Metachunk* c = _chunk_manager->get_chunk(preferred_level, max_level, requested_word_size);
  if (c == NULL) {
    return NULL;
  }

  assert(c->is_in_use(), "Wrong chunk state.");
  assert(c->free_below_committed_words() >= requested_word_size, "Chunk not committed");
  return c;
}

// Hand a pointer range to the free block list for later re-use by allocate().
void MetaspaceArena::add_allocation_to_fbl(MetaWord* p, size_t word_size) {
  if (_fbl == NULL) {
    _fbl = new FreeBlocks(); // Create only on demand
  }
  _fbl->add_block(p, word_size);
}

// Note: lock and total_used_words_counter are supplied by the caller;
// the counter is presumably shared with other arenas of the same context
// (it is only incremented/decremented here, never owned) - see usage below.
MetaspaceArena::MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy,
                               Mutex* lock, SizeAtomicCounter* total_used_words_counter,
                               const char* name) :
  _lock(lock),
  _chunk_manager(chunk_manager),
  _growth_policy(growth_policy),
  _chunks(),
  _fbl(NULL),                   // free block list created lazily on first deallocation/salvage
  _total_used_words_counter(total_used_words_counter),
  _name(name)
#ifdef ASSERT
  , _first_fence(NULL)
#endif
{
  UL(debug, ": born.");

  // Update statistics
  InternalStats::inc_num_arena_births();
}

// Destructor: returns all chunks to the chunk manager and releases the free block list.
MetaspaceArena::~MetaspaceArena() {
#ifdef ASSERT
  verify();
  if (Settings::use_allocation_guard()) {
    verify_allocation_guards();
  }
#endif

  MutexLocker fcl(lock(), Mutex::_no_safepoint_check_flag);
  MemRangeCounter return_counter;

  Metachunk* c = _chunks.first();
  Metachunk* c2 = NULL;

  while (c) {
    // Save next pointer before returning c - return_chunk() may invalidate it.
    c2 = c->next();
    return_counter.add(c->used_words());
    DEBUG_ONLY(c->set_prev(NULL);)
    DEBUG_ONLY(c->set_next(NULL);)
    UL2(debug, "return chunk: " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
    _chunk_manager->return_chunk(c);
    // c may be invalid after return_chunk(c) was called. Don't access anymore.
    c = c2;
  }

  UL2(info, "returned %d chunks, total capacity " SIZE_FORMAT " words.",
      return_counter.count(), return_counter.total_size());

  // Give back the words this arena contributed to the shared used-words counter.
  _total_used_words_counter->decrement_by(return_counter.total_size());
  DEBUG_ONLY(chunk_manager()->verify();)
  delete _fbl;
  UL(debug, ": dies.");

  // Update statistics
  InternalStats::inc_num_arena_deaths();
}

// Attempt to enlarge the current chunk to make it large enough to hold at least
// requested_word_size additional words.
//
// On success, true is returned, false otherwise.
bool MetaspaceArena::attempt_enlarge_current_chunk(size_t requested_word_size) {
  assert_lock_strong(lock());

  Metachunk* c = current_chunk();
  assert(c->free_words() < requested_word_size, "Sanity");

  // Not if chunk enlargement is switched off...
  if (Settings::enlarge_chunks_in_place() == false) {
    return false;
  }
  // ... nor if we are already a root chunk ...
  if (c->is_root_chunk()) {
    return false;
  }
  // ... nor if the combined size of chunk content and new content would bring us above the size of a root chunk ...
  if ((c->used_words() + requested_word_size) > metaspace::chunklevel::MAX_CHUNK_WORD_SIZE) {
    return false;
  }

  // The level the chunk would need to have after enlargement.
  const chunklevel_t new_level =
      chunklevel::level_fitting_word_size(c->used_words() + requested_word_size);
  assert(new_level < c->level(), "Sanity");

  // Atm we only enlarge by one level (so, doubling the chunk in size). So, if the requested enlargement
  // would require the chunk to more than double in size, we bail. But this covers about 99% of all cases,
  // so this is good enough.
  if (new_level < c->level() - 1) {
    return false;
  }
  // This only works if chunk is the leader of its buddy pair (and also if buddy
  // is free and unsplit, but that we cannot check outside of metaspace lock).
  if (!c->is_leader()) {
    return false;
  }
  // If the size added to the chunk would be larger than allowed for the next growth step
  // dont enlarge.
  if (next_chunk_level() > c->level()) {
    return false;
  }

  bool success = _chunk_manager->attempt_enlarge_chunk(c);
  assert(success == false || c->free_words() >= requested_word_size, "Sanity");
  return success;
}

// Allocate memory from Metaspace.
// 1) Attempt to allocate from the free block list.
// 2) Attempt to allocate from the current chunk.
// 3) Attempt to enlarge the current chunk in place if it is too small.
// 4) Attempt to get a new chunk and allocate from that chunk.
// At any point, if we hit a commit limit, we return NULL.
MetaWord* MetaspaceArena::allocate(size_t requested_word_size) {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  UL2(trace, "requested " SIZE_FORMAT " words.", requested_word_size);

  MetaWord* p = NULL;
  // Raw size includes any overhead/alignment the allocator imposes on the request.
  const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size);

  // Before bothering the arena proper, attempt to re-use a block from the free blocks list
  if (_fbl != NULL && !_fbl->is_empty()) {
    p = _fbl->remove_block(raw_word_size);
    if (p != NULL) {
      DEBUG_ONLY(InternalStats::inc_num_allocs_from_deallocated_blocks();)
      UL2(trace, "taken from fbl (now: %d, " SIZE_FORMAT ").",
          _fbl->count(), _fbl->total_size());
      // Note: free blocks in freeblock dictionary still count as "used" as far as statistics go;
      // therefore we have no need to adjust any usage counters (see epilogue of allocate_inner())
      // and can just return here.
      return p;
    }
  }

  // Primary allocation
  p = allocate_inner(requested_word_size);

#ifdef ASSERT
  // Fence allocation: in debug builds, optionally place a guard block after each
  // allocation so verify_allocation_guards() can detect overwriters.
  if (p != NULL && Settings::use_allocation_guard()) {
    STATIC_ASSERT(is_aligned(sizeof(Fence), BytesPerWord));
    MetaWord* guard = allocate_inner(sizeof(Fence) / BytesPerWord);
    if (guard != NULL) {
      // Ignore allocation errors for the fence to keep coding simple. If this
      // happens (e.g. because right at this time we hit the Metaspace GC threshold)
      // we miss adding this one fence. Not a big deal. Note that this would
      // be pretty rare. Chances are much higher the primary allocation above
      // would have already failed.
      Fence* f = new(guard) Fence(_first_fence);
      _first_fence = f;
    }
  }
#endif // ASSERT

  return p;
}

// Allocate from the arena proper, once dictionary allocations and fencing are sorted out.
MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {

  assert_lock_strong(lock());

  const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size);
  MetaWord* p = NULL;
  // Outcome flags for the current-chunk attempt; both false means we may
  // allocate from the current chunk below.
  bool current_chunk_too_small = false;
  bool commit_failure = false;

  if (current_chunk() != NULL) {

    // Attempt to satisfy the allocation from the current chunk.

    // If the current chunk is too small to hold the requested size, attempt to enlarge it.
    // If that fails, retire the chunk.
    if (current_chunk()->free_words() < raw_word_size) {
      if (!attempt_enlarge_current_chunk(raw_word_size)) {
        current_chunk_too_small = true;
      } else {
        DEBUG_ONLY(InternalStats::inc_num_chunks_enlarged();)
        UL(debug, "enlarged chunk.");
      }
    }

    // Commit the chunk far enough to hold the requested word size. If that fails, we
    // hit a limit (either GC threshold or MaxMetaspaceSize). In that case retire the
    // chunk.
    if (!current_chunk_too_small) {
      if (!current_chunk()->ensure_committed_additional(raw_word_size)) {
        UL2(info, "commit failure (requested size: " SIZE_FORMAT ")", raw_word_size);
        commit_failure = true;
      }
    }

    // Allocate from the current chunk. This should work now.
    if (!current_chunk_too_small && !commit_failure) {
      p = current_chunk()->allocate(raw_word_size);
      assert(p != NULL, "Allocation from chunk failed.");
    }
  }

  if (p == NULL) {
    // If we are here, we either had no current chunk to begin with or it was deemed insufficient.
    assert(current_chunk() == NULL ||
           current_chunk_too_small || commit_failure, "Sanity");

    Metachunk* new_chunk = allocate_new_chunk(raw_word_size);
    if (new_chunk != NULL) {
      UL2(debug, "allocated new chunk " METACHUNK_FORMAT " for requested word size " SIZE_FORMAT ".",
          METACHUNK_FORMAT_ARGS(new_chunk), requested_word_size);

      assert(new_chunk->free_below_committed_words() >= raw_word_size, "Sanity");
      if (Settings::new_chunks_are_fully_committed()) {
        assert(new_chunk->is_fully_committed(), "Chunk should be fully committed.");
      }

      // We have a new chunk. Before making it the current chunk, retire the old one.
      if (current_chunk() != NULL) {
        salvage_chunk(current_chunk());
        DEBUG_ONLY(InternalStats::inc_num_chunks_retired();)
      }

      // Adding the chunk makes it the new current chunk.
      _chunks.add(new_chunk);

      // Now, allocate from that chunk. That should work.
      p = current_chunk()->allocate(raw_word_size);
      assert(p != NULL, "Allocation from chunk failed.");
    } else {
      UL2(info, "failed to allocate new chunk for requested word size " SIZE_FORMAT ".", requested_word_size);
    }
  }

  // Epilogue: bookkeeping. Success adds raw_word_size to the shared used-words
  // counter; failure only bumps the failure statistic.
  if (p == NULL) {
    InternalStats::inc_num_allocs_failed_limit();
  } else {
    DEBUG_ONLY(InternalStats::inc_num_allocs();)
    _total_used_words_counter->increment_by(raw_word_size);
  }

  SOMETIMES(verify_locked();)

  if (p == NULL) {
    UL(info, "allocation failed, returned NULL.");
  } else {
    UL2(trace, "after allocation: %u chunk(s), current:" METACHUNK_FULL_FORMAT,
        _chunks.count(), METACHUNK_FULL_FORMAT_ARGS(current_chunk()));
    UL2(trace, "returning " PTR_FORMAT ".", p2i(p));
  }
  return p;
}

// Prematurely returns a metaspace allocation to the _block_freelists
// because it is not needed anymore (requires CLD lock to be active).
void MetaspaceArena::deallocate_locked(MetaWord* p, size_t word_size) {
  assert_lock_strong(lock());
  // At this point a current chunk must exist since we only deallocate if we did allocate before.
  assert(current_chunk() != NULL, "stray deallocation?");
  assert(is_valid_area(p, word_size),
         "Pointer range not part of this Arena and cannot be deallocated: (" PTR_FORMAT ".." PTR_FORMAT ").",
         p2i(p), p2i(p + word_size));

  UL2(trace, "deallocating " PTR_FORMAT ", word size: " SIZE_FORMAT ".",
      p2i(p), word_size);

  // Re-derive the raw size (the original allocation was padded the same way),
  // then hand the block to the free block list for later re-use.
  size_t raw_word_size = get_raw_word_size_for_requested_word_size(word_size);
  add_allocation_to_fbl(p, raw_word_size);

  DEBUG_ONLY(verify_locked();)
}

// Prematurely returns a metaspace allocation to the _block_freelists because it is not
// needed anymore.
void MetaspaceArena::deallocate(MetaWord* p, size_t word_size) {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  deallocate_locked(p, word_size);
}

// Update statistics. This walks all in-use chunks.
// Adds this arena's numbers into *out (does not reset it first).
void MetaspaceArena::add_to_statistics(ArenaStats* out) const {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);

  for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) {
    // Stats are bucketed per chunk level.
    InUseChunkStats& ucs = out->_stats[c->level()];
    ucs._num++;
    ucs._word_size += c->word_size();
    ucs._committed_words += c->committed_words();
    ucs._used_words += c->used_words();
    // Note: for free and waste, we only count what's committed.
    // Only the current chunk can still serve allocations; remaining space in
    // retired chunks counts as waste.
    if (c == current_chunk()) {
      ucs._free_words += c->free_below_committed_words();
    } else {
      ucs._waste_words += c->free_below_committed_words();
    }
  }

  if (_fbl != NULL) {
    out->_free_blocks_num += _fbl->count();
    out->_free_blocks_word_size += _fbl->total_size();
  }

  SOMETIMES(out->verify();)
}

// Convenience method to get the most important usage statistics.
411 // For deeper analysis use add_to_statistics(). 412 void MetaspaceArena::usage_numbers(size_t* p_used_words, size_t* p_committed_words, size_t* p_capacity_words) const { 413 MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag); 414 size_t used = 0, comm = 0, cap = 0; 415 for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) { 416 used += c->used_words(); 417 comm += c->committed_words(); 418 cap += c->word_size(); 419 } 420 if (p_used_words != NULL) { 421 *p_used_words = used; 422 } 423 if (p_committed_words != NULL) { 424 *p_committed_words = comm; 425 } 426 if (p_capacity_words != NULL) { 427 *p_capacity_words = cap; 428 } 429 } 430 431 #ifdef ASSERT 432 433 void MetaspaceArena::verify_locked() const { 434 assert_lock_strong(lock()); 435 assert(_growth_policy != NULL && _chunk_manager != NULL, "Sanity"); 436 _chunks.verify(); 437 if (_fbl != NULL) { 438 _fbl->verify(); 439 } 440 } 441 442 void MetaspaceArena::Fence::verify() const { 443 assert(_eye1 == EyeCatcher && _eye2 == EyeCatcher, 444 "Metaspace corruption: fence block at " PTR_FORMAT " broken.", p2i(this)); 445 } 446 447 void MetaspaceArena::verify_allocation_guards() const { 448 assert(Settings::use_allocation_guard(), "Don't call with guards disabled."); 449 for (const Fence* f = _first_fence; f != NULL; f = f->next()) { 450 f->verify(); 451 } 452 } 453 454 void MetaspaceArena::verify() const { 455 MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag); 456 verify_locked(); 457 } 458 459 // Returns true if the area indicated by pointer and size have actually been allocated 460 // from this arena. 
461 bool MetaspaceArena::is_valid_area(MetaWord* p, size_t word_size) const { 462 assert(p != NULL && word_size > 0, "Sanity"); 463 bool found = false; 464 for (const Metachunk* c = _chunks.first(); c != NULL && !found; c = c->next()) { 465 assert(c->is_valid_committed_pointer(p) == 466 c->is_valid_committed_pointer(p + word_size - 1), "range intersects"); 467 found = c->is_valid_committed_pointer(p); 468 } 469 return found; 470 } 471 472 #endif // ASSERT 473 474 void MetaspaceArena::print_on(outputStream* st) const { 475 MutexLocker fcl(_lock, Mutex::_no_safepoint_check_flag); 476 print_on_locked(st); 477 } 478 479 void MetaspaceArena::print_on_locked(outputStream* st) const { 480 assert_lock_strong(_lock); 481 st->print_cr("sm %s: %d chunks, total word size: " SIZE_FORMAT ", committed word size: " SIZE_FORMAT, _name, 482 _chunks.count(), _chunks.calc_word_size(), _chunks.calc_committed_word_size()); 483 _chunks.print_on(st); 484 st->cr(); 485 st->print_cr("growth-policy " PTR_FORMAT ", lock " PTR_FORMAT ", cm " PTR_FORMAT ", fbl " PTR_FORMAT, 486 p2i(_growth_policy), p2i(_lock), p2i(_chunk_manager), p2i(_fbl)); 487 } 488 489 } // namespace metaspace 490