/*
 * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2023 SAP SE. All rights reserved.
 * Copyright (c) 2023 Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/counters.hpp"
#include "memory/metaspace/freeBlocks.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metablock.inline.hpp"
#include "memory/metaspace/metachunkList.hpp"
#include "memory/metaspace/metaspaceArena.hpp"
#include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

#define LOG_PLEASE
#include "metaspaceGtestCommon.hpp"
#include "metaspaceGtestContexts.hpp"
#include "metaspaceGtestRangeHelpers.hpp"

#define HANDLE_FAILURE \
  if (testing::Test::HasFailure()) { \
    return; \
  }

namespace metaspace {

class MetaspaceArenaTestFriend {
  const MetaspaceArena* const _arena;
public:
  MetaspaceArenaTestFriend(const MetaspaceArena* arena) : _arena(arena) {}
  const MetachunkList& chunks() const { return _arena->_chunks; }
  const FreeBlocks* fbl() const { return _arena->_fbl; }
};

class MetaspaceArenaTestHelper {

  MetaspaceGtestContext& _context;
  const ArenaGrowthPolicy* const _growth_policy;

  MetaspaceArena* _arena;

public:

  // Create a helper; the growth policy is specified directly.
  MetaspaceArenaTestHelper(MetaspaceGtestContext& context, const ArenaGrowthPolicy* growth_policy,
                           size_t allocation_alignment_words = Metaspace::min_allocation_alignment_words) :
    _context(context), _growth_policy(growth_policy), _arena(nullptr)
  {
    _arena = new MetaspaceArena(_context.context(), _growth_policy, allocation_alignment_words, "gtest-MetaspaceArena");
    DEBUG_ONLY(_arena->verify());
    _context.inc_num_arenas_created();
  }
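
  // Illustrative usage (this mirrors what the tests below do):
  //
  //   MetaspaceGtestContext context;   // optionally constructed with a commit limit
  //   MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
  //   helper.allocate_from_arena_with_tests_expect_success(128);
  //   // the arena is verified and deleted when the helper goes out of scope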

  // Create a helper; the growth policy is derived from the given (space type, is_class) tuple.
  MetaspaceArenaTestHelper(MetaspaceGtestContext& context,
                           Metaspace::MetaspaceType space_type, bool is_class,
                           size_t allocation_alignment_words = Metaspace::min_allocation_alignment_words) :
    MetaspaceArenaTestHelper(context, ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), allocation_alignment_words)
  {}

  ~MetaspaceArenaTestHelper() {
    delete_arena_with_tests();
  }

  MetaspaceArena* arena() const { return _arena; }

  // Note: all test functions return void due to the gtest limitation that ASSERT
  // can only be used in void-returning functions.

  void delete_arena_with_tests() {
    if (_arena != nullptr) {
      size_t used_words_before = _context.used_words();
      size_t committed_words_before = _context.committed_words();
      DEBUG_ONLY(_arena->verify());
      delete _arena;
      _arena = nullptr;
      size_t used_words_after = _context.used_words();
      size_t committed_words_after = _context.committed_words();
      assert(_context.num_arenas_created() >= 1, "Sanity");
      if (_context.num_arenas_created() == 1) {
        ASSERT_0(used_words_after);
      } else {
        ASSERT_LE(used_words_after, used_words_before);
      }
      ASSERT_LE(committed_words_after, committed_words_before);
    }
  }

  void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
    size_t arena_used = 0, arena_committed = 0, arena_reserved = 0;
    _arena->usage_numbers(&arena_used, &arena_committed, &arena_reserved);
    EXPECT_GE(arena_committed, arena_used);
    EXPECT_GE(arena_reserved, arena_committed);

    size_t context_used = _context.used_words();
    size_t context_committed = _context.committed_words();
    size_t context_reserved = _context.reserved_words();
    EXPECT_GE(context_committed, context_used);
    EXPECT_GE(context_reserved, context_committed);

    // If only one arena uses the context, usage numbers must match.
    if (_context.num_arenas_created() == 1) {
      EXPECT_EQ(context_used, arena_used);
    } else {
      assert(_context.num_arenas_created() > 1, "Sanity");
      EXPECT_GE(context_used, arena_used);
    }

    // Committed and reserved numbers don't have to match, since free chunks may exist.
    EXPECT_GE(context_committed, arena_committed);
    EXPECT_GE(context_reserved, arena_reserved);

    if (p_used) {
      *p_used = arena_used;
    }
    if (p_committed) {
      *p_committed = arena_committed;
    }
    if (p_capacity) {
      *p_capacity = arena_reserved;
    }
  }

  // Allocate; the caller expects success; the pointer is returned in *p_return_value.
  void allocate_from_arena_with_tests_expect_success(MetaWord** p_return_value, size_t word_size) {
    allocate_from_arena_with_tests(p_return_value, word_size);
    ASSERT_NOT_NULL(*p_return_value);
  }

  // Allocate; the caller expects success but is not interested in the return value.
  void allocate_from_arena_with_tests_expect_success(size_t word_size) {
    MetaWord* dummy = nullptr;
    allocate_from_arena_with_tests_expect_success(&dummy, word_size);
  }

  // Allocate; the caller expects failure.
  void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
    MetaWord* dummy = nullptr;
    allocate_from_arena_with_tests(&dummy, word_size);
    ASSERT_NULL(dummy);
  }

  void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {
    MetaBlock result, wastage;
    allocate_from_arena_with_tests(word_size, result, wastage);
    if (wastage.is_nonempty()) {
      _arena->deallocate(wastage);
      wastage.reset();
    }
    (*p_return_value) = result.base();
  }
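
  // Note on wastage: MetaspaceArena::allocate() may hand back a second block besides
  // the result - e.g. an alignment gap, or the salvaged remainder of a retired chunk -
  // which the pointer-based overload above immediately returns to the arena via
  // deallocate(). The overload below exposes it so tests can assert on its size
  // and alignment.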

  // Allocate; it may or may not work; the result and wastage blocks are returned
  // in the given out parameters.
  void allocate_from_arena_with_tests(size_t word_size, MetaBlock& result, MetaBlock& wastage) {

    // Note: usage_numbers walks all chunks in use and counts.
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    size_t possible_expansion = _context.commit_limiter().possible_expansion_words();

    result = _arena->allocate(word_size, wastage);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    if (result.is_empty()) {
      // Allocation failed.
      ASSERT_LT(possible_expansion, word_size);
      ASSERT_EQ(used, used2);
      ASSERT_EQ(committed, committed2);
      ASSERT_EQ(capacity, capacity2);
    } else {
      // Allocation succeeded. Should be correctly aligned.
      ASSERT_TRUE(result.is_aligned_base(_arena->allocation_alignment_words()));

      // used: may or may not go up (the request may have been satisfied from the
      // free block list, whose content already counts as used).
      // committed: may go up, may not.
      // capacity: ditto.
      ASSERT_GE(used2, used);
      ASSERT_GE(committed2, committed);
      ASSERT_GE(capacity2, capacity);
    }
  }

  // Allocate; it may or may not work; the caller does not care about the result value.
  void allocate_from_arena_with_tests(size_t word_size) {
    MetaWord* dummy = nullptr;
    allocate_from_arena_with_tests(&dummy, word_size);
  }

  void deallocate_with_tests(MetaWord* p, size_t word_size) {
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    _arena->deallocate(MetaBlock(p, word_size));

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    // Nothing should have changed. Deallocated blocks are added to the free block list,
    // which still counts as used.
    ASSERT_EQ(used2, used);
    ASSERT_EQ(committed2, committed);
    ASSERT_EQ(capacity2, capacity);
  }

  ArenaStats get_arena_statistics() const {
    ArenaStats stats;
    _arena->add_to_statistics(&stats);
    return stats;
  }

  MetaspaceArenaTestFriend internal_access() const {
    return MetaspaceArenaTestFriend(_arena);
  }

  // Convenience method to return the number of chunks in the arena (including the current chunk).
  int get_number_of_chunks() const {
    return internal_access().chunks().count();
  }

};
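
// Simple smoke tests: allocate a mix of tiny and large blocks, with and without a
// commit limit. Note that with a limit in place some of these allocations may fail;
// test_basics() uses the tolerant allocation helper, which only checks the arena's
// internal invariants, not allocation success.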

static void test_basics(size_t commit_limit, bool is_micro) {
  MetaspaceGtestContext context(commit_limit);
  MetaspaceArenaTestHelper helper(context,
                                  is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType,
                                  false);

  helper.allocate_from_arena_with_tests(1);
  helper.allocate_from_arena_with_tests(128);
  helper.allocate_from_arena_with_tests(128 * K);
  helper.allocate_from_arena_with_tests(1);
  helper.allocate_from_arena_with_tests(128);
  helper.allocate_from_arena_with_tests(128 * K);
}

TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
  test_basics(max_uintx, true);
}

TEST_VM(metaspace, MetaspaceArena_basics_micro_limit) {
  test_basics(256 * K, true);
}

TEST_VM(metaspace, MetaspaceArena_basics_standard_nolimit) {
  test_basics(max_uintx, false);
}

TEST_VM(metaspace, MetaspaceArena_basics_standard_limit) {
  test_basics(256 * K, false);
}

// Test chunk enlargement:
// A single MetaspaceArena, left undisturbed with room to grow. Slowly fill the arena up.
// We should see at least some occurrences of chunk-in-place enlargement.
static void test_chunk_enlargement_simple(Metaspace::MetaspaceType spacetype, bool is_class) {

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, spacetype, is_class);

  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE &&
         metaspace::InternalStats::num_chunks_enlarged() == n1) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
  }

  EXPECT_GT(metaspace::InternalStats::num_chunks_enlarged(), n1);

}

// Do this test for some of the standard types; don't do it for the boot loader type,
// since that one starts out with the maximum chunk size, so we would not see any enlargement.

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_c) {
  test_chunk_enlargement_simple(Metaspace::StandardMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_nc) {
  test_chunk_enlargement_simple(Metaspace::StandardMetaspaceType, false);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_c) {
  test_chunk_enlargement_simple(Metaspace::ReflectionMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_nc) {
  test_chunk_enlargement_simple(Metaspace::ReflectionMetaspaceType, false);
}

// Test chunk enlargement:
// A single MetaspaceArena, left undisturbed with room to grow. Slowly fill the arena up.
// We should see occurrences of chunk-in-place enlargement.
// Here, we give it an ideal policy which should enable the initial chunk to grow unmolested
// until we finish.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_2) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  // Note: internally, chunk-in-place enlargement is disallowed if growing the chunk
  // would cause the arena to claim more memory than its growth policy allows. This
  // is done to prevent the arena from growing too fast.
  //
  // In order to test in-place growth here without that restriction, we give it an
  // artificial growth policy which starts out with a tiny chunk size, then balloons
  // right up to the maximum chunk size. This will cause the initial chunk to be tiny,
  // and the arena is then able to grow it without violating the growth policy.
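  //
  // (Reminder on chunk levels: levels are ordered from large to small -
  // ROOT_CHUNK_LEVEL denotes the largest chunk size, HIGHEST_CHUNK_LEVEL the
  // smallest. So the policy below means: start with the smallest possible chunk,
  // then use root chunk size for every subsequent step.)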
  chunklevel_t growth[] = { HIGHEST_CHUNK_LEVEL, ROOT_CHUNK_LEVEL };
  ArenaGrowthPolicy growth_policy(growth, 2);

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, &growth_policy);

  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
    if (allocated <= MAX_CHUNK_WORD_SIZE) {
      // The chunk should have been enlarged in place.
      ASSERT_EQ(1, helper.get_number_of_chunks());
    } else {
      // A new chunk should have been started.
      ASSERT_EQ(2, helper.get_number_of_chunks());
    }
  }

  int times_chunk_were_enlarged = (int)(metaspace::InternalStats::num_chunks_enlarged() - n1);
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

  ASSERT_GT0(times_chunk_were_enlarged);

}

// Regression test: Given a single MetaspaceArena, left undisturbed with room to grow,
// test that in-place enlargement correctly fails if growing the chunk would bring us
// beyond the maximum size of a chunk.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_max_chunk_size) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  MetaspaceGtestContext context;

  for (size_t first_allocation_size = 1; first_allocation_size <= MAX_CHUNK_WORD_SIZE / 2; first_allocation_size *= 2) {

    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    // We first allocate a small amount, then an amount sized such that the sum of
    // both allocations just exceeds the root chunk size.
    // This should work - we should not see any problems - but no chunk enlargement
    // should happen.
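    //
    // (Concretely: first_allocation_size + (MAX_CHUNK_WORD_SIZE - first_allocation_size + 1)
    // = MAX_CHUNK_WORD_SIZE + 1 words, one word more than the largest chunk can hold,
    // so in-place growth of the first chunk can never satisfy the second allocation.)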
    uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

    helper.allocate_from_arena_with_tests_expect_success(first_allocation_size);
    EXPECT_EQ(helper.get_number_of_chunks(), 1);

    helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE - first_allocation_size + 1);
    EXPECT_EQ(helper.get_number_of_chunks(), 2);

    int times_chunk_were_enlarged = (int)(metaspace::InternalStats::num_chunks_enlarged() - n1);
    LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

    EXPECT_0(times_chunk_were_enlarged);

  }
}

// Regression test: Given a single MetaspaceArena, left undisturbed with room to grow,
// test that in-place enlargement correctly fails if growing the chunk would more than
// double its size.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_doubling_chunk_size) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  helper.allocate_from_arena_with_tests_expect_success(1000);
  EXPECT_EQ(helper.get_number_of_chunks(), 1);

  helper.allocate_from_arena_with_tests_expect_success(4000);
  EXPECT_EQ(helper.get_number_of_chunks(), 2);

  int times_chunk_were_enlarged = (int)(metaspace::InternalStats::num_chunks_enlarged() - n1);
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

  EXPECT_0(times_chunk_were_enlarged);

}

// Test the MetaspaceArena's free block list:
// Allocate, deallocate, then allocate the same block size again. The second allocation
// should reuse the deallocated block.
TEST_VM(metaspace, MetaspaceArena_deallocate) {
  if (Settings::use_allocation_guard()) {
    return;
  }
  for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
    MetaspaceGtestContext context;
    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    MetaWord* p1 = nullptr;
    helper.allocate_from_arena_with_tests_expect_success(&p1, s);
    ASSERT_FALSE(HasFailure());

    size_t used1 = 0, capacity1 = 0;
    helper.usage_numbers_with_test(&used1, nullptr, &capacity1);
    ASSERT_FALSE(HasFailure());
    ASSERT_EQ(used1, s);

    helper.deallocate_with_tests(p1, s);

    size_t used2 = 0, capacity2 = 0;
    helper.usage_numbers_with_test(&used2, nullptr, &capacity2);
    ASSERT_FALSE(HasFailure());
    ASSERT_EQ(used1, used2);
    ASSERT_EQ(capacity1, capacity2);

    MetaWord* p2 = nullptr;
    helper.allocate_from_arena_with_tests_expect_success(&p2, s);
    ASSERT_FALSE(HasFailure());

    size_t used3 = 0, capacity3 = 0;
    helper.usage_numbers_with_test(&used3, nullptr, &capacity3);
    ASSERT_FALSE(HasFailure());
    ASSERT_EQ(used3, used2);
    ASSERT_EQ(capacity3, capacity2);

    // Actually, we should get the very same allocation back.
    ASSERT_EQ(p1, p2);
  }
}

static void test_recover_from_commit_limit_hit() {

  // Test:
  // - Multiple MetaspaceArenas allocate (operating under the same commit limiter).
  // - One, while attempting to commit parts of its current chunk on demand,
  //   triggers the limit and cannot commit its chunk further.
  // - We release the other MetaspaceArena - its content is put back on the
  //   freelists.
  // - We re-attempt allocation from the first arena. It should now succeed.
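  //
  // (All arenas in this test share the context's commit limiter, so the commit
  // budget set up below is global across them.)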
  //
  // This means the first MetaspaceArena may have to let go of its current chunk,
  // retire it, and take a fresh chunk from the freelist.

  const size_t commit_limit = Settings::commit_granule_words() * 10;
  MetaspaceGtestContext context(commit_limit);

  // The first MetaspaceArena mimics a micro loader. This will fill the free
  // chunk list with very small chunks. We allocate from them in an interleaved
  // way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);

  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed on demand, so we are likely
  // to hit the commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);

  // Allocate space until we have less than two commit granules (but more than one) left.
  size_t allocated_from_1_and_2 = 0;
  while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
         allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
    HANDLE_FAILURE
  }

  // Now, allocating from helper3, creep up on the limit.
  size_t allocated_from_3 = 0;
  MetaWord* p = nullptr;
  while ((helper3.allocate_from_arena_with_tests(&p, 1), p != nullptr) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);

  EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);

  // We expect the freelist to be empty of committed space...
  EXPECT_0(context.cm().calc_committed_word_size());

  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();

  // ... and deleting that arena should have populated the freelist with committed space.
  EXPECT_GT(context.cm().calc_committed_word_size(), (size_t)0);

  // Repeat the allocation from helper3; it should now work.
  helper3.allocate_from_arena_with_tests_expect_success(1);

}

TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
  test_recover_from_commit_limit_hit();
}

static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
                                   size_t expected_starting_capacity,
                                   bool test_in_place_enlargement)
{

  if (Settings::use_allocation_guard()) {
    return;
  }

  // From a MetaspaceArena in a clean room allocate tiny amounts and
  // watch it grow. Used/committed/capacity should not grow in
  // large jumps. Also, different types of MetaspaceArena should
  // have different initial capacities.
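  //
  // (expected_starting_capacity is the word size of the first chunk the growth
  // policy hands out for this arena type - see the per-type invocations below,
  // which must stay in sync with memory/metaspace/arenaGrowthPolicy.cpp.)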

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper smhelper(context, type, is_class);

  MetaspaceArenaTestHelper smhelper_harrasser(context, Metaspace::ReflectionMetaspaceType, true);

  size_t used = 0, committed = 0, capacity = 0;
  const size_t alloc_words = 16;

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);
  ASSERT_0(used);
  ASSERT_0(committed);
  ASSERT_0(capacity);

  ///// First allocation //

  smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);

  ASSERT_EQ(used, alloc_words);
  ASSERT_GE(committed, used);
  ASSERT_GE(capacity, committed);

  ASSERT_EQ(capacity, expected_starting_capacity);

  // What happens when we allocate, commit-wise:
  // The arena allocates from its current chunk, committing needed memory from the chunk
  // on demand. The chunk asks the underlying vsnode to commit the area it is located in.
  // Since the chunk may be smaller than one commit granule, this may result in surrounding
  // memory also getting committed.
  // In reality we will commit at granule granularity, but the arena can only know what its
  // first chunk committed. So what it thinks was committed depends on the size of its first
  // chunk, which depends on the ArenaGrowthPolicy.
  {
    const chunklevel_t expected_level_for_first_chunk =
        ArenaGrowthPolicy::policy_for_space_type(type, is_class)->get_level_at_step(0);
    const size_t what_arena_should_think_was_committed =
        MIN2(Settings::commit_granule_words(), word_size_for_level(expected_level_for_first_chunk));
    const size_t what_should_really_be_committed = Settings::commit_granule_words();

    ASSERT_EQ(committed, what_arena_should_think_was_committed);
    ASSERT_EQ(context.committed_words(), what_should_really_be_committed);
  }

  ///// Subsequent allocations //

  DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)

  size_t words_allocated = 0;
  int num_allocated = 0;
  const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
  size_t highest_capacity_jump = capacity;
  int num_capacity_jumps = 0;

  while (words_allocated < safety && num_capacity_jumps < 15) {

    // If we want to test growth with in-place chunk enlargement, leave the MetaspaceArena
    // undisturbed; it will have all the room it needs to grow. Otherwise allocate from a
    // little side arena to increase fragmentation.
    // (Note that this does not completely prevent in-place chunk enlargement, but makes
    // it rather improbable.)
    if (!test_in_place_enlargement) {
      smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
    }

    smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
    HANDLE_FAILURE
    words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words);
    num_allocated++;

    size_t used2 = 0, committed2 = 0, capacity2 = 0;

    smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);
    HANDLE_FAILURE

    // used should not grow larger than what we allocated, plus possible overhead.
    ASSERT_GE(used2, used);
    ASSERT_LE(used2, used + alloc_words * 2);
    ASSERT_LE(used2, words_allocated + 100);
    used = used2;
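
    // (The "+ 100" above gives slack for small amounts of overhead - e.g. alignment
    // or chunk-salvage wastage - which also count as used.)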

    // A jump in committed words should not be larger than the commit granule size.
    // It can be smaller, since the current chunk of the MetaspaceArena may be
    // smaller than a commit granule.
    // (Note: unless root chunks are born fully committed.)
    ASSERT_GE(committed2, used2);
    ASSERT_GE(committed2, committed);
    const size_t committed_jump = committed2 - committed;
    if (committed_jump > 0) {
      ASSERT_LE(committed_jump, Settings::commit_granule_words());
    }
    committed = committed2;

    // Capacity jumps: test that the arena's capacity does not grow too fast.
    ASSERT_GE(capacity2, committed2);
    ASSERT_GE(capacity2, capacity);
    const size_t capacity_jump = capacity2 - capacity;
    if (capacity_jump > 0) {
      LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump);
      if (capacity_jump > highest_capacity_jump) {
        /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
         * on allocation history. Need to rethink this.
        ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
        ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
        ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
        */
        highest_capacity_jump = capacity_jump;
      }
      num_capacity_jumps++;
    }

    capacity = capacity2;

  }

  // No FBL should exist, since we did not deallocate.
  ASSERT_EQ(smhelper.internal_access().fbl(), (FreeBlocks*)nullptr);
  ASSERT_EQ(smhelper_harrasser.internal_access().fbl(), (FreeBlocks*)nullptr);

  // After all this work, we should see an increase in the number of chunk-in-place
  // enlargements. (This especially is vulnerable to regression: the decisions of when
  // to do in-place enlargements are somewhat complicated; see
  // MetaspaceArena::attempt_enlarge_current_chunk().)
#ifdef ASSERT
  if (test_in_place_enlargement) {
    const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
    ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
  }
#endif
}

// These numbers have to be in sync with the arena policy numbers
// (see memory/metaspace/arenaGrowthPolicy.cpp).
TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_c_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}
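
// Naming convention for the growth tests: "_c"/"_nc" selects class/non-class
// metaspace (the is_class argument); "inplace"/"not_inplace" selects whether the
// arena is left undisturbed so chunks can be enlarged in place (the
// test_in_place_enlargement argument of test_controlled_growth()).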

/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
TEST_VM(metaspace, MetaspaceArena_growth_boot_c_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1M), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_boot_c_not_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1M), false);
}
*/

TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), false);
}

/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4M), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4M), false);
}
*/

// Test that repeated allocation-deallocation cycles with the same block size
// do not increase metaspace usage after the initial allocation (the deallocated
// block should be reused by the next allocation).
static void test_repeatedly_allocate_and_deallocate(bool is_topmost) {
  // Test various sizes, including (importantly) the maximum possible block size = 1 root chunk.
  for (size_t blocksize = Metaspace::max_allocation_word_size();
       blocksize >= Metaspace::min_allocation_word_size; blocksize /= 2) {
    size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0;
    MetaWord* p = nullptr, *p2 = nullptr;

    MetaspaceGtestContext context;
    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    // First allocation.
    helper.allocate_from_arena_with_tests_expect_success(&p, blocksize);
    if (!is_topmost) {
      // Another one on top; the size does not matter.
      helper.allocate_from_arena_with_tests_expect_success(0x10);
      HANDLE_FAILURE
    }

    // Measure.
    helper.usage_numbers_with_test(&used1, &committed1, nullptr);

    // Dealloc, alloc several times with the same size.
    for (int i = 0; i < 5; i++) {
      helper.deallocate_with_tests(p, blocksize);
      helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize);
      HANDLE_FAILURE
      // We should get the same pointer back.
      EXPECT_EQ(p2, p);
    }
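
    // (Why the is_topmost distinction: if the deallocated block is the arena's most
    // recent allocation, the arena could conceivably satisfy the re-allocation by a
    // shortcut rather than via the free block list; the non-topmost variant rules
    // that out by first allocating another block on top.)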

    // Measure again.
    helper.usage_numbers_with_test(&used2, &committed2, nullptr);
    EXPECT_EQ(used2, used1);
    EXPECT_EQ(committed1, committed2);
  }
}

TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_top_allocation) {
  test_repeatedly_allocate_and_deallocate(true);
}

TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_nontop_allocation) {
  test_repeatedly_allocate_and_deallocate(false);
}

static void test_random_aligned_allocation(size_t arena_alignment_words, SizeRange range) {
  if (Settings::use_allocation_guard()) {
    return;
  }

  // We let the arena use 4K chunks, unless the allocation size is larger.
  chunklevel_t level = CHUNK_LEVEL_4K;
  const ArenaGrowthPolicy policy(&level, 1);
  const size_t chunk_word_size = word_size_for_level(level);

  size_t expected_used = 0;

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, &policy, arena_alignment_words);

  size_t last_alloc_size = 0;
  unsigned num_allocations = 0;

  const size_t max_used = MIN2(MAX2(chunk_word_size * 10, (range.highest() * 100)),
                               LP64_ONLY(64) NOT_LP64(16) * M); // word size!
  while (expected_used < max_used) {

    const int chunks_before = helper.get_number_of_chunks();

    MetaBlock result, wastage;
    size_t alloc_words = range.random_value();
    NOT_LP64(alloc_words = align_up(alloc_words, Metaspace::min_allocation_alignment_words));
    helper.allocate_from_arena_with_tests(alloc_words, result, wastage);

    ASSERT_TRUE(result.is_nonempty());
    ASSERT_TRUE(result.is_aligned_base(arena_alignment_words));
    ASSERT_EQ(result.word_size(), alloc_words);

    expected_used += alloc_words + wastage.word_size();
    const int chunks_now = helper.get_number_of_chunks();
    ASSERT_GE(chunks_now, chunks_before);
    ASSERT_LE(chunks_now, chunks_before + 1);

    // Estimate wastage:
    // Guessing at wastage is fairly simple, since we don't expect to ever use the fbl
    // (we don't deallocate). Therefore, wastage can only be caused by an alignment gap
    // or by salvaging an old chunk before a new chunk is added.
    const bool expect_alignment_gap = !is_aligned(last_alloc_size, arena_alignment_words);
    const bool new_chunk_added = chunks_now > chunks_before;

    if (num_allocations == 0) {
      // Expect no wastage if it's the first allocation in the arena.
      ASSERT_TRUE(wastage.is_empty());
    } else {
      if (expect_alignment_gap) {
        // Expect wastage if the alignment requires it.
        ASSERT_TRUE(wastage.is_nonempty());
      }
    }

    if (wastage.is_nonempty()) {
      // If we have wastage, we expect it to be either too small or unaligned. That would
      // not be true for wastage from the fbl, which could have any size; however, in this
      // test we don't deallocate, so we don't expect wastage from the fbl.
      if (wastage.is_aligned_base(arena_alignment_words)) {
        ASSERT_LT(wastage.word_size(), alloc_words);
      }
      if (new_chunk_added) {
        // Chunk turnover: no more wastage than the size of a commit granule, since we
        // salvage the committed remainder of the old chunk.
        ASSERT_LT(wastage.word_size(), Settings::commit_granule_words());
      } else {
        // No chunk turnover: no more wastage than what the alignment requires.
        ASSERT_LT(wastage.word_size(), arena_alignment_words);
      }
    }
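
    // (For example, with arena_alignment_words == 32, an unaligned previous allocation
    // leaves an alignment gap of at most 31 words - hence the strict "<" bound above.)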

    // Check stats too.
    size_t used, committed, reserved;
    helper.usage_numbers_with_test(&used, &committed, &reserved);
    ASSERT_EQ(used, expected_used);

    // No FBL should exist, we did not deallocate.
    ASSERT_EQ(helper.internal_access().fbl(), (FreeBlocks*)nullptr);

    HANDLE_FAILURE

    last_alloc_size = alloc_words;
    num_allocations++;
  }
  LOG("allocs: %u", num_allocations);
}

#define TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(al)                               \
TEST_VM(metaspace, MetaspaceArena_test_random_small_aligned_allocation_##al) {  \
  static const SizeRange range(Metaspace::min_allocation_word_size, 128);       \
  test_random_aligned_allocation(al, range);                                    \
}

#ifdef _LP64
TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(1);
#endif
TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(2);
TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(8);
TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(32);
TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(128);
TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(MIN_CHUNK_WORD_SIZE);

#define TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(al)                               \
TEST_VM(metaspace, MetaspaceArena_test_random_large_aligned_allocation_##al) {  \
  static const SizeRange range(Metaspace::max_allocation_word_size() / 2,       \
                               Metaspace::max_allocation_word_size());          \
  test_random_aligned_allocation(al, range);                                    \
}

#ifdef _LP64
TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(1);
#endif
TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(2);
TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(8);
TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(32);
TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(128);
TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(MIN_CHUNK_WORD_SIZE);

} // namespace metaspace