/*
 * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2023 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/counters.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metaspaceArena.hpp"
#include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

//#define LOG_PLEASE
#include "metaspaceGtestCommon.hpp"
#include "metaspaceGtestContexts.hpp"
#include "metaspaceGtestRangeHelpers.hpp"

using metaspace::AllocationAlignmentByteSize;
using metaspace::ArenaGrowthPolicy;
using metaspace::CommitLimiter;
using metaspace::InternalStats;
using metaspace::MemRangeCounter;
using metaspace::MetaspaceArena;
using metaspace::SizeAtomicCounter;
using metaspace::Settings;
using metaspace::ArenaStats;

class MetaspaceArenaTestHelper {

  MetaspaceGtestContext& _context;

  const ArenaGrowthPolicy* _growth_policy;
  SizeAtomicCounter _used_words_counter;
  MetaspaceArena* _arena;

  void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") {
    _growth_policy = growth_policy;
    _arena = new MetaspaceArena(&_context.cm(), _growth_policy, &_used_words_counter, name);
    DEBUG_ONLY(_arena->verify());
  }

public:

  // Create a helper; the growth policy for the arena is determined by the given
  // space_type/is_class tuple.
  MetaspaceArenaTestHelper(MetaspaceGtestContext& context,
                           Metaspace::MetaspaceType space_type, bool is_class,
                           const char* name = "gtest-MetaspaceArena") :
    _context(context)
  {
    initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), name);
  }

  // Create a helper; the growth policy is specified directly.
  MetaspaceArenaTestHelper(MetaspaceGtestContext& context, const ArenaGrowthPolicy* growth_policy,
                           const char* name = "gtest-MetaspaceArena") :
    _context(context)
  {
    initialize(growth_policy, name);
  }

  ~MetaspaceArenaTestHelper() {
    delete_arena_with_tests();
  }

  const CommitLimiter& limiter() const { return _context.commit_limiter(); }
  MetaspaceArena* arena() const { return _arena; }
  SizeAtomicCounter& used_words_counter() { return _used_words_counter; }

  // Note: all test functions return void due to gtest's limitation that ASSERT
  // cannot be used in functions returning non-void.

  void delete_arena_with_tests() {
    if (_arena != nullptr) {
      size_t used_words_before = _used_words_counter.get();
      size_t committed_words_before = limiter().committed_words();
      DEBUG_ONLY(_arena->verify());
      delete _arena;
      _arena = nullptr;
      size_t used_words_after = _used_words_counter.get();
      size_t committed_words_after = limiter().committed_words();
      ASSERT_0(used_words_after);
      ASSERT_LE(committed_words_after, committed_words_before);
    }
  }

  void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
    _arena->usage_numbers(p_used, p_committed, p_capacity);
    if (p_used != nullptr) {
      if (p_committed != nullptr) {
        ASSERT_GE(*p_committed, *p_used);
      }
      // Since we own the used words counter, it should reflect our usage numbers 1:1.
      ASSERT_EQ(_used_words_counter.get(), *p_used);
    }
    if (p_committed != nullptr && p_capacity != nullptr) {
      ASSERT_GE(*p_capacity, *p_committed);
    }
  }

  // Allocate; the caller expects success; the pointer is returned in *p_return_value.
  void allocate_from_arena_with_tests_expect_success(MetaWord** p_return_value, size_t word_size) {
    allocate_from_arena_with_tests(p_return_value, word_size);
    ASSERT_NOT_NULL(*p_return_value);
  }

  // Allocate; the caller expects success but is not interested in the return value.
  void allocate_from_arena_with_tests_expect_success(size_t word_size) {
    MetaWord* dummy = nullptr;
    allocate_from_arena_with_tests_expect_success(&dummy, word_size);
  }

  // Allocate; the caller expects failure.
  void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
    MetaWord* dummy = nullptr;
    allocate_from_arena_with_tests(&dummy, word_size);
    ASSERT_NULL(dummy);
  }

  // Allocate; it may or may not work; the result is returned in *p_return_value.
  void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {

    // Note: usage_numbers walks all chunks in use and counts.
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    size_t possible_expansion = limiter().possible_expansion_words();

    MetaWord* p = _arena->allocate(word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    if (p == nullptr) {
      // Allocation failed.
      ASSERT_LT(possible_expansion, word_size);
      ASSERT_EQ(used, used2);
      ASSERT_EQ(committed, committed2);
      ASSERT_EQ(capacity, capacity2);
    } else {
      // Allocation succeeded. The result should be correctly aligned.
      ASSERT_TRUE(is_aligned(p, AllocationAlignmentByteSize));
      // used: may or may not go up (the request may have been satisfied from the
      // free block list, whose content already counts as used).
      // committed: may or may not go up
      // capacity: ditto
      ASSERT_GE(used2, used);
      ASSERT_GE(committed2, committed);
      ASSERT_GE(capacity2, capacity);
    }

    *p_return_value = p;
  }

  // Allocate; it may or may not work; the caller does not care about the result value.
  void allocate_from_arena_with_tests(size_t word_size) {
    MetaWord* dummy = nullptr;
    allocate_from_arena_with_tests(&dummy, word_size);
  }

  void deallocate_with_tests(MetaWord* p, size_t word_size) {
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    _arena->deallocate(p, word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    // Nothing should have changed. Deallocated blocks are added to the free block list,
    // which still counts as used.
    ASSERT_EQ(used2, used);
    ASSERT_EQ(committed2, committed);
    ASSERT_EQ(capacity2, capacity);
  }

  ArenaStats get_arena_statistics() const {
    ArenaStats stats;
    _arena->add_to_statistics(&stats);
    return stats;
  }

  // Convenience method returning the number of chunks in the arena (including the current chunk).
  int get_number_of_chunks() const {
    return get_arena_statistics().totals()._num;
  }

};

static void test_basics(size_t commit_limit, bool is_micro) {
  MetaspaceGtestContext context(commit_limit);
  MetaspaceArenaTestHelper helper(context, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);

  helper.allocate_from_arena_with_tests(1);
  helper.allocate_from_arena_with_tests(128);
  helper.allocate_from_arena_with_tests(128 * K);
  helper.allocate_from_arena_with_tests(1);
  helper.allocate_from_arena_with_tests(128);
  helper.allocate_from_arena_with_tests(128 * K);
}

TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
  test_basics(max_uintx, true);
}

TEST_VM(metaspace, MetaspaceArena_basics_micro_limit) {
  test_basics(256 * K, true);
}

TEST_VM(metaspace, MetaspaceArena_basics_standard_nolimit) {
  test_basics(max_uintx, false);
}

TEST_VM(metaspace, MetaspaceArena_basics_standard_limit) {
  test_basics(256 * K, false);
}

// Test chunk enlargement:
// A single MetaspaceArena, left undisturbed with room to grow. Slowly fill the arena up.
// We should see at least some occurrences of chunk-in-place enlargement.
static void test_chunk_enlargement_simple(Metaspace::MetaspaceType spacetype, bool is_class) {

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, spacetype, is_class);

  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE &&
         metaspace::InternalStats::num_chunks_enlarged() == n1) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
  }

  EXPECT_GT(metaspace::InternalStats::num_chunks_enlarged(), n1);
}
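// (A note on the accounting in the loop above: 'allocated' sums raw word sizes, as
// returned by metaspace::get_raw_word_size_for_requested_word_size(), rather than the
// requested sizes, since the arena may pad a request - e.g. for alignment - and it is
// the padded size that counts toward filling the chunk.)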
// Do this test for some of the standard types; don't do it for the boot loader type,
// since that one starts out at max chunk size, so we would not see any enlargement.

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_c) {
  test_chunk_enlargement_simple(Metaspace::StandardMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_nc) {
  test_chunk_enlargement_simple(Metaspace::StandardMetaspaceType, false);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_c) {
  test_chunk_enlargement_simple(Metaspace::ReflectionMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_nc) {
  test_chunk_enlargement_simple(Metaspace::ReflectionMetaspaceType, false);
}

// Test chunk enlargement:
// A single MetaspaceArena, left undisturbed with room to grow. Slowly fill the arena up.
// We should see occurrences of chunk-in-place enlargement.
// Here, we give it an ideal policy which should enable the initial chunk to grow
// undisturbed until the end.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_2) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  // Note: internally, chunk-in-place enlargement is disallowed if growing the chunk
  // would cause the arena to claim more memory than its growth policy allows. This
  // is done to prevent the arena from growing too fast.
  //
  // In order to test in-place growth here without that restriction, we give the arena
  // an artificial growth policy which starts out with a tiny chunk size, then balloons
  // right up to max chunk size. This will cause the initial chunk to be tiny, and
  // the arena is then able to grow it without violating the growth policy.
  chunklevel_t growth[] = { HIGHEST_CHUNK_LEVEL, ROOT_CHUNK_LEVEL };
  ArenaGrowthPolicy growth_policy(growth, 2);

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, &growth_policy);

  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
    if (allocated <= MAX_CHUNK_WORD_SIZE) {
      // The chunk should have been enlarged in place.
      ASSERT_EQ(1, helper.get_number_of_chunks());
    } else {
      // The next chunk should have been started.
      ASSERT_EQ(2, helper.get_number_of_chunks());
    }
  }

  int times_chunk_was_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_was_enlarged);

  ASSERT_GT0(times_chunk_was_enlarged);
}

// Regression test: Given a single MetaspaceArena, left undisturbed with room to grow,
// test that in-place enlargement correctly fails if growing the chunk would bring us
// beyond the max size of a chunk.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_max_chunk_size) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  MetaspaceGtestContext context;

  for (size_t first_allocation_size = 1; first_allocation_size <= MAX_CHUNK_WORD_SIZE / 2; first_allocation_size *= 2) {

    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    // We first allocate a small amount, then the full amount possible.
    // The sum of the first and second allocation should bring us above root chunk size.
    // This should work, and we should not see any problems, but no chunk enlargement
    // should happen.
    int n1 = metaspace::InternalStats::num_chunks_enlarged();

    helper.allocate_from_arena_with_tests_expect_success(first_allocation_size);
    EXPECT_EQ(helper.get_number_of_chunks(), 1);

    helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE - first_allocation_size + 1);
    EXPECT_EQ(helper.get_number_of_chunks(), 2);

    int times_chunk_was_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
    LOG("chunk was enlarged %d times.", times_chunk_was_enlarged);

    EXPECT_0(times_chunk_was_enlarged);
  }
}

// Regression test: Given a single MetaspaceArena, left undisturbed with room to grow,
// test that in-place enlargement correctly fails if growing the chunk would more than
// double its size.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_doubling_chunk_size) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

  int n1 = metaspace::InternalStats::num_chunks_enlarged();

  helper.allocate_from_arena_with_tests_expect_success(1000);
  EXPECT_EQ(helper.get_number_of_chunks(), 1);

  helper.allocate_from_arena_with_tests_expect_success(4000);
  EXPECT_EQ(helper.get_number_of_chunks(), 2);

  int times_chunk_was_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_was_enlarged);

  EXPECT_0(times_chunk_was_enlarged);
}

// Test the MetaspaceArena's free block list:
// Allocate, deallocate, then allocate a block of the same size again. The second
// allocation should reuse the deallocated block.
TEST_VM(metaspace, MetaspaceArena_deallocate) {
  if (Settings::use_allocation_guard()) {
    return;
  }
  for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
    MetaspaceGtestContext context;
    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    MetaWord* p1 = nullptr;
    helper.allocate_from_arena_with_tests_expect_success(&p1, s);

    size_t used1 = 0, capacity1 = 0;
    helper.usage_numbers_with_test(&used1, nullptr, &capacity1);
    ASSERT_EQ(used1, s);

    helper.deallocate_with_tests(p1, s);

    size_t used2 = 0, capacity2 = 0;
    helper.usage_numbers_with_test(&used2, nullptr, &capacity2);
    ASSERT_EQ(used2, used1);
    ASSERT_EQ(capacity2, capacity1);

    MetaWord* p2 = nullptr;
    helper.allocate_from_arena_with_tests_expect_success(&p2, s);

    size_t used3 = 0, capacity3 = 0;
    helper.usage_numbers_with_test(&used3, nullptr, &capacity3);
    ASSERT_EQ(used3, used2);
    ASSERT_EQ(capacity3, capacity2);

    // In fact, we should get the very same allocation back.
    ASSERT_EQ(p1, p2);
  }
}

static void test_recover_from_commit_limit_hit() {

  // Test:
  // - Multiple MetaspaceArenas allocate while operating under the same commit limiter.
  // - One, while attempting to commit parts of its current chunk on demand,
  //   hits the limit and cannot commit its chunk further.
  // - We release another MetaspaceArena - its content is put back on the
  //   freelists.
  // - We re-attempt allocation from the first arena. It should now succeed.
  //
  // To make this work, the first MetaspaceArena may have to let go of its current chunk,
  // retire it, and take a fresh chunk from the freelist.

  const size_t commit_limit = Settings::commit_granule_words() * 10;
  MetaspaceGtestContext context(commit_limit);

  // The first two MetaspaceArenas mimic micro loaders. They will fill the free
  // chunk list with very small chunks. We allocate from them in an interleaved
  // way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);

  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed on demand, so we are likely
  // to hit the commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);

  // Allocate space until less than two commit granules, but more than one, are left.
  size_t allocated_from_1_and_2 = 0;
  while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
         allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
  }

  // Now, allocating from helper3, creep up on the limit.
  size_t allocated_from_3 = 0;
  MetaWord* p = nullptr;
  while ((helper3.allocate_from_arena_with_tests(&p, 1), p != nullptr) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);

  EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);

  // We expect the freelist to be empty of committed space...
  EXPECT_0(context.cm().calc_committed_word_size());

  //context.cm().print_on(tty);

  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();

  //context.cm().print_on(tty);

  // ... but deleting the arena should have populated the freelist with committed space.
  EXPECT_GT(context.cm().calc_committed_word_size(), (size_t)0);

  // Repeat the allocation from helper3; it should now succeed.
  helper3.allocate_from_arena_with_tests_expect_success(1);
}

TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
  test_recover_from_commit_limit_hit();
}

static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
                                   size_t expected_starting_capacity,
                                   bool test_in_place_enlargement)
{

  if (Settings::use_allocation_guard()) {
    return;
  }

  // From a MetaspaceArena in a clean room, allocate tiny amounts and watch it grow.
  // Used/committed/capacity should not grow in large jumps. Also, different types of
  // MetaspaceArena should have different initial capacities.
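  // (A note on expected_starting_capacity: the values passed in by the TEST_VM
  // instantiations below mirror the first step of the respective arena growth policy;
  // see memory/metaspace/metaspaceArenaGrowthPolicy.cpp, with which they must be kept
  // in sync.)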
  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper smhelper(context, type, is_class, "Grower");

  MetaspaceArenaTestHelper smhelper_harasser(context, Metaspace::ReflectionMetaspaceType, true, "Harasser");

  size_t used = 0, committed = 0, capacity = 0;
  const size_t alloc_words = 16;

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);
  ASSERT_0(used);
  ASSERT_0(committed);
  ASSERT_0(capacity);

  ///// First allocation //

  smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);

  ASSERT_EQ(used, alloc_words);
  ASSERT_GE(committed, used);
  ASSERT_GE(capacity, committed);

  ASSERT_EQ(capacity, expected_starting_capacity);

  // What happens commit-wise when we allocate:
  // The arena allocates from its current chunk, committing needed memory from the chunk
  // on demand. The chunk asks the underlying vsnode to commit the area it is located in.
  // Since the chunk may be smaller than one commit granule, this may result in surrounding
  // memory also getting committed.
  // In reality we commit with granule granularity, but the arena can only know what its
  // first chunk committed. So what it thinks was committed depends on the size of its
  // first chunk, which depends on the ArenaGrowthPolicy.
  {
    const chunklevel_t expected_level_for_first_chunk =
      ArenaGrowthPolicy::policy_for_space_type(type, is_class)->get_level_at_step(0);
    const size_t what_arena_should_think_was_committed =
      MIN2(Settings::commit_granule_words(), word_size_for_level(expected_level_for_first_chunk));
    const size_t what_should_really_be_committed = Settings::commit_granule_words();

    ASSERT_EQ(committed, what_arena_should_think_was_committed);
    ASSERT_EQ(context.committed_words(), what_should_really_be_committed);
  }

  ///// Subsequent allocations //

  DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)

  size_t words_allocated = 0;
  int num_allocated = 0;
  const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
  size_t highest_capacity_jump = capacity;
  int num_capacity_jumps = 0;

  while (words_allocated < safety && num_capacity_jumps < 15) {

    // If we want to test growth with in-place chunk enlargement, leave the MetaspaceArena
    // undisturbed; it will have all the room it needs to grow. Otherwise allocate from a
    // little side arena to increase fragmentation.
    // (Note that this does not completely prevent in-place chunk enlargement, but it makes
    // it rather improbable.)
    if (!test_in_place_enlargement) {
      smhelper_harasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
    }

    smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
    words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words);
    num_allocated++;

    size_t used2 = 0, committed2 = 0, capacity2 = 0;

    smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);

    // used should not grow larger than what we allocated, plus possible overhead.
    ASSERT_GE(used2, used);
    ASSERT_LE(used2, used + alloc_words * 2);
    ASSERT_LE(used2, words_allocated + 100);
    used = used2;

    // A jump in committed words should not be larger than commit granule size.
    // It can be smaller, since the current chunk of the MetaspaceArena may be
    // smaller than a commit granule.
    // (Note: unless root chunks are born fully committed.)
    ASSERT_GE(committed2, used2);
    ASSERT_GE(committed2, committed);
    const size_t committed_jump = committed2 - committed;
    if (committed_jump > 0) {
      ASSERT_LE(committed_jump, Settings::commit_granule_words());
    }
    committed = committed2;

    // Capacity jumps: test that the arena's capacity does not grow too fast.
    ASSERT_GE(capacity2, committed2);
    ASSERT_GE(capacity2, capacity);
    const size_t capacity_jump = capacity2 - capacity;
    if (capacity_jump > 0) {
      LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
      if (capacity_jump > highest_capacity_jump) {
        /* Disabled for now since this is rather shaky. The way it is tested makes it too
         * dependent on allocation history. Need to rethink this.
        ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
        ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
        ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
        */
        highest_capacity_jump = capacity_jump;
      }
      num_capacity_jumps++;
    }

    capacity = capacity2;
  }

  // After all this work, we should see an increase in the number of chunk-in-place
  // enlargements. (This especially is vulnerable to regression: the decisions of when
  // to enlarge in place are somewhat complicated, see
  // MetaspaceArena::attempt_enlarge_current_chunk().)
#ifdef ASSERT
  if (test_in_place_enlargement) {
    const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
    ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
  }
#endif
}

// These numbers have to be kept in sync with the arena policy numbers
// (see memory/metaspace/metaspaceArenaGrowthPolicy.cpp).
TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_c_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}

/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
TEST_VM(metaspace, MetaspaceArena_growth_boot_c_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1M), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_boot_c_not_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1M), false);
}
*/

TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), false);
}

/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4M), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4M), false);
}
*/

// Test that repeated allocation-deallocation cycles with the same block size
// do not increase metaspace usage after the initial allocation (the deallocated
// block should be reused by the next allocation).
static void test_repeatedly_allocate_and_deallocate(bool is_topmost) {
  // Test various sizes, including (importantly) the max possible block size = 1 root chunk.
  for (size_t blocksize = Metaspace::max_allocation_word_size(); blocksize >= 1; blocksize /= 2) {
    size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0;
    MetaWord* p = nullptr, *p2 = nullptr;

    MetaspaceGtestContext context;
    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    // First allocation.
    helper.allocate_from_arena_with_tests_expect_success(&p, blocksize);
    if (!is_topmost) {
      // Another one on top; its size does not matter.
      helper.allocate_from_arena_with_tests_expect_success(0x10);
    }

    // Measure.
    helper.usage_numbers_with_test(&used1, &committed1, nullptr);

    // Dealloc, alloc several times with the same size.
    for (int i = 0; i < 5; i++) {
      helper.deallocate_with_tests(p, blocksize);
      helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize);
      // We should get the same pointer back.
      EXPECT_EQ(p2, p);
    }

    // Measure again.
    helper.usage_numbers_with_test(&used2, &committed2, nullptr);
    EXPECT_EQ(used2, used1);
    EXPECT_EQ(committed2, committed1);
  }
}

TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_top_allocation) {
  test_repeatedly_allocate_and_deallocate(true);
}

TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_nontop_allocation) {
  test_repeatedly_allocate_and_deallocate(false);
}
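
// An additional illustrative sketch (not part of the original suite): arenas created
// in the same MetaspaceGtestContext draw committed space from the same underlying
// commit limiter, so the context-level committed word count can only grow as either
// arena allocates. The test name and the allocation sizes are arbitrary.
TEST_VM(metaspace, MetaspaceArena_sketch_shared_commit_accounting) {
  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper1(context, Metaspace::StandardMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(context, Metaspace::StandardMetaspaceType, false);

  const size_t committed_at_start = context.committed_words();
  helper1.allocate_from_arena_with_tests_expect_success(64);
  const size_t committed_after_first = context.committed_words();
  helper2.allocate_from_arena_with_tests_expect_success(64);
  const size_t committed_after_second = context.committed_words();

  // Committed space, as accounted at the context level, is monotonically
  // non-decreasing while both arenas are alive.
  EXPECT_GE(committed_after_first, committed_at_start);
  EXPECT_GE(committed_after_second, committed_after_first);
}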