1 /*
2 * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2020, 2023 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "memory/metaspace/chunkManager.hpp"
28 #include "memory/metaspace/commitLimiter.hpp"
29 #include "memory/metaspace/counters.hpp"
30 #include "memory/metaspace/internalStats.hpp"
31 #include "memory/metaspace/metaspaceArena.hpp"
32 #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
33 #include "memory/metaspace/metaspaceCommon.hpp"
34 #include "memory/metaspace/metaspaceSettings.hpp"
35 #include "memory/metaspace/metaspaceStatistics.hpp"
36 #include "utilities/debug.hpp"
37 #include "utilities/globalDefinitions.hpp"
38
39 //#define LOG_PLEASE
40 #include "metaspaceGtestCommon.hpp"
41 #include "metaspaceGtestContexts.hpp"
42 #include "metaspaceGtestRangeHelpers.hpp"
43
44 using metaspace::AllocationAlignmentByteSize;
45 using metaspace::ArenaGrowthPolicy;
46 using metaspace::CommitLimiter;
47 using metaspace::InternalStats;
48 using metaspace::MemRangeCounter;
49 using metaspace::MetaspaceArena;
50 using metaspace::SizeAtomicCounter;
51 using metaspace::Settings;
52 using metaspace::ArenaStats;
53
54 class MetaspaceArenaTestHelper {
55
56 MetaspaceGtestContext& _context;
57
58 const ArenaGrowthPolicy* _growth_policy;
59 SizeAtomicCounter _used_words_counter;
60 MetaspaceArena* _arena;
61
62 void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") {
63 _growth_policy = growth_policy;
64 _arena = new MetaspaceArena(&_context.cm(), _growth_policy, &_used_words_counter, name);
65 DEBUG_ONLY(_arena->verify());
66 }
67
68 public:
69
70   // Create a helper; the growth policy for the arena is determined by the given space type/is_class tuple
71 MetaspaceArenaTestHelper(MetaspaceGtestContext& helper,
72 Metaspace::MetaspaceType space_type, bool is_class,
73 const char* name = "gtest-MetaspaceArena") :
74 _context(helper)
75 {
76 initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), name);
77 }
78
79 // Create a helper; growth policy is directly specified
80 MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, const ArenaGrowthPolicy* growth_policy,
81 const char* name = "gtest-MetaspaceArena") :
82 _context(helper)
83 {
84 initialize(growth_policy, name);
85 }
86
87 ~MetaspaceArenaTestHelper() {
88 delete_arena_with_tests();
89 }
90
91 const CommitLimiter& limiter() const { return _context.commit_limiter(); }
92 MetaspaceArena* arena() const { return _arena; }
93 SizeAtomicCounter& used_words_counter() { return _used_words_counter; }
94
95   // Note: all test functions return void due to gtest's limitation that ASSERT
96   // cannot be used in functions with a non-void return type.
97
98 void delete_arena_with_tests() {
99 if (_arena != nullptr) {
100 size_t used_words_before = _used_words_counter.get();
101 size_t committed_words_before = limiter().committed_words();
102 DEBUG_ONLY(_arena->verify());
103 delete _arena;
104 _arena = nullptr;
105 size_t used_words_after = _used_words_counter.get();
106 size_t committed_words_after = limiter().committed_words();
107 ASSERT_0(used_words_after);
108 ASSERT_LE(committed_words_after, committed_words_before);
109 }
110 }
111
112 void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
113 _arena->usage_numbers(p_used, p_committed, p_capacity);
114 if (p_used != nullptr) {
115 if (p_committed != nullptr) {
116 ASSERT_GE(*p_committed, *p_used);
117 }
118 // Since we own the used words counter, it should reflect our usage number 1:1
119 ASSERT_EQ(_used_words_counter.get(), *p_used);
120 }
121 if (p_committed != nullptr && p_capacity != nullptr) {
122 ASSERT_GE(*p_capacity, *p_committed);
123 }
124 }
125
126 // Allocate; caller expects success; return pointer in *p_return_value
127 void allocate_from_arena_with_tests_expect_success(MetaWord** p_return_value, size_t word_size) {
128 allocate_from_arena_with_tests(p_return_value, word_size);
129 ASSERT_NOT_NULL(*p_return_value);
130 }
131
132 // Allocate; caller expects success but is not interested in return value
133 void allocate_from_arena_with_tests_expect_success(size_t word_size) {
134 MetaWord* dummy = nullptr;
135 allocate_from_arena_with_tests_expect_success(&dummy, word_size);
136 }
137
138 // Allocate; caller expects failure
139 void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
140 MetaWord* dummy = nullptr;
141 allocate_from_arena_with_tests(&dummy, word_size);
142 ASSERT_NULL(dummy);
143 }
144
145 // Allocate; it may or may not work; return value in *p_return_value
146 void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {
147
148     // Note: usage_numbers walks all chunks in use to count usage, so it is not a cheap call.
149 size_t used = 0, committed = 0, capacity = 0;
150 usage_numbers_with_test(&used, &committed, &capacity);
151
152 size_t possible_expansion = limiter().possible_expansion_words();
153
154 MetaWord* p = _arena->allocate(word_size);
155
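    // SOMETIMES() runs its argument only occasionally rather than on every call
    // (see metaspaceGtestCommon.hpp); verifying the arena after every single
    // allocation would make the test needlessly slow.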
156 SOMETIMES(DEBUG_ONLY(_arena->verify();))
157
158 size_t used2 = 0, committed2 = 0, capacity2 = 0;
159 usage_numbers_with_test(&used2, &committed2, &capacity2);
160
161 if (p == nullptr) {
162 // Allocation failed.
163 ASSERT_LT(possible_expansion, word_size);
164 ASSERT_EQ(used, used2);
165 ASSERT_EQ(committed, committed2);
166 ASSERT_EQ(capacity, capacity2);
167 } else {
168 // Allocation succeeded. Should be correctly aligned.
169 ASSERT_TRUE(is_aligned(p, AllocationAlignmentByteSize));
170 // used: may go up or may not (since our request may have been satisfied from the freeblocklist
171 // whose content already counts as used).
172 // committed: may go up, may not
173 // capacity: ditto
174 ASSERT_GE(used2, used);
175 ASSERT_GE(committed2, committed);
176 ASSERT_GE(capacity2, capacity);
177 }
178
179 *p_return_value = p;
180 }
181
182   // Allocate; it may or may not work; the caller does not care about the result value
183 void allocate_from_arena_with_tests(size_t word_size) {
184 MetaWord* dummy = nullptr;
185 allocate_from_arena_with_tests(&dummy, word_size);
186 }
187
188 void deallocate_with_tests(MetaWord* p, size_t word_size) {
189 size_t used = 0, committed = 0, capacity = 0;
190 usage_numbers_with_test(&used, &committed, &capacity);
191
192 _arena->deallocate(p, word_size);
193
194 SOMETIMES(DEBUG_ONLY(_arena->verify();))
195
196 size_t used2 = 0, committed2 = 0, capacity2 = 0;
197 usage_numbers_with_test(&used2, &committed2, &capacity2);
198
199 // Nothing should have changed. Deallocated blocks are added to the free block list
200 // which still counts as used.
201 ASSERT_EQ(used2, used);
202 ASSERT_EQ(committed2, committed);
203 ASSERT_EQ(capacity2, capacity);
204 }
205
206 ArenaStats get_arena_statistics() const {
207 ArenaStats stats;
208 _arena->add_to_statistics(&stats);
209 return stats;
210 }
211
212 // Convenience method to return number of chunks in arena (including current chunk)
213 int get_number_of_chunks() const {
214 return get_arena_statistics().totals()._num;
215 }
216
217 };
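
// Typical use of the helper (a sketch, using only the methods defined above):
//
//   MetaspaceGtestContext context;
//   MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
//   MetaWord* p = nullptr;
//   helper.allocate_from_arena_with_tests_expect_success(&p, 16);
//   helper.deallocate_with_tests(p, 16);
//   // The helper's destructor deletes the arena and re-checks the usage counters.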
218
219 static void test_basics(size_t commit_limit, bool is_micro) {
220 MetaspaceGtestContext context(commit_limit);
221 MetaspaceArenaTestHelper helper(context, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);
222
223 helper.allocate_from_arena_with_tests(1);
224 helper.allocate_from_arena_with_tests(128);
225 helper.allocate_from_arena_with_tests(128 * K);
226 helper.allocate_from_arena_with_tests(1);
227 helper.allocate_from_arena_with_tests(128);
228 helper.allocate_from_arena_with_tests(128 * K);
229 }
230
231 TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
232 test_basics(max_uintx, true);
233 }
234
391 int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
392 LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);
393
394 EXPECT_0(times_chunk_were_enlarged);
395
396 }
397
398 // Test the MetaspaceArena's free block list:
399 // Allocate, deallocate, then allocate the same block again. The second allocate should
400 // reuse the deallocated block.
401 TEST_VM(metaspace, MetaspaceArena_deallocate) {
402 if (Settings::use_allocation_guard()) {
403 return;
404 }
405 for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
406 MetaspaceGtestContext context;
407 MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
408
409 MetaWord* p1 = nullptr;
410 helper.allocate_from_arena_with_tests_expect_success(&p1, s);
411
412 size_t used1 = 0, capacity1 = 0;
413 helper.usage_numbers_with_test(&used1, nullptr, &capacity1);
414 ASSERT_EQ(used1, s);
415
416 helper.deallocate_with_tests(p1, s);
417
418 size_t used2 = 0, capacity2 = 0;
419 helper.usage_numbers_with_test(&used2, nullptr, &capacity2);
420 ASSERT_EQ(used1, used2);
421     ASSERT_EQ(capacity1, capacity2);
422
423 MetaWord* p2 = nullptr;
424 helper.allocate_from_arena_with_tests_expect_success(&p2, s);
425
426 size_t used3 = 0, capacity3 = 0;
427 helper.usage_numbers_with_test(&used3, nullptr, &capacity3);
428 ASSERT_EQ(used3, used2);
429 ASSERT_EQ(capacity3, capacity2);
430
431 // Actually, we should get the very same allocation back
432 ASSERT_EQ(p1, p2);
433 }
434 }
435
436 static void test_recover_from_commit_limit_hit() {
437
438 // Test:
439 // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
440 // - One, while attempting to commit parts of its current chunk on demand,
441 // triggers the limit and cannot commit its chunk further.
442 // - We release the other MetaspaceArena - its content is put back to the
443 // freelists.
444 // - We re-attempt allocation from the first manager. It should now succeed.
445 //
446   // This means the first MetaspaceArena may have to let go of its current chunk,
447   // retire it, and take a fresh chunk from the freelist.
450 MetaspaceGtestContext context(commit_limit);
451
452   // The first MetaspaceArena mimics a micro loader. This will fill the free
453 // chunk list with very small chunks. We allocate from them in an interleaved
454 // way to cause fragmentation.
455 MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
456 MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);
457
458 // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
459 // it gets a large initial chunk which is committed
460 // on demand and we are likely to hit a commit limit while trying to expand it.
461 MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);
462
463   // Allocate space until less than two commit granules (but more than one) are left
464 size_t allocated_from_1_and_2 = 0;
465 while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
466 allocated_from_1_and_2 < commit_limit) {
467 helper1.allocate_from_arena_with_tests_expect_success(1);
468 helper2.allocate_from_arena_with_tests_expect_success(1);
469 allocated_from_1_and_2 += 2;
470 }
471
472 // Now, allocating from helper3, creep up on the limit
473 size_t allocated_from_3 = 0;
474 MetaWord* p = nullptr;
475 while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != nullptr) &&
476 ++allocated_from_3 < Settings::commit_granule_words() * 2);
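  // (Comma expression: each iteration allocates a single word, then tests the result;
  //  the loop ends when an allocation fails or after two granules' worth of words.)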
477
478 EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);
479
480 // We expect the freelist to be empty of committed space...
481 EXPECT_0(context.cm().calc_committed_word_size());
482
483 //msthelper.cm().print_on(tty);
484
485 // Release the first MetaspaceArena.
486 helper1.delete_arena_with_tests();
487
488 //msthelper.cm().print_on(tty);
489
498
499 TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
500 test_recover_from_commit_limit_hit();
501 }
502
503 static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
504 size_t expected_starting_capacity,
505 bool test_in_place_enlargement)
506 {
507
508 if (Settings::use_allocation_guard()) {
509 return;
510 }
511
512   // Allocate tiny amounts from a MetaspaceArena in a clean-room setting and
513   // watch it grow. Used/committed/capacity should not grow in
514 // large jumps. Also, different types of MetaspaceArena should
515 // have different initial capacities.
516
517 MetaspaceGtestContext context;
518 MetaspaceArenaTestHelper smhelper(context, type, is_class, "Grower");
519
520 MetaspaceArenaTestHelper smhelper_harrasser(context, Metaspace::ReflectionMetaspaceType, true, "Harasser");
521
522 size_t used = 0, committed = 0, capacity = 0;
523 const size_t alloc_words = 16;
524
525 smhelper.arena()->usage_numbers(&used, &committed, &capacity);
526 ASSERT_0(used);
527 ASSERT_0(committed);
528 ASSERT_0(capacity);
529
530 ///// First allocation //
531
532 smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
533
534 smhelper.arena()->usage_numbers(&used, &committed, &capacity);
535
536 ASSERT_EQ(used, alloc_words);
537 ASSERT_GE(committed, used);
538 ASSERT_GE(capacity, committed);
539
540 ASSERT_EQ(capacity, expected_starting_capacity);
563 DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)
564
565 size_t words_allocated = 0;
566 int num_allocated = 0;
567 const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
568 size_t highest_capacity_jump = capacity;
569 int num_capacity_jumps = 0;
570
571 while (words_allocated < safety && num_capacity_jumps < 15) {
572
573     // If we want to test growth with in-place chunk enlargement, leave the MetaspaceArena
574     // undisturbed; it will have all the room to grow. Otherwise, allocate from a little
575     // side arena to increase fragmentation.
576     // (Note that this does not completely prevent in-place chunk enlargement, but it
577     // makes it rather improbable.)
578 if (!test_in_place_enlargement) {
579 smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
580 }
581
582 smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
583 words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words);
584 num_allocated++;
585
586 size_t used2 = 0, committed2 = 0, capacity2 = 0;
587
588 smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);
589
590 // used should not grow larger than what we allocated, plus possible overhead.
591 ASSERT_GE(used2, used);
592 ASSERT_LE(used2, used + alloc_words * 2);
593 ASSERT_LE(used2, words_allocated + 100);
594 used = used2;
595
596 // A jump in committed words should not be larger than commit granule size.
597 // It can be smaller, since the current chunk of the MetaspaceArena may be
598 // smaller than a commit granule.
599 // (Note: unless root chunks are born fully committed)
600 ASSERT_GE(committed2, used2);
601 ASSERT_GE(committed2, committed);
602 const size_t committed_jump = committed2 - committed;
603 if (committed_jump > 0) {
604 ASSERT_LE(committed_jump, Settings::commit_granule_words());
605 }
606 committed = committed2;
607
608     // Capacity jumps: test that the arena's capacity does not grow too fast.
610 ASSERT_GE(capacity2, capacity);
611 const size_t capacity_jump = capacity2 - capacity;
612 if (capacity_jump > 0) {
613 LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
614 if (capacity_jump > highest_capacity_jump) {
615 /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
616 * on allocation history. Need to rethink this.
617 ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
618 ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
619 ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
620 */
621 highest_capacity_jump = capacity_jump;
622 }
623 num_capacity_jumps++;
624 }
625
626 capacity = capacity2;
627
628 }
629
630   // After all this work, we should see an increase in the number of chunk-in-place enlargements
631   // (this is especially vulnerable to regression: the decision of when to enlarge a chunk in place
632   // is somewhat complicated; see MetaspaceArena::attempt_enlarge_current_chunk()).
633 #ifdef ASSERT
634 if (test_in_place_enlargement) {
635 const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
636 ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
637 }
638 #endif
639 }
640
641 // these numbers have to be in sync with arena policy numbers (see memory/metaspace/arenaGrowthPolicy.cpp)
642 TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
643 test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
644 word_size_for_level(CHUNK_LEVEL_1K), true);
645 }
646
647 TEST_VM(metaspace, MetaspaceArena_growth_refl_c_not_inplace) {
648 test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
649 word_size_for_level(CHUNK_LEVEL_1K), false);
713 }
714
715 /* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
716  * and too large, to allow any reliable guess as to whether chunks get enlarged in place.
717 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
718 test_controlled_growth(Metaspace::BootMetaspaceType, false,
719 word_size_for_level(CHUNK_LEVEL_4M), true);
720 }
721
722 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) {
723 test_controlled_growth(Metaspace::BootMetaspaceType, false,
724 word_size_for_level(CHUNK_LEVEL_4M), false);
725 }
726 */
727
728 // Test that repeated allocation-deallocation cycles with the same block size
729 // do not increase metaspace usage after the initial allocation (the deallocated
730 // block should be reused by the next allocation).
731 static void test_repeatedly_allocate_and_deallocate(bool is_topmost) {
732   // Test various sizes, including (importantly) the maximum possible block size = 1 root chunk.
733 for (size_t blocksize = Metaspace::max_allocation_word_size(); blocksize >= 1; blocksize /= 2) {
734 size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0;
735 MetaWord* p = nullptr, *p2 = nullptr;
736
737 MetaspaceGtestContext context;
738 MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
739
740 // First allocation
741 helper.allocate_from_arena_with_tests_expect_success(&p, blocksize);
742 if (!is_topmost) {
743 // another one on top, size does not matter.
744 helper.allocate_from_arena_with_tests_expect_success(0x10);
745 }
746
747 // Measure
748 helper.usage_numbers_with_test(&used1, &committed1, nullptr);
749
750 // Dealloc, alloc several times with the same size.
751 for (int i = 0; i < 5; i ++) {
752 helper.deallocate_with_tests(p, blocksize);
753 helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize);
754 // We should get the same pointer back.
755 EXPECT_EQ(p2, p);
756 }
757
758 // Measure again
759 helper.usage_numbers_with_test(&used2, &committed2, nullptr);
760 EXPECT_EQ(used2, used1);
761 EXPECT_EQ(committed1, committed2);
762 }
763 }
764
765 TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_top_allocation) {
766 test_repeatedly_allocate_and_deallocate(true);
767 }
768
769 TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_nontop_allocation) {
770 test_repeatedly_allocate_and_deallocate(false);
771 }
1 /*
2 * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2020, 2023 SAP SE. All rights reserved.
4 * Copyright (c) 2023 Red Hat, Inc. All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "memory/metaspace/chunkManager.hpp"
29 #include "memory/metaspace/commitLimiter.hpp"
30 #include "memory/metaspace/counters.hpp"
31 #include "memory/metaspace/internalStats.hpp"
32 #include "memory/metaspace/freeBlocks.hpp"
33 #include "memory/metaspace/metablock.inline.hpp"
34 #include "memory/metaspace/metaspaceArena.hpp"
35 #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
36 #include "memory/metaspace/metachunkList.hpp"
37 #include "memory/metaspace/metaspaceCommon.hpp"
38 #include "memory/metaspace/metaspaceSettings.hpp"
39 #include "memory/metaspace/metaspaceStatistics.hpp"
40 #include "memory/metaspace.hpp"
41 #include "utilities/debug.hpp"
42 #include "utilities/globalDefinitions.hpp"
43
44 #define LOG_PLEASE
45 #include "metaspaceGtestCommon.hpp"
46 #include "metaspaceGtestContexts.hpp"
47 #include "metaspaceGtestRangeHelpers.hpp"
48
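// gtest ASSERTs in a void helper function only return from that helper, not from
// the surrounding test. Tests therefore invoke HANDLE_FAILURE after helper calls
// to bail out of the test once a failure has been recorded.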
49 #define HANDLE_FAILURE \
50 if (testing::Test::HasFailure()) { \
51 return; \
52 }
53
54 namespace metaspace {
55
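// Gives tests access to MetaspaceArena internals (its chunk list and free block
// list); to read those private members it needs to be befriended by MetaspaceArena.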
56 class MetaspaceArenaTestFriend {
57 const MetaspaceArena* const _arena;
58 public:
59 MetaspaceArenaTestFriend(const MetaspaceArena* arena) : _arena(arena) {}
60 const MetachunkList& chunks() const { return _arena->_chunks; }
61 const FreeBlocks* fbl() const { return _arena->_fbl; }
62 };
63
64 class MetaspaceArenaTestHelper {
65
66 MetaspaceGtestContext& _context;
67 const ArenaGrowthPolicy* const _growth_policy;
68
69 MetaspaceArena* _arena;
70
71 public:
72
73 // Create a helper; growth policy is directly specified
74 MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, const ArenaGrowthPolicy* growth_policy,
75 size_t allocation_alignment_words = Metaspace::min_allocation_alignment_words) :
76 _context(helper), _growth_policy(growth_policy), _arena(nullptr)
77 {
78 _arena = new MetaspaceArena(_context.context(), _growth_policy, allocation_alignment_words, "gtest-MetaspaceArena");
79 DEBUG_ONLY(_arena->verify());
80 _context.inc_num_arenas_created();
81 }
82
83
84   // Create a helper; the growth policy for the arena is determined by the given space type/is_class tuple
85 MetaspaceArenaTestHelper(MetaspaceGtestContext& helper,
86 Metaspace::MetaspaceType space_type, bool is_class,
87 size_t allocation_alignment_words = Metaspace::min_allocation_alignment_words) :
88 MetaspaceArenaTestHelper(helper, ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), allocation_alignment_words)
89 {}
90
91 ~MetaspaceArenaTestHelper() {
92 delete_arena_with_tests();
93 }
94
95 MetaspaceArena* arena() const { return _arena; }
96
97   // Note: all test functions return void due to gtest's limitation that ASSERT
98   // cannot be used in functions with a non-void return type.
99
100 void delete_arena_with_tests() {
101 if (_arena != nullptr) {
102 size_t used_words_before = _context.used_words();
103 size_t committed_words_before = _context.committed_words();
104 DEBUG_ONLY(_arena->verify());
105 delete _arena;
106 _arena = nullptr;
107 size_t used_words_after = _context.used_words();
108 size_t committed_words_after = _context.committed_words();
109 assert(_context.num_arenas_created() >= 1, "Sanity");
110 if (_context.num_arenas_created() == 1) {
111 ASSERT_0(used_words_after);
112 } else {
113 ASSERT_LE(used_words_after, used_words_before);
114 }
115 ASSERT_LE(committed_words_after, committed_words_before);
116 }
117 }
118
119 void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
120 size_t arena_used = 0, arena_committed = 0, arena_reserved = 0;
121 _arena->usage_numbers(&arena_used, &arena_committed, &arena_reserved);
122 EXPECT_GE(arena_committed, arena_used);
123 EXPECT_GE(arena_reserved, arena_committed);
124
125 size_t context_used = _context.used_words();
126 size_t context_committed = _context.committed_words();
127 size_t context_reserved = _context.reserved_words();
128 EXPECT_GE(context_committed, context_used);
129 EXPECT_GE(context_reserved, context_committed);
130
131 // If only one arena uses the context, usage numbers must match.
132 if (_context.num_arenas_created() == 1) {
133 EXPECT_EQ(context_used, arena_used);
134 } else {
135 assert(_context.num_arenas_created() > 1, "Sanity");
136 EXPECT_GE(context_used, arena_used);
137 }
138
139     // Committed and reserved numbers do not have to match, since the context may hold free chunks.
140 EXPECT_GE(context_committed, arena_committed);
141 EXPECT_GE(context_reserved, arena_reserved);
142
143 if (p_used) {
144 *p_used = arena_used;
145 }
146 if (p_committed) {
147 *p_committed = arena_committed;
148 }
149 if (p_capacity) {
150 *p_capacity = arena_reserved;
151 }
152 }
153
154 // Allocate; caller expects success; return pointer in *p_return_value
155 void allocate_from_arena_with_tests_expect_success(MetaWord** p_return_value, size_t word_size) {
156 allocate_from_arena_with_tests(p_return_value, word_size);
157 ASSERT_NOT_NULL(*p_return_value);
158 }
159
160 // Allocate; caller expects success but is not interested in return value
161 void allocate_from_arena_with_tests_expect_success(size_t word_size) {
162 MetaWord* dummy = nullptr;
163 allocate_from_arena_with_tests_expect_success(&dummy, word_size);
164 }
165
166 // Allocate; caller expects failure
167 void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
168 MetaWord* dummy = nullptr;
169 allocate_from_arena_with_tests(&dummy, word_size);
170 ASSERT_NULL(dummy);
171 }
172
173 void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {
174 MetaBlock result, wastage;
175 allocate_from_arena_with_tests(word_size, result, wastage);
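    // This variant discards wastage: any alignment gap or salvaged remainder block
    // the arena hands back is returned to it via deallocate(), which feeds it into
    // the arena's free block list.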
176 if (wastage.is_nonempty()) {
177 _arena->deallocate(wastage);
178 wastage.reset();
179 }
180 (*p_return_value) = result.base();
181 }
182
183 // Allocate; it may or may not work; return value in *p_return_value
184 void allocate_from_arena_with_tests(size_t word_size, MetaBlock& result, MetaBlock& wastage) {
185
186     // Note: usage_numbers walks all chunks in use to count usage, so it is not a cheap call.
187 size_t used = 0, committed = 0, capacity = 0;
188 usage_numbers_with_test(&used, &committed, &capacity);
189
190 size_t possible_expansion = _context.commit_limiter().possible_expansion_words();
191
192 result = _arena->allocate(word_size, wastage);
193
194 SOMETIMES(DEBUG_ONLY(_arena->verify();))
195
196 size_t used2 = 0, committed2 = 0, capacity2 = 0;
197 usage_numbers_with_test(&used2, &committed2, &capacity2);
198
199 if (result.is_empty()) {
200 // Allocation failed.
201 ASSERT_LT(possible_expansion, word_size);
202 ASSERT_EQ(used, used2);
203 ASSERT_EQ(committed, committed2);
204 ASSERT_EQ(capacity, capacity2);
205 } else {
206 // Allocation succeeded. Should be correctly aligned.
207 ASSERT_TRUE(result.is_aligned_base(_arena->allocation_alignment_words()));
208
209 // used: may go up or may not (since our request may have been satisfied from the freeblocklist
210 // whose content already counts as used).
211 // committed: may go up, may not
212 // capacity: ditto
213 ASSERT_GE(used2, used);
214 ASSERT_GE(committed2, committed);
215 ASSERT_GE(capacity2, capacity);
216 }
217 }
218
219   // Allocate; it may or may not work; the caller does not care about the result value
220 void allocate_from_arena_with_tests(size_t word_size) {
221 MetaWord* dummy = nullptr;
222 allocate_from_arena_with_tests(&dummy, word_size);
223 }
224
225 void deallocate_with_tests(MetaWord* p, size_t word_size) {
226 size_t used = 0, committed = 0, capacity = 0;
227 usage_numbers_with_test(&used, &committed, &capacity);
228
229 _arena->deallocate(MetaBlock(p, word_size));
230
231 SOMETIMES(DEBUG_ONLY(_arena->verify();))
232
233 size_t used2 = 0, committed2 = 0, capacity2 = 0;
234 usage_numbers_with_test(&used2, &committed2, &capacity2);
235
236 // Nothing should have changed. Deallocated blocks are added to the free block list
237 // which still counts as used.
238 ASSERT_EQ(used2, used);
239 ASSERT_EQ(committed2, committed);
240 ASSERT_EQ(capacity2, capacity);
241 }
242
243 ArenaStats get_arena_statistics() const {
244 ArenaStats stats;
245 _arena->add_to_statistics(&stats);
246 return stats;
247 }
248
249 MetaspaceArenaTestFriend internal_access() const {
250     return MetaspaceArenaTestFriend(_arena);
251 }
252
253 // Convenience method to return number of chunks in arena (including current chunk)
254 int get_number_of_chunks() const {
255 return internal_access().chunks().count();
256 }
257
258 };
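
// Typical use of the helper (a sketch, using only the methods defined above). The
// two-step allocation variant exposes wastage; the simpler variants hand wastage
// back to the arena automatically:
//
//   MetaspaceGtestContext context;
//   MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
//   MetaBlock result, wastage;
//   helper.allocate_from_arena_with_tests(16, result, wastage);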
259
260 static void test_basics(size_t commit_limit, bool is_micro) {
261 MetaspaceGtestContext context(commit_limit);
262 MetaspaceArenaTestHelper helper(context, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);
263
264 helper.allocate_from_arena_with_tests(1);
265 helper.allocate_from_arena_with_tests(128);
266 helper.allocate_from_arena_with_tests(128 * K);
267 helper.allocate_from_arena_with_tests(1);
268 helper.allocate_from_arena_with_tests(128);
269 helper.allocate_from_arena_with_tests(128 * K);
270 }
271
272 TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
273 test_basics(max_uintx, true);
274 }
275
432 int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
433 LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);
434
435 EXPECT_0(times_chunk_were_enlarged);
436
437 }
438
439 // Test the MetaspaceArena's free block list:
440 // Allocate, deallocate, then allocate the same block again. The second allocate should
441 // reuse the deallocated block.
442 TEST_VM(metaspace, MetaspaceArena_deallocate) {
443 if (Settings::use_allocation_guard()) {
444 return;
445 }
446 for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
447 MetaspaceGtestContext context;
448 MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
449
450 MetaWord* p1 = nullptr;
451 helper.allocate_from_arena_with_tests_expect_success(&p1, s);
452 ASSERT_FALSE(HasFailure());
453
454 size_t used1 = 0, capacity1 = 0;
455 helper.usage_numbers_with_test(&used1, nullptr, &capacity1);
456 ASSERT_FALSE(HasFailure());
457 ASSERT_EQ(used1, s);
458
459 helper.deallocate_with_tests(p1, s);
460
461 size_t used2 = 0, capacity2 = 0;
462 helper.usage_numbers_with_test(&used2, nullptr, &capacity2);
463 ASSERT_FALSE(HasFailure());
464 ASSERT_EQ(used1, used2);
465     ASSERT_EQ(capacity1, capacity2);
466
467 MetaWord* p2 = nullptr;
468 helper.allocate_from_arena_with_tests_expect_success(&p2, s);
469 ASSERT_FALSE(HasFailure());
470
471 size_t used3 = 0, capacity3 = 0;
472 helper.usage_numbers_with_test(&used3, nullptr, &capacity3);
473 ASSERT_FALSE(HasFailure());
474 ASSERT_EQ(used3, used2);
475 ASSERT_EQ(capacity3, capacity2);
476
477 // Actually, we should get the very same allocation back
478 ASSERT_EQ(p1, p2);
479 }
480 }
481
482 static void test_recover_from_commit_limit_hit() {
483
484 // Test:
485 // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
486 // - One, while attempting to commit parts of its current chunk on demand,
487 // triggers the limit and cannot commit its chunk further.
488 // - We release the other MetaspaceArena - its content is put back to the
489 // freelists.
490 // - We re-attempt allocation from the first manager. It should now succeed.
491 //
492   // This means the first MetaspaceArena may have to let go of its current chunk,
493   // retire it, and take a fresh chunk from the freelist.
496 MetaspaceGtestContext context(commit_limit);
497
498   // The first MetaspaceArena mimics a micro loader. This will fill the free
499 // chunk list with very small chunks. We allocate from them in an interleaved
500 // way to cause fragmentation.
501 MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
502 MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);
503
504 // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
505 // it gets a large initial chunk which is committed
506 // on demand and we are likely to hit a commit limit while trying to expand it.
507 MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);
508
509   // Allocate space until less than two commit granules (but more than one) are left
510 size_t allocated_from_1_and_2 = 0;
511 while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
512 allocated_from_1_and_2 < commit_limit) {
513 helper1.allocate_from_arena_with_tests_expect_success(1);
514 helper2.allocate_from_arena_with_tests_expect_success(1);
515 allocated_from_1_and_2 += 2;
516 HANDLE_FAILURE
517 }
518
519 // Now, allocating from helper3, creep up on the limit
520 size_t allocated_from_3 = 0;
521 MetaWord* p = nullptr;
522 while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != nullptr) &&
523 ++allocated_from_3 < Settings::commit_granule_words() * 2);
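  // (Comma expression: allocate one word, then test the result; stop once allocation
  //  fails or two granules' worth of words have been handed out.)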
524
525 EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);
526
527 // We expect the freelist to be empty of committed space...
528 EXPECT_0(context.cm().calc_committed_word_size());
529
530 //msthelper.cm().print_on(tty);
531
532 // Release the first MetaspaceArena.
533 helper1.delete_arena_with_tests();
534
535 //msthelper.cm().print_on(tty);
536
545
546 TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
547 test_recover_from_commit_limit_hit();
548 }
549
550 static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
551 size_t expected_starting_capacity,
552 bool test_in_place_enlargement)
553 {
554
555 if (Settings::use_allocation_guard()) {
556 return;
557 }
558
559   // Allocate tiny amounts from a MetaspaceArena in a clean-room setting and
560   // watch it grow. Used/committed/capacity should not grow in
561 // large jumps. Also, different types of MetaspaceArena should
562 // have different initial capacities.
563
564 MetaspaceGtestContext context;
565 MetaspaceArenaTestHelper smhelper(context, type, is_class);
566
567 MetaspaceArenaTestHelper smhelper_harrasser(context, Metaspace::ReflectionMetaspaceType, true);
568
569 size_t used = 0, committed = 0, capacity = 0;
570 const size_t alloc_words = 16;
571
572 smhelper.arena()->usage_numbers(&used, &committed, &capacity);
573 ASSERT_0(used);
574 ASSERT_0(committed);
575 ASSERT_0(capacity);
576
577 ///// First allocation //
578
579 smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
580
581 smhelper.arena()->usage_numbers(&used, &committed, &capacity);
582
583 ASSERT_EQ(used, alloc_words);
584 ASSERT_GE(committed, used);
585 ASSERT_GE(capacity, committed);
586
587 ASSERT_EQ(capacity, expected_starting_capacity);
610 DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)
611
612 size_t words_allocated = 0;
613 int num_allocated = 0;
614 const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
615 size_t highest_capacity_jump = capacity;
616 int num_capacity_jumps = 0;
617
618 while (words_allocated < safety && num_capacity_jumps < 15) {
619
620     // If we want to test growth with in-place chunk enlargement, leave the MetaspaceArena
621     // undisturbed; it will have all the room to grow. Otherwise, allocate from a little
622     // side arena to increase fragmentation.
623     // (Note that this does not completely prevent in-place chunk enlargement, but it
624     // makes it rather improbable.)
625 if (!test_in_place_enlargement) {
626 smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
627 }
628
629 smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
630 HANDLE_FAILURE
631 words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words);
632 num_allocated++;
633
634 size_t used2 = 0, committed2 = 0, capacity2 = 0;
635
636 smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);
637 HANDLE_FAILURE
638
639 // used should not grow larger than what we allocated, plus possible overhead.
640 ASSERT_GE(used2, used);
641 ASSERT_LE(used2, used + alloc_words * 2);
642 ASSERT_LE(used2, words_allocated + 100);
643 used = used2;
644
645 // A jump in committed words should not be larger than commit granule size.
646 // It can be smaller, since the current chunk of the MetaspaceArena may be
647 // smaller than a commit granule.
648 // (Note: unless root chunks are born fully committed)
649 ASSERT_GE(committed2, used2);
650 ASSERT_GE(committed2, committed);
651 const size_t committed_jump = committed2 - committed;
652 if (committed_jump > 0) {
653 ASSERT_LE(committed_jump, Settings::commit_granule_words());
654 }
655 committed = committed2;
656
657     // Capacity jumps: test that the arena's capacity does not grow too fast.
659 ASSERT_GE(capacity2, capacity);
660 const size_t capacity_jump = capacity2 - capacity;
661 if (capacity_jump > 0) {
662 LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
663 if (capacity_jump > highest_capacity_jump) {
664 /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
665 * on allocation history. Need to rethink this.
666 ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
667 ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
668 ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
669 */
670 highest_capacity_jump = capacity_jump;
671 }
672 num_capacity_jumps++;
673 }
674
675 capacity = capacity2;
676
677 }
678
679   // No free block list (FBL) should exist, since we did not deallocate.
680 ASSERT_EQ(smhelper.internal_access().fbl(), (FreeBlocks*)nullptr);
681 ASSERT_EQ(smhelper_harrasser.internal_access().fbl(), (FreeBlocks*)nullptr);
682
683   // After all this work, we should see an increase in the number of chunk-in-place enlargements
684   // (this is especially vulnerable to regression: the decision of when to enlarge a chunk in place
685   // is somewhat complicated; see MetaspaceArena::attempt_enlarge_current_chunk()).
686 #ifdef ASSERT
687 if (test_in_place_enlargement) {
688 const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
689 ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
690 }
691 #endif
692 }
693
694 // these numbers have to be in sync with arena policy numbers (see memory/metaspace/arenaGrowthPolicy.cpp)
695 TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
696 test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
697 word_size_for_level(CHUNK_LEVEL_1K), true);
698 }
699
700 TEST_VM(metaspace, MetaspaceArena_growth_refl_c_not_inplace) {
701 test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
702 word_size_for_level(CHUNK_LEVEL_1K), false);
766 }
767
768 /* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
769  * and too large, to allow any reliable guess as to whether chunks get enlarged in place.
770 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
771 test_controlled_growth(Metaspace::BootMetaspaceType, false,
772 word_size_for_level(CHUNK_LEVEL_4M), true);
773 }
774
775 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) {
776 test_controlled_growth(Metaspace::BootMetaspaceType, false,
777 word_size_for_level(CHUNK_LEVEL_4M), false);
778 }
779 */
780
781 // Test that repeated allocation-deallocation cycles with the same block size
782 // do not increase metaspace usage after the initial allocation (the deallocated
783 // block should be reused by the next allocation).
784 static void test_repeatedly_allocate_and_deallocate(bool is_topmost) {
785   // Test various sizes, including (importantly) the maximum possible block size = 1 root chunk.
786 for (size_t blocksize = Metaspace::max_allocation_word_size();
787 blocksize >= Metaspace::min_allocation_word_size; blocksize /= 2) {
788 size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0;
789 MetaWord* p = nullptr, *p2 = nullptr;
790
791 MetaspaceGtestContext context;
792 MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
793
794 // First allocation
795 helper.allocate_from_arena_with_tests_expect_success(&p, blocksize);
796 if (!is_topmost) {
797 // another one on top, size does not matter.
798 helper.allocate_from_arena_with_tests_expect_success(0x10);
799 HANDLE_FAILURE
800 }
801
802 // Measure
803 helper.usage_numbers_with_test(&used1, &committed1, nullptr);
804
805 // Dealloc, alloc several times with the same size.
806 for (int i = 0; i < 5; i ++) {
807 helper.deallocate_with_tests(p, blocksize);
808 helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize);
809 HANDLE_FAILURE
810 // We should get the same pointer back.
811 EXPECT_EQ(p2, p);
812 }
813
814 // Measure again
815 helper.usage_numbers_with_test(&used2, &committed2, nullptr);
816 EXPECT_EQ(used2, used1);
817 EXPECT_EQ(committed1, committed2);
818 }
819 }
820
821 TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_top_allocation) {
822 test_repeatedly_allocate_and_deallocate(true);
823 }
824
825 TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_nontop_allocation) {
826 test_repeatedly_allocate_and_deallocate(false);
827 }
828
829 static void test_random_aligned_allocation(size_t arena_alignment_words, SizeRange range) {
830 if (Settings::use_allocation_guard()) {
831 return;
832 }
833
834 // We let the arena use 4K chunks, unless the alloc size is larger.
835 chunklevel_t level = CHUNK_LEVEL_4K;
836   const ArenaGrowthPolicy policy(&level, 1);
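  // A single-entry growth policy: every chunk the arena requests will normally be
  // of this one level (larger allocations force larger chunks, as noted above).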
837 const size_t chunk_word_size = word_size_for_level(level);
838
839 size_t expected_used = 0;
840
841 MetaspaceGtestContext context;
842 MetaspaceArenaTestHelper helper(context, &policy, arena_alignment_words);
843
844 size_t last_alloc_size = 0;
845 unsigned num_allocations = 0;
846
847   const size_t max_used = MIN2(MAX2(chunk_word_size * 10, (range.highest() * 100)),
848                                LP64_ONLY(64) NOT_LP64(16) * M); // Note: limit is in words, not bytes.
849 while (expected_used < max_used) {
850
851 const int chunks_before = helper.get_number_of_chunks();
852
853 MetaBlock result, wastage;
854 size_t alloc_words = range.random_value();
855 NOT_LP64(alloc_words = align_up(alloc_words, Metaspace::min_allocation_alignment_words));
856 helper.allocate_from_arena_with_tests(alloc_words, result, wastage);
857
858 ASSERT_TRUE(result.is_nonempty());
859 ASSERT_TRUE(result.is_aligned_base(arena_alignment_words));
860 ASSERT_EQ(result.word_size(), alloc_words);
861
862 expected_used += alloc_words + wastage.word_size();
863 const int chunks_now = helper.get_number_of_chunks();
864 ASSERT_GE(chunks_now, chunks_before);
865 ASSERT_LE(chunks_now, chunks_before + 1);
866
867 // Estimate wastage:
868 // Guessing at wastage is somewhat simple since we don't expect to ever use the fbl (we
869 // don't deallocate). Therefore, wastage can only be caused by alignment gap or by
870 // salvaging an old chunk before a new chunk is added.
871 const bool expect_alignment_gap = !is_aligned(last_alloc_size, arena_alignment_words);
872 const bool new_chunk_added = chunks_now > chunks_before;
873
874 if (num_allocations == 0) {
875       // Expect no wastage if it's the first allocation in the arena.
876 ASSERT_TRUE(wastage.is_empty());
877 } else {
878 if (expect_alignment_gap) {
879 // expect wastage if the alignment requires it
880 ASSERT_TRUE(wastage.is_nonempty());
881 }
882 }
883
884 if (wastage.is_nonempty()) {
885 // If we have wastage, we expect it to be either too small or unaligned. That would not be true
886 // for wastage from the fbl, which could have any size; however, in this test we don't deallocate,
887 // so we don't expect wastage from the fbl.
888 if (wastage.is_aligned_base(arena_alignment_words)) {
889 ASSERT_LT(wastage.word_size(), alloc_words);
890 }
891 if (new_chunk_added) {
892 // chunk turnover: no more wastage than size of a commit granule, since we salvage the
893 // committed remainder of the old chunk.
894 ASSERT_LT(wastage.word_size(), Settings::commit_granule_words());
895 } else {
896 // No chunk turnover: no more wastage than what alignment requires.
897 ASSERT_LT(wastage.word_size(), arena_alignment_words);
898 }
899 }
900
901 // Check stats too
902 size_t used, committed, reserved;
903 helper.usage_numbers_with_test(&used, &committed, &reserved);
904 ASSERT_EQ(used, expected_used);
905
906     // No free block list (FBL) should exist, since we did not deallocate.
907 ASSERT_EQ(helper.internal_access().fbl(), (FreeBlocks*)nullptr);
908
909 HANDLE_FAILURE
910
911 last_alloc_size = alloc_words;
912 num_allocations ++;
913 }
914 LOG("allocs: %u", num_allocations);
915 }
916
917 #define TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(al) \
918 TEST_VM(metaspace, MetaspaceArena_test_random_small_aligned_allocation_##al) { \
919 static const SizeRange range(Metaspace::min_allocation_word_size, 128); \
920 test_random_aligned_allocation(al, range); \
921 }
922
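// Instantiate the test for a spread of arena alignments, given in words. The 1-word
// alignment is only exercised on 64-bit; on 32-bit the minimum allocation alignment
// is coarser (see Metaspace::min_allocation_alignment_words and the align_up above).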
923 #ifdef _LP64
924 TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(1);
925 #endif
926 TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(2);
927 TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(8);
928 TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(32);
929 TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(128);
930 TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(MIN_CHUNK_WORD_SIZE);
931
932 #define TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(al) \
933 TEST_VM(metaspace, MetaspaceArena_test_random_large_aligned_allocation_##al) { \
934 static const SizeRange range(Metaspace::max_allocation_word_size() / 2, \
935 Metaspace::max_allocation_word_size()); \
936 test_random_aligned_allocation(al, range); \
937 }
938
939 #ifdef _LP64
940 TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(1);
941 #endif
942 TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(2);
943 TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(8);
944 TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(32);
945 TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(128);
946 TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(MIN_CHUNK_WORD_SIZE);
947
948 } // namespace metaspace