1 /*
  2  * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2020 SAP SE. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "memory/metaspace/commitLimiter.hpp"
 28 #include "memory/metaspace/counters.hpp"
 29 #include "memory/metaspace/internalStats.hpp"
 30 #include "memory/metaspace/metaspaceAlignment.hpp"
 31 #include "memory/metaspace/metaspaceArena.hpp"
 32 #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
 33 #include "memory/metaspace/metaspaceSettings.hpp"
 34 #include "memory/metaspace/metaspaceStatistics.hpp"
 35 #include "runtime/mutex.hpp"
 36 #include "runtime/mutexLocker.hpp"
 37 #include "utilities/debug.hpp"
 38 #include "utilities/globalDefinitions.hpp"
 39 
 40 //#define LOG_PLEASE
 41 #include "metaspaceGtestCommon.hpp"
 42 #include "metaspaceGtestContexts.hpp"
 43 #include "metaspaceGtestRangeHelpers.hpp"
 44 
 45 using metaspace::ArenaGrowthPolicy;
 46 using metaspace::CommitLimiter;
 47 using metaspace::InternalStats;
 48 using metaspace::MemRangeCounter;
 49 using metaspace::MetaspaceArena;
 50 using metaspace::SizeAtomicCounter;
 51 using metaspace::Settings;
 52 using metaspace::ArenaStats;
 53 
// Test helper: owns the arena lock, a used-words counter and a MetaspaceArena,
//  and wraps arena operations (creation, allocation, deallocation, destruction)
//  in "..._with_tests" methods which cross-check usage- and commit numbers
//  before and after each operation.
class MetaspaceArenaTestHelper {

  // Shared test context; provides the chunk manager and commit limiter.
  MetaspaceGtestContext& _context;

  Mutex* _lock;
  const ArenaGrowthPolicy* _growth_policy;
  SizeAtomicCounter _used_words_counter;
  // NOTE(review): _alignment_words is never written nor read - the alignment
  //  is passed straight through initialize() to the arena. Candidate for removal.
  int _alignment_words;
  MetaspaceArena* _arena;

  // Shared constructor code: creates the lock and, under that lock, the arena.
  void initialize(const ArenaGrowthPolicy* growth_policy, int alignment_words,
                  const char* name = "gtest-MetaspaceArena") {
    _growth_policy = growth_policy;
    _lock = new Mutex(Monitor::nosafepoint, "gtest-MetaspaceArenaTest_lock");
    // Lock during space creation, since this is what happens in the VM too
    //  (see ClassLoaderData::metaspace_non_null(), which we mimic here).
    {
      MutexLocker ml(_lock,  Mutex::_no_safepoint_check_flag);
      _arena = new MetaspaceArena(&_context.cm(), _growth_policy, alignment_words, _lock, &_used_words_counter, name);
    }
    DEBUG_ONLY(_arena->verify());

  }

public:

  // Create a helper; growth policy for arena is determined by the given spacetype|class tuple
  MetaspaceArenaTestHelper(MetaspaceGtestContext& helper,
                            Metaspace::MetaspaceType space_type, bool is_class,
                            const char* name = "gtest-MetaspaceArena") :
    _context(helper)
  {
    initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), metaspace::MetaspaceMinAlignmentWords, name);
  }

  // Create a helper; growth policy is directly specified
  MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, const ArenaGrowthPolicy* growth_policy,
                           const char* name = "gtest-MetaspaceArena") :
    _context(helper)
  {
    initialize(growth_policy, metaspace::MetaspaceMinAlignmentWords, name);
  }

  ~MetaspaceArenaTestHelper() {
    delete_arena_with_tests();
    delete _lock;
  }

  const CommitLimiter& limiter() const { return _context.commit_limiter(); }
  MetaspaceArena* arena() const { return _arena; }
  SizeAtomicCounter& used_words_counter() { return _used_words_counter; }

  // Note: all test functions return void due to gtests limitation that we cannot use ASSERT
  // in non-void returning tests.

  // Delete the arena (no-op if already deleted). Afterwards no words may count
  //  as used anymore, and the commit charge must not have grown (it may shrink,
  //  if free chunks get uncommitted).
  void delete_arena_with_tests() {
    if (_arena != NULL) {
      size_t used_words_before = _used_words_counter.get();
      size_t committed_words_before = limiter().committed_words();
      DEBUG_ONLY(_arena->verify());
      delete _arena;
      _arena = NULL;
      size_t used_words_after = _used_words_counter.get();
      size_t committed_words_after = limiter().committed_words();
      ASSERT_0(used_words_after);
      if (Settings::uncommit_free_chunks()) {
        ASSERT_LE(committed_words_after, committed_words_before);
      } else {
        ASSERT_EQ(committed_words_after, committed_words_before);
      }
    }
  }

  // Retrieve usage numbers from the arena (any of the out pointers may be NULL)
  //  and check their internal consistency: used <= committed <= capacity, and
  //  used must match our external used-words counter exactly.
  void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
    _arena->usage_numbers(p_used, p_committed, p_capacity);
    if (p_used != NULL) {
      if (p_committed != NULL) {
        ASSERT_GE(*p_committed, *p_used);
      }
      // Since we own the used words counter, it should reflect our usage number 1:1
      ASSERT_EQ(_used_words_counter.get(), *p_used);
    }
    if (p_committed != NULL && p_capacity != NULL) {
      ASSERT_GE(*p_capacity, *p_committed);
    }
  }

  // Allocate; caller expects success; return pointer in *p_return_value
  void allocate_from_arena_with_tests_expect_success(MetaWord** p_return_value, size_t word_size) {
    allocate_from_arena_with_tests(p_return_value, word_size);
    ASSERT_NOT_NULL(*p_return_value);
  }

  // Allocate; caller expects success but is not interested in return value
  void allocate_from_arena_with_tests_expect_success(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests_expect_success(&dummy, word_size);
  }

  // Allocate; caller expects failure
  void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests(&dummy, word_size);
    ASSERT_NULL(dummy);
  }

  // Allocate; it may or may not work; return value in *p_return_value.
  //  In both cases, check that usage numbers moved in a plausible direction.
  void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {

    // Note: usage_numbers walks all chunks in use and counts.
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    size_t possible_expansion = limiter().possible_expansion_words();

    MetaWord* p = _arena->allocate(word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    if (p == NULL) {
      // Allocation failed. We expect this only when the commit limiter had too
      //  little headroom left; usage numbers must be unchanged.
      if (Settings::new_chunks_are_fully_committed()) {
        ASSERT_LT(possible_expansion, MAX_CHUNK_WORD_SIZE);
      } else {
        ASSERT_LT(possible_expansion, word_size);
      }

      ASSERT_EQ(used, used2);
      ASSERT_EQ(committed, committed2);
      ASSERT_EQ(capacity, capacity2);
    } else {
      // Allocation succeeded. Should be correctly aligned.
      ASSERT_TRUE(is_aligned(p, sizeof(MetaWord)));
      // used: may go up or may not (since our request may have been satisfied from the freeblocklist
      //   whose content already counts as used).
      // committed: may go up, may not
      // capacity: ditto
      ASSERT_GE(used2, used);
      ASSERT_GE(committed2, committed);
      ASSERT_GE(capacity2, capacity);
    }

    *p_return_value = p;
  }

  // Allocate; it may or may not work; but caller does not care for the result value
  void allocate_from_arena_with_tests(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests(&dummy, word_size);
  }

  // Deallocate a block previously allocated from this arena; check that no
  //  usage numbers changed (the block goes to the free block list, which still
  //  counts as used).
  void deallocate_with_tests(MetaWord* p, size_t word_size) {
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    _arena->deallocate(p, word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    // Nothing should have changed. Deallocated blocks are added to the free block list
    // which still counts as used.
    ASSERT_EQ(used2, used);
    ASSERT_EQ(committed2, committed);
    ASSERT_EQ(capacity2, capacity);
  }

  // Return a snapshot of the arena's statistics.
  ArenaStats get_arena_statistics() const {
    ArenaStats stats;
    _arena->add_to_statistics(&stats);
    return stats;
  }

  // Convenience method to return number of chunks in arena (including current chunk)
  int get_number_of_chunks() const {
    return get_arena_statistics().totals()._num;
  }

};
238 
239 static void test_basics(size_t commit_limit, bool is_micro) {
240   MetaspaceGtestContext context(commit_limit);
241   MetaspaceArenaTestHelper helper(context, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);
242 
243   helper.allocate_from_arena_with_tests(1);
244   helper.allocate_from_arena_with_tests(128);
245   helper.allocate_from_arena_with_tests(128 * K);
246   helper.allocate_from_arena_with_tests(1);
247   helper.allocate_from_arena_with_tests(128);
248   helper.allocate_from_arena_with_tests(128 * K);
249 }
250 
// Run test_basics for micro (reflection) and standard arenas, each both
//  without a commit limit (max_uintx) and with one.
TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
  test_basics(max_uintx, true);
}

TEST_VM(metaspace, MetaspaceArena_basics_micro_limit) {
  test_basics(256 * K, true);
}

TEST_VM(metaspace, MetaspaceArena_basics_standard_nolimit) {
  test_basics(max_uintx, false);
}

TEST_VM(metaspace, MetaspaceArena_basics_standard_limit) {
  test_basics(256 * K, false);
}
266 
267 // Test chunk enlargement:
268 //  A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up.
269 //  We should see at least some occurrences of chunk-in-place enlargement.
270 static void test_chunk_enlargment_simple(Metaspace::MetaspaceType spacetype, bool is_class) {
271 
272   MetaspaceGtestContext context;
273   MetaspaceArenaTestHelper helper(context, (Metaspace::MetaspaceType)spacetype, is_class);
274 
275   uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();
276 
277   size_t allocated = 0;
278   while (allocated <= MAX_CHUNK_WORD_SIZE &&
279          metaspace::InternalStats::num_chunks_enlarged() == n1) {
280     size_t s = IntRange(32, 128).random_value();
281     helper.allocate_from_arena_with_tests_expect_success(s);
282     allocated += metaspace::get_raw_word_size_for_requested_word_size(s, metaspace::MetaspaceMinAlignmentWords);
283   }
284 
285   EXPECT_GT(metaspace::InternalStats::num_chunks_enlarged(), n1);
286 
287 }
288 
289 // Do this test for some of the standard types; don't do it for the boot loader type
290 //  since that one starts out with max chunk size so we would not see any enlargement.
291 
// "_c" variants run with is_class == true, "_nc" with is_class == false.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_c) {
  test_chunk_enlargment_simple(Metaspace::StandardMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_nc) {
  test_chunk_enlargment_simple(Metaspace::StandardMetaspaceType, false);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_c) {
  test_chunk_enlargment_simple(Metaspace::ReflectionMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_nc) {
  test_chunk_enlargment_simple(Metaspace::ReflectionMetaspaceType, false);
}
307 
308 // Test chunk enlargement:
309 // A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up.
310 //  We should see occurrences of chunk-in-place enlargement.
311 //  Here, we give it an ideal policy which should enable the initial chunk to grow unmolested
312 //  until finish.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_2) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  // Note: internally, chunk in-place enlargement is disallowed if growing the chunk
  //  would cause the arena to claim more memory than its growth policy allows. This
  //  is done to prevent the arena to grow too fast.
  //
  // In order to test in-place growth here without that restriction I give it an
  //  artificial growth policy which starts out with a tiny chunk size, then balloons
  //  right up to max chunk size. This will cause the initial chunk to be tiny, and
  //  then the arena is able to grow it without violating growth policy.
  chunklevel_t growth[] = { HIGHEST_CHUNK_LEVEL, ROOT_CHUNK_LEVEL };
  ArenaGrowthPolicy growth_policy(growth, 2);

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, &growth_policy);

  // Snapshot of the global in-place-enlargement counter before we start.
  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  // Fill the arena in small random steps until we pass the max chunk size.
  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s, metaspace::MetaspaceMinAlignmentWords);
    if (allocated <= MAX_CHUNK_WORD_SIZE) {
      // Chunk should have been enlarged in place
      ASSERT_EQ(1, helper.get_number_of_chunks());
    } else {
      // Next chunk should have started
      ASSERT_EQ(2, helper.get_number_of_chunks());
    }
  }

  // Counter delta is small in this test, so the narrowing to int is benign.
  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

  ASSERT_GT0(times_chunk_were_enlarged);

}
355 
356 // Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
357 //  test that in place enlargement correctly fails if growing the chunk would bring us
358 //  beyond the max. size of a chunk.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_max_chunk_size) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  // Note: all helpers in the loop below share this one context, i.e. the same
  //  chunk manager and commit limiter.
  MetaspaceGtestContext context;

  for (size_t first_allocation_size = 1; first_allocation_size <= MAX_CHUNK_WORD_SIZE / 2; first_allocation_size *= 2) {

    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    // we allocate first a small amount, then the full amount possible.
    // The sum of first and second allocation should bring us above root chunk size.
    // This should work, we should not see any problems, but no chunk enlargement should
    // happen.
    int n1 = metaspace::InternalStats::num_chunks_enlarged();

    helper.allocate_from_arena_with_tests_expect_success(first_allocation_size);
    EXPECT_EQ(helper.get_number_of_chunks(), 1);

    // Second allocation: sized so that first + second = MAX_CHUNK_WORD_SIZE + 1,
    //  i.e. just beyond what a single (root) chunk could ever hold.
    helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE - first_allocation_size + 1);
    EXPECT_EQ(helper.get_number_of_chunks(), 2);

    int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
    LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

    EXPECT_0(times_chunk_were_enlarged);

  }
}
390 
391 // Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
392 //  test that in place enlargement correctly fails if growing the chunk would cause more
393 //  than doubling its size
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_doubling_chunk_size) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

  int n1 = metaspace::InternalStats::num_chunks_enlarged();

  // First allocation fits into the initial chunk...
  helper.allocate_from_arena_with_tests_expect_success(1000);
  EXPECT_EQ(helper.get_number_of_chunks(), 1);

  // ...the second is expected to start a new chunk rather than enlarge the
  //  first one (growing it would more than double it, see test description above).
  helper.allocate_from_arena_with_tests_expect_success(4000);
  EXPECT_EQ(helper.get_number_of_chunks(), 2);

  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

  EXPECT_0(times_chunk_were_enlarged);

}
417 
418 // Test the MetaspaceArenas' free block list:
419 // Allocate, deallocate, then allocate the same block again. The second allocate should
420 // reuse the deallocated block.
421 TEST_VM(metaspace, MetaspaceArena_deallocate) {
422   if (Settings::use_allocation_guard()) {
423     return;
424   }
425   for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
426     MetaspaceGtestContext context;
427     MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
428 
429     MetaWord* p1 = NULL;
430     helper.allocate_from_arena_with_tests_expect_success(&p1, s);
431 
432     size_t used1 = 0, capacity1 = 0;
433     helper.usage_numbers_with_test(&used1, NULL, &capacity1);
434     ASSERT_EQ(used1, s);
435 
436     helper.deallocate_with_tests(p1, s);
437 
438     size_t used2 = 0, capacity2 = 0;
439     helper.usage_numbers_with_test(&used2, NULL, &capacity2);
440     ASSERT_EQ(used1, used2);
441     ASSERT_EQ(capacity2, capacity2);
442 
443     MetaWord* p2 = NULL;
444     helper.allocate_from_arena_with_tests_expect_success(&p2, s);
445 
446     size_t used3 = 0, capacity3 = 0;
447     helper.usage_numbers_with_test(&used3, NULL, &capacity3);
448     ASSERT_EQ(used3, used2);
449     ASSERT_EQ(capacity3, capacity2);
450 
451     // Actually, we should get the very same allocation back
452     ASSERT_EQ(p1, p2);
453   }
454 }
455 
static void test_recover_from_commit_limit_hit() {

  if (Settings::new_chunks_are_fully_committed()) {
    return; // This would throw off the commit counting in this test.
  }

  // Test:
  // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
  // - One, while attempting to commit parts of its current chunk on demand,
  //   triggers the limit and cannot commit its chunk further.
  // - We release the other MetaspaceArena - its content is put back to the
  //   freelists.
  // - We re-attempt allocation from the first manager. It should now succeed.
  //
  // This means the first MetaspaceArena may have to let go of its current chunk,
  // retire it and take a fresh chunk from the freelist.

  const size_t commit_limit = Settings::commit_granule_words() * 10;
  MetaspaceGtestContext context(commit_limit);

  // The first MetaspaceArena mimics a micro loader. This will fill the free
  //  chunk list with very small chunks. We allocate from them in an interleaved
  //  way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);

  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed
  // on demand and we are likely to hit a commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);

  // Allocate space until we have below two but above one granule left
  size_t allocated_from_1_and_2 = 0;
  while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
      allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
  }

  // Now, allocating from helper3, creep up on the limit
  size_t allocated_from_3 = 0;
  MetaWord* p = NULL;
  while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != NULL) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);

  EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);

  // We expect the freelist to be empty of committed space...
  EXPECT_0(context.cm().calc_committed_word_size());

  //msthelper.cm().print_on(tty);

  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();

  //msthelper.cm().print_on(tty);

  // Releasing the arena should have populated the freelist with committed space.
  EXPECT_GT(context.cm().calc_committed_word_size(), (size_t)0);

  // Repeat allocation from helper3, should now work.
  helper3.allocate_from_arena_with_tests_expect_success(1);

}
522 
// Driver for test_recover_from_commit_limit_hit() above.
TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
  test_recover_from_commit_limit_hit();
}
526 
// Worker for the MetaspaceArena_growth_* tests below; see the in-body comment
//  for the test idea. expected_starting_capacity must match the growth policy
//  for the given type|is_class combination.
static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
                                   size_t expected_starting_capacity,
                                   bool test_in_place_enlargement)
{

  if (Settings::use_allocation_guard()) {
    return;
  }

  // From a MetaspaceArena in a clean room allocate tiny amounts;
  // watch it grow. Used/committed/capacity should not grow in
  // large jumps. Also, different types of MetaspaceArena should
  // have different initial capacities.

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper smhelper(context, type, is_class, "Grower");

  MetaspaceArenaTestHelper smhelper_harrasser(context, Metaspace::ReflectionMetaspaceType, true, "Harasser");

  size_t used = 0, committed = 0, capacity = 0;
  const size_t alloc_words = 16;

  // A pristine arena reports zero for all three numbers.
  smhelper.arena()->usage_numbers(&used, &committed, &capacity);
  ASSERT_0(used);
  ASSERT_0(committed);
  ASSERT_0(capacity);

  ///// First allocation //

  smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);

  ASSERT_EQ(used, alloc_words);
  ASSERT_GE(committed, used);
  ASSERT_GE(capacity, committed);

  // Capacity after the first allocation must equal the type-specific starting
  //  capacity the caller expects.
  ASSERT_EQ(capacity, expected_starting_capacity);

  if (!(Settings::new_chunks_are_fully_committed() && type == Metaspace::BootMetaspaceType)) {
    // Initial commit charge for the whole context should be one granule
    ASSERT_EQ(context.committed_words(), Settings::commit_granule_words());
    // Initial commit number for the arena should be less since - apart from boot loader - no
    //  space type has large initial chunks.
    ASSERT_LE(committed, Settings::commit_granule_words());
  }

  ///// subsequent allocations //

  DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)

  size_t words_allocated = 0;
  int num_allocated = 0;
  // Safety brake: stop well after one max-sized chunk worth of allocations.
  const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
  size_t highest_capacity_jump = capacity;
  int num_capacity_jumps = 0;

  while (words_allocated < safety && num_capacity_jumps < 15) {

    // if we want to test growth with in-place chunk enlargement, leave MetaspaceArena
    // undisturbed; it will have all the place to grow. Otherwise allocate from a little
    // side arena to increase fragmentation.
    // (Note that this does not completely prevent in-place chunk enlargement but makes it
    //  rather improbable)
    if (!test_in_place_enlargement) {
      smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
    }

    smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
    words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words, metaspace::MetaspaceMinAlignmentWords);
    num_allocated++;

    size_t used2 = 0, committed2 = 0, capacity2 = 0;

    smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);

    // used should not grow larger than what we allocated, plus possible overhead.
    ASSERT_GE(used2, used);
    ASSERT_LE(used2, used + alloc_words * 2);
    ASSERT_LE(used2, words_allocated + 100);
    used = used2;

    // A jump in committed words should not be larger than commit granule size.
    // It can be smaller, since the current chunk of the MetaspaceArena may be
    // smaller than a commit granule.
    // (Note: unless root chunks are born fully committed)
    ASSERT_GE(committed2, used2);
    ASSERT_GE(committed2, committed);
    const size_t committed_jump = committed2 - committed;
    if (committed_jump > 0 && !Settings::new_chunks_are_fully_committed()) {
      ASSERT_LE(committed_jump, Settings::commit_granule_words());
    }
    committed = committed2;

    // Capacity jumps: Test that arenas capacity does not grow too fast.
    ASSERT_GE(capacity2, committed2);
    ASSERT_GE(capacity2, capacity);
    const size_t capacity_jump = capacity2 - capacity;
    if (capacity_jump > 0) {
      LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
      if (capacity_jump > highest_capacity_jump) {
        /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
         * on allocation history. Need to rethink this.
        ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
        ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
        ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
        */
        highest_capacity_jump = capacity_jump;
      }
      num_capacity_jumps++;
    }

    capacity = capacity2;

  }

  // After all this work, we should see an increase in number of chunk-in-place-enlargements
  //  (this especially is vulnerable to regression: the decisions of when to do in-place-enlargements are somewhat
  //   complicated, see MetaspaceArena::attempt_enlarge_current_chunk())
#ifdef ASSERT
  if (test_in_place_enlargement) {
    const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
    ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
  }
#endif
}
653 
654 // these numbers have to be in sync with arena policy numbers (see memory/metaspace/arenaGrowthPolicy.cpp)
// Growth tests, is_class == true variants. Expected starting capacities must
//  stay in sync with memory/metaspace/arenaGrowthPolicy.cpp.
TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_c_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}
684 
/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
687 TEST_VM(metaspace, MetaspaceArena_growth_boot_c_inplace) {
688   test_controlled_growth(Metaspace::BootMetaspaceType, true,
689                          word_size_for_level(CHUNK_LEVEL_1M), true);
690 }
691 
692 TEST_VM(metaspace, MetaspaceArena_growth_boot_c_not_inplace) {
693   test_controlled_growth(Metaspace::BootMetaspaceType, true,
694                          word_size_for_level(CHUNK_LEVEL_1M), false);
695 }
696 */
697 
// Growth tests, is_class == false variants. Expected starting capacities must
//  stay in sync with memory/metaspace/arenaGrowthPolicy.cpp.
TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), false);
}
727 
/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
730 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
731   test_controlled_growth(Metaspace::BootMetaspaceType, false,
732                          word_size_for_level(CHUNK_LEVEL_4M), true);
733 }
734 
735 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) {
736   test_controlled_growth(Metaspace::BootMetaspaceType, false,
737                          word_size_for_level(CHUNK_LEVEL_4M), false);
738 }
739 */
740 
741 // Test that repeated allocation-deallocation cycles with the same block size
742 //  do not increase metaspace usage after the initial allocation (the deallocated
743 //  block should be reused by the next allocation).
744 static void test_repeatedly_allocate_and_deallocate(bool is_topmost) {
745   // Test various sizes, including (important) the max. possible block size = 1 root chunk
746   for (size_t blocksize = Metaspace::max_allocation_word_size(); blocksize >= 1; blocksize /= 2) {
747     size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0;
748     MetaWord* p = NULL, *p2 = NULL;
749 
750     MetaspaceGtestContext context;
751     MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
752 
753     // First allocation
754     helper.allocate_from_arena_with_tests_expect_success(&p, blocksize);
755     if (!is_topmost) {
756       // another one on top, size does not matter.
757       helper.allocate_from_arena_with_tests_expect_success(0x10);
758     }
759 
760     // Measure
761     helper.usage_numbers_with_test(&used1, &committed1, NULL);
762 
763     // Dealloc, alloc several times with the same size.
764     for (int i = 0; i < 5; i ++) {
765       helper.deallocate_with_tests(p, blocksize);
766       helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize);
767       // We should get the same pointer back.
768       EXPECT_EQ(p2, p);
769     }
770 
771     // Measure again
772     helper.usage_numbers_with_test(&used2, &committed2, NULL);
773     EXPECT_EQ(used2, used1);
774     EXPECT_EQ(committed1, committed2);
775   }
776 }
777 
// Cycle with the block under test being the topmost allocation of the arena.
TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_top_allocation) {
  test_repeatedly_allocate_and_deallocate(true);
}

// Cycle with another allocation sitting on top of the block under test.
TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_nontop_allocation) {
  test_repeatedly_allocate_and_deallocate(false);
}