1 /*
  2  * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2020 SAP SE. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "memory/metaspace/commitLimiter.hpp"
 28 #include "memory/metaspace/counters.hpp"
 29 #include "memory/metaspace/internalStats.hpp"
 30 #include "memory/metaspace/metaspaceArena.hpp"
 31 #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
 32 #include "memory/metaspace/metaspaceSettings.hpp"
 33 #include "memory/metaspace/metaspaceStatistics.hpp"
 34 #include "runtime/mutex.hpp"
 35 #include "runtime/mutexLocker.hpp"
 36 #include "utilities/debug.hpp"
 37 #include "utilities/globalDefinitions.hpp"
 38 
 39 //#define LOG_PLEASE
 40 #include "metaspaceGtestCommon.hpp"
 41 #include "metaspaceGtestContexts.hpp"
 42 #include "metaspaceGtestRangeHelpers.hpp"
 43 
 44 using metaspace::ArenaGrowthPolicy;
 45 using metaspace::CommitLimiter;
 46 using metaspace::InternalStats;
 47 using metaspace::MemRangeCounter;
 48 using metaspace::MetaspaceArena;
 49 using metaspace::SizeAtomicCounter;
 50 using metaspace::Settings;
 51 using metaspace::ArenaStats;
 52 
 53 // See metaspaceArena.cpp : needed for predicting commit sizes.
 54 namespace metaspace {
 55   extern size_t get_raw_word_size_for_requested_word_size(size_t net_word_size);
 56 }
 57 
// Test helper: bundles a MetaspaceArena with the Mutex and used-words counter it
//  needs, and wraps allocation/deallocation in verification code which snapshots
//  arena usage numbers before and after each operation and asserts on the deltas.
class MetaspaceArenaTestHelper {

  MetaspaceGtestContext& _context;

  Mutex* _lock;
  const ArenaGrowthPolicy* _growth_policy;
  SizeAtomicCounter _used_words_counter;
  MetaspaceArena* _arena;

  // Common constructor tail: create the lock and the arena under test.
  void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") {
    _growth_policy = growth_policy;
    _lock = new Mutex(Monitor::nosafepoint, "gtest-MetaspaceArenaTest_lock");
    // Lock during space creation, since this is what happens in the VM too
    //  (see ClassLoaderData::metaspace_non_null(), which we mimic here).
    {
      MutexLocker ml(_lock,  Mutex::_no_safepoint_check_flag);
      _arena = new MetaspaceArena(&_context.cm(), _growth_policy, _lock, &_used_words_counter, name);
    }
    DEBUG_ONLY(_arena->verify());

  }

public:

  // Create a helper; growth policy for arena is determined by the given spacetype|class tuple
  MetaspaceArenaTestHelper(MetaspaceGtestContext& helper,
                            Metaspace::MetaspaceType space_type, bool is_class,
                            const char* name = "gtest-MetaspaceArena") :
    _context(helper)
  {
    initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), name);
  }

  // Create a helper; growth policy is directly specified
  MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, const ArenaGrowthPolicy* growth_policy,
                           const char* name = "gtest-MetaspaceArena") :
    _context(helper)
  {
    initialize(growth_policy, name);
  }

  ~MetaspaceArenaTestHelper() {
    delete_arena_with_tests();
    delete _lock;
  }

  const CommitLimiter& limiter() const { return _context.commit_limiter(); }
  MetaspaceArena* arena() const { return _arena; }
  SizeAtomicCounter& used_words_counter() { return _used_words_counter; }

  // Note: all test functions return void due to gtests limitation that we cannot use ASSERT
  // in non-void returning tests.

  // Delete the arena (idempotent); verify that afterwards the used-words counter
  //  dropped to zero and committed words did not increase.
  void delete_arena_with_tests() {
    if (_arena != NULL) {
      size_t used_words_before = _used_words_counter.get();
      size_t committed_words_before = limiter().committed_words();
      DEBUG_ONLY(_arena->verify());
      delete _arena;
      _arena = NULL;
      size_t used_words_after = _used_words_counter.get();
      size_t committed_words_after = limiter().committed_words();
      // All used words must have been returned on arena death.
      ASSERT_0(used_words_after);
      if (Settings::uncommit_free_chunks()) {
        // Returned chunks may have been uncommitted, so committed may shrink.
        ASSERT_LE(committed_words_after, committed_words_before);
      } else {
        ASSERT_EQ(committed_words_after, committed_words_before);
      }
    }
  }

  // Retrieve used/committed/capacity word counts (any out pointer may be NULL)
  //  and sanity-check the invariant used <= committed <= capacity.
  void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
    _arena->usage_numbers(p_used, p_committed, p_capacity);
    if (p_used != NULL) {
      if (p_committed != NULL) {
        ASSERT_GE(*p_committed, *p_used);
      }
      // Since we own the used words counter, it should reflect our usage number 1:1
      ASSERT_EQ(_used_words_counter.get(), *p_used);
    }
    if (p_committed != NULL && p_capacity != NULL) {
      ASSERT_GE(*p_capacity, *p_committed);
    }
  }

  // Allocate; caller expects success; return pointer in *p_return_value
  void allocate_from_arena_with_tests_expect_success(MetaWord** p_return_value, size_t word_size) {
    allocate_from_arena_with_tests(p_return_value, word_size);
    ASSERT_NOT_NULL(*p_return_value);
  }

  // Allocate; caller expects success but is not interested in return value
  void allocate_from_arena_with_tests_expect_success(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests_expect_success(&dummy, word_size);
  }

  // Allocate; caller expects failure
  void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests(&dummy, word_size);
    ASSERT_NULL(dummy);
  }

  // Allocate; it may or may not work; return value in *p_return_value.
  //  Verifies that usage numbers move plausibly: unchanged on failure,
  //  non-decreasing on success.
  void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {

    // Note: usage_numbers walks all chunks in use and counts.
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    size_t possible_expansion = limiter().possible_expansion_words();

    MetaWord* p = _arena->allocate(word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    if (p == NULL) {
      // Allocation failed. It should only fail because the commit limiter
      //  did not leave enough headroom for the needed expansion.
      if (Settings::new_chunks_are_fully_committed()) {
        ASSERT_LT(possible_expansion, MAX_CHUNK_WORD_SIZE);
      } else {
        ASSERT_LT(possible_expansion, word_size);
      }

      // A failed allocation must leave usage numbers untouched.
      ASSERT_EQ(used, used2);
      ASSERT_EQ(committed, committed2);
      ASSERT_EQ(capacity, capacity2);
    } else {
      // Allocation succeeded. Should be correctly aligned.
      ASSERT_TRUE(is_aligned(p, sizeof(MetaWord)));
      // used: may go up or may not (since our request may have been satisfied from the freeblocklist
      //   whose content already counts as used).
      // committed: may go up, may not
      // capacity: ditto
      ASSERT_GE(used2, used);
      ASSERT_GE(committed2, committed);
      ASSERT_GE(capacity2, capacity);
    }

    *p_return_value = p;
  }

  // Allocate; it may or may not work; but caller does not care for the result value
  void allocate_from_arena_with_tests(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests(&dummy, word_size);
  }

  // Deallocate a block; verify that usage numbers are unchanged (the block goes
  //  onto the free block list, which still counts as used).
  void deallocate_with_tests(MetaWord* p, size_t word_size) {
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    _arena->deallocate(p, word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    // Nothing should have changed. Deallocated blocks are added to the free block list
    // which still counts as used.
    ASSERT_EQ(used2, used);
    ASSERT_EQ(committed2, committed);
    ASSERT_EQ(capacity2, capacity);
  }

  // Take a snapshot of the arena's statistics.
  ArenaStats get_arena_statistics() const {
    ArenaStats stats;
    _arena->add_to_statistics(&stats);
    return stats;
  }

  // Convenience method to return number of chunks in arena (including current chunk)
  int get_number_of_chunks() const {
    return get_arena_statistics().totals()._num;
  }

};
240 
241 static void test_basics(size_t commit_limit, bool is_micro) {
242   MetaspaceGtestContext context(commit_limit);
243   MetaspaceArenaTestHelper helper(context, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);
244 
245   helper.allocate_from_arena_with_tests(1);
246   helper.allocate_from_arena_with_tests(128);
247   helper.allocate_from_arena_with_tests(128 * K);
248   helper.allocate_from_arena_with_tests(1);
249   helper.allocate_from_arena_with_tests(128);
250   helper.allocate_from_arena_with_tests(128 * K);
251 }
252 
// Micro (reflection) loader, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
  test_basics(max_uintx, true);
}

// Micro (reflection) loader, tight 256K commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_limit) {
  test_basics(256 * K, true);
}

// Standard loader, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_nolimit) {
  test_basics(max_uintx, false);
}

// Standard loader, tight 256K commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_limit) {
  test_basics(256 * K, false);
}
268 
269 // Test chunk enlargement:
270 //  A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up.
271 //  We should see at least some occurrences of chunk-in-place enlargement.
272 static void test_chunk_enlargment_simple(Metaspace::MetaspaceType spacetype, bool is_class) {
273 
274   MetaspaceGtestContext context;
275   MetaspaceArenaTestHelper helper(context, (Metaspace::MetaspaceType)spacetype, is_class);
276 
277   uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();
278 
279   size_t allocated = 0;
280   while (allocated <= MAX_CHUNK_WORD_SIZE &&
281          metaspace::InternalStats::num_chunks_enlarged() == n1) {
282     size_t s = IntRange(32, 128).random_value();
283     helper.allocate_from_arena_with_tests_expect_success(s);
284     allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
285   }
286 
287   EXPECT_GT(metaspace::InternalStats::num_chunks_enlarged(), n1);
288 
289 }
290 
291 // Do this test for some of the standard types; don't do it for the boot loader type
292 //  since that one starts out with max chunk size so we would not see any enlargement.
293 
// Suffixes: _c = class space, _nc = non-class space.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_c) {
  test_chunk_enlargment_simple(Metaspace::StandardMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_nc) {
  test_chunk_enlargment_simple(Metaspace::StandardMetaspaceType, false);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_c) {
  test_chunk_enlargment_simple(Metaspace::ReflectionMetaspaceType, true);
}

TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_nc) {
  test_chunk_enlargment_simple(Metaspace::ReflectionMetaspaceType, false);
}
309 
310 // Test chunk enlargement:
311 // A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up.
312 //  We should see occurrences of chunk-in-place enlargement.
313 //  Here, we give it an ideal policy which should enable the initial chunk to grow unmolested
314 //  until finish.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_2) {

  // Allocation guards insert padding which would disturb the chunk accounting below.
  if (Settings::use_allocation_guard()) {
    return;
  }

  // Note: internally, chunk in-place enlargement is disallowed if growing the chunk
  //  would cause the arena to claim more memory than its growth policy allows. This
  //  is done to prevent the arena to grow too fast.
  //
  // In order to test in-place growth here without that restriction I give it an
  //  artificial growth policy which starts out with a tiny chunk size, then balloons
  //  right up to max chunk size. This will cause the initial chunk to be tiny, and
  //  then the arena is able to grow it without violating growth policy.
  chunklevel_t growth[] = { HIGHEST_CHUNK_LEVEL, ROOT_CHUNK_LEVEL };
  ArenaGrowthPolicy growth_policy(growth, 2);

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, &growth_policy);

  // Global enlargement counter before we start.
  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
    if (allocated <= MAX_CHUNK_WORD_SIZE) {
      // Chunk should have been enlarged in place
      ASSERT_EQ(1, helper.get_number_of_chunks());
    } else {
      // Next chunk should have started
      ASSERT_EQ(2, helper.get_number_of_chunks());
    }
  }

  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

  // We must have seen at least one in-place enlargement.
  ASSERT_GT0(times_chunk_were_enlarged);

}
357 
358 // Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
359 //  test that in place enlargement correctly fails if growing the chunk would bring us
360 //  beyond the max. size of a chunk.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_max_chunk_size) {

  // Allocation guards insert padding which would disturb the chunk accounting below.
  if (Settings::use_allocation_guard()) {
    return;
  }

  MetaspaceGtestContext context;

  // Try a range of first-allocation sizes (powers of two up to half a root chunk).
  for (size_t first_allocation_size = 1; first_allocation_size <= MAX_CHUNK_WORD_SIZE / 2; first_allocation_size *= 2) {

    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    // we allocate first a small amount, then the full amount possible.
    // The sum of first and second allocation should bring us above root chunk size.
    // This should work, we should not see any problems, but no chunk enlargement should
    // happen.
    int n1 = metaspace::InternalStats::num_chunks_enlarged();

    helper.allocate_from_arena_with_tests_expect_success(first_allocation_size);
    EXPECT_EQ(helper.get_number_of_chunks(), 1);

    // Second allocation pushes total past MAX_CHUNK_WORD_SIZE, so it cannot be
    //  satisfied by enlarging the current chunk - a second chunk must be started.
    helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE - first_allocation_size + 1);
    EXPECT_EQ(helper.get_number_of_chunks(), 2);

    int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
    LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

    // No in-place enlargement may have happened.
    EXPECT_0(times_chunk_were_enlarged);

  }
}
392 
393 // Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
394 //  test that in place enlargement correctly fails if growing the chunk would cause more
395 //  than doubling its size
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_doubling_chunk_size) {

  // Allocation guards insert padding which would disturb the chunk accounting below.
  if (Settings::use_allocation_guard()) {
    return;
  }

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

  int n1 = metaspace::InternalStats::num_chunks_enlarged();

  helper.allocate_from_arena_with_tests_expect_success(1000);
  EXPECT_EQ(helper.get_number_of_chunks(), 1);

  // 4000 words on top of 1000 would require more than doubling the current chunk,
  //  which in-place enlargement refuses to do - a second chunk must be started.
  helper.allocate_from_arena_with_tests_expect_success(4000);
  EXPECT_EQ(helper.get_number_of_chunks(), 2);

  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

  // No in-place enlargement may have happened.
  EXPECT_0(times_chunk_were_enlarged);

}
419 
420 // Test the MetaspaceArenas' free block list:
421 // Allocate, deallocate, then allocate the same block again. The second allocate should
422 // reuse the deallocated block.
423 TEST_VM(metaspace, MetaspaceArena_deallocate) {
424   if (Settings::use_allocation_guard()) {
425     return;
426   }
427   for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
428     MetaspaceGtestContext context;
429     MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
430 
431     MetaWord* p1 = NULL;
432     helper.allocate_from_arena_with_tests_expect_success(&p1, s);
433 
434     size_t used1 = 0, capacity1 = 0;
435     helper.usage_numbers_with_test(&used1, NULL, &capacity1);
436     ASSERT_EQ(used1, s);
437 
438     helper.deallocate_with_tests(p1, s);
439 
440     size_t used2 = 0, capacity2 = 0;
441     helper.usage_numbers_with_test(&used2, NULL, &capacity2);
442     ASSERT_EQ(used1, used2);
443     ASSERT_EQ(capacity2, capacity2);
444 
445     MetaWord* p2 = NULL;
446     helper.allocate_from_arena_with_tests_expect_success(&p2, s);
447 
448     size_t used3 = 0, capacity3 = 0;
449     helper.usage_numbers_with_test(&used3, NULL, &capacity3);
450     ASSERT_EQ(used3, used2);
451     ASSERT_EQ(capacity3, capacity2);
452 
453     // Actually, we should get the very same allocation back
454     ASSERT_EQ(p1, p2);
455   }
456 }
457 
// Verify that an arena which hit the commit limit can allocate again once another
//  arena dies and returns committed chunks to the freelist.
static void test_recover_from_commit_limit_hit() {

  if (Settings::new_chunks_are_fully_committed()) {
    return; // This would throw off the commit counting in this test.
  }

  // Test:
  // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
  // - One, while attempting to commit parts of its current chunk on demand,
  //   triggers the limit and cannot commit its chunk further.
  // - We release the other MetaspaceArena - its content is put back to the
  //   freelists.
  // - We re-attempt allocation from the first manager. It should now succeed.
  //
  // This means if the first MetaspaceArena may have to let go of its current chunk and
  // retire it and take a fresh chunk from the freelist.

  const size_t commit_limit = Settings::commit_granule_words() * 10;
  MetaspaceGtestContext context(commit_limit);

  // The first MetaspaceArena mimicks a micro loader. This will fill the free
  //  chunk list with very small chunks. We allocate from them in an interleaved
  //  way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);

  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed
  // on demand and we are likely to hit a commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);

  // Allocate space until we have below two but above one granule left
  size_t allocated_from_1_and_2 = 0;
  while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
      allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
  }

  // Now, allocating from helper3, creep up on the limit
  //  (the comma operator makes the allocation part of the loop condition;
  //   the loop ends when allocation fails or we allocated two granules' worth).
  size_t allocated_from_3 = 0;
  MetaWord* p = NULL;
  while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != NULL) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);

  EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);

  // We expect the freelist to be empty of committed space...
  EXPECT_0(context.cm().calc_committed_word_size());

  //msthelper.cm().print_on(tty);

  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();

  //msthelper.cm().print_on(tty);

  // Should have populated the freelist with committed space:
  //  we now expect the freelist to contain committed space again.
  EXPECT_GT(context.cm().calc_committed_word_size(), (size_t)0);

  // Repeat allocation from helper3, should now work.
  helper3.allocate_from_arena_with_tests_expect_success(1);

}
524 
// See test_recover_from_commit_limit_hit() above.
TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
  test_recover_from_commit_limit_hit();
}
528 
// Watch a single arena grow under a stream of tiny allocations and verify that
//  used/committed/capacity grow gradually, starting from the expected initial
//  capacity for the given space type. Optionally leave the arena undisturbed so
//  chunks can grow in place, and then also verify enlargements happened.
static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
                                   size_t expected_starting_capacity,
                                   bool test_in_place_enlargement)
{

  // Allocation guards add padding which would disturb the exact numbers below.
  if (Settings::use_allocation_guard()) {
    return;
  }

  // From a MetaspaceArena in a clean room allocate tiny amounts;
  // watch it grow. Used/committed/capacity should not grow in
  // large jumps. Also, different types of MetaspaceArena should
  // have different initial capacities.

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper smhelper(context, type, is_class, "Grower");

  MetaspaceArenaTestHelper smhelper_harrasser(context, Metaspace::ReflectionMetaspaceType, true, "Harasser");

  size_t used = 0, committed = 0, capacity = 0;
  const size_t alloc_words = 16;

  // A fresh arena has no chunks yet, so all numbers start at zero.
  smhelper.arena()->usage_numbers(&used, &committed, &capacity);
  ASSERT_0(used);
  ASSERT_0(committed);
  ASSERT_0(capacity);

  ///// First allocation //

  smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);

  ASSERT_EQ(used, alloc_words);
  ASSERT_GE(committed, used);
  ASSERT_GE(capacity, committed);

  // Capacity after the first allocation equals the policy's initial chunk size.
  ASSERT_EQ(capacity, expected_starting_capacity);

  if (!(Settings::new_chunks_are_fully_committed() && type == Metaspace::BootMetaspaceType)) {
    // Initial commit charge for the whole context should be one granule
    ASSERT_EQ(context.committed_words(), Settings::commit_granule_words());
    // Initial commit number for the arena should be less since - apart from boot loader - no
    //  space type has large initial chunks.
    ASSERT_LE(committed, Settings::commit_granule_words());
  }

  ///// subsequent allocations //

  DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)

  size_t words_allocated = 0;
  int num_allocated = 0;
  // Upper bound on total allocation to guarantee loop termination.
  const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
  size_t highest_capacity_jump = capacity;
  int num_capacity_jumps = 0;

  while (words_allocated < safety && num_capacity_jumps < 15) {

    // if we want to test growth with in-place chunk enlargement, leave MetaspaceArena
    // undisturbed; it will have all the place to grow. Otherwise allocate from a little
    // side arena to increase fragmentation.
    // (Note that this does not completely prevent in-place chunk enlargement but makes it
    //  rather improbable)
    if (!test_in_place_enlargement) {
      smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
    }

    smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
    words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words);
    num_allocated++;

    size_t used2 = 0, committed2 = 0, capacity2 = 0;

    smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);

    // used should not grow larger than what we allocated, plus possible overhead.
    ASSERT_GE(used2, used);
    ASSERT_LE(used2, used + alloc_words * 2);
    ASSERT_LE(used2, words_allocated + 100);
    used = used2;

    // A jump in committed words should not be larger than commit granule size.
    // It can be smaller, since the current chunk of the MetaspaceArena may be
    // smaller than a commit granule.
    // (Note: unless root chunks are born fully committed)
    ASSERT_GE(committed2, used2);
    ASSERT_GE(committed2, committed);
    const size_t committed_jump = committed2 - committed;
    if (committed_jump > 0 && !Settings::new_chunks_are_fully_committed()) {
      ASSERT_LE(committed_jump, Settings::commit_granule_words());
    }
    committed = committed2;

    // Capacity jumps: Test that arenas capacity does not grow too fast.
    ASSERT_GE(capacity2, committed2);
    ASSERT_GE(capacity2, capacity);
    const size_t capacity_jump = capacity2 - capacity;
    if (capacity_jump > 0) {
      LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
      if (capacity_jump > highest_capacity_jump) {
        /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
         * on allocation history. Need to rethink this.
        ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
        ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
        ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
        */
        highest_capacity_jump = capacity_jump;
      }
      num_capacity_jumps++;
    }

    capacity = capacity2;

  }

  // After all this work, we should see an increase in number of chunk-in-place-enlargements
  //  (this especially is vulnerable to regression: the decisions of when to do in-place-enlargements are somewhat
  //   complicated, see MetaspaceArena::attempt_enlarge_current_chunk())
#ifdef ASSERT
  if (test_in_place_enlargement) {
    const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
    ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
  }
#endif
}
655 
// these numbers have to be in sync with arena policy numbers (see memory/metaspace/arenaGrowthPolicy.cpp)
// Suffixes: _c = class space; _inplace = leave arena undisturbed so chunks can grow in place.
TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_c_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_c_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_c_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}
686 
/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
689 TEST_VM(metaspace, MetaspaceArena_growth_boot_c_inplace) {
690   test_controlled_growth(Metaspace::BootMetaspaceType, true,
691                          word_size_for_level(CHUNK_LEVEL_1M), true);
692 }
693 
694 TEST_VM(metaspace, MetaspaceArena_growth_boot_c_not_inplace) {
695   test_controlled_growth(Metaspace::BootMetaspaceType, true,
696                          word_size_for_level(CHUNK_LEVEL_1M), false);
697 }
698 */
699 
// Non-class-space (_nc) variants of the growth tests; expected starting capacities
//  must stay in sync with memory/metaspace/arenaGrowthPolicy.cpp.
TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), true);
}

TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), false);
}
729 
/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
732 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
733   test_controlled_growth(Metaspace::BootMetaspaceType, false,
734                          word_size_for_level(CHUNK_LEVEL_4M), true);
735 }
736 
737 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) {
738   test_controlled_growth(Metaspace::BootMetaspaceType, false,
739                          word_size_for_level(CHUNK_LEVEL_4M), false);
740 }
741 */
742 
743 // Test that repeated allocation-deallocation cycles with the same block size
744 //  do not increase metaspace usage after the initial allocation (the deallocated
745 //  block should be reused by the next allocation).
746 static void test_repeatedly_allocate_and_deallocate(bool is_topmost) {
747   // Test various sizes, including (important) the max. possible block size = 1 root chunk
748   for (size_t blocksize = Metaspace::max_allocation_word_size(); blocksize >= 1; blocksize /= 2) {
749     size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0;
750     MetaWord* p = NULL, *p2 = NULL;
751 
752     MetaspaceGtestContext context;
753     MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
754 
755     // First allocation
756     helper.allocate_from_arena_with_tests_expect_success(&p, blocksize);
757     if (!is_topmost) {
758       // another one on top, size does not matter.
759       helper.allocate_from_arena_with_tests_expect_success(0x10);
760     }
761 
762     // Measure
763     helper.usage_numbers_with_test(&used1, &committed1, NULL);
764 
765     // Dealloc, alloc several times with the same size.
766     for (int i = 0; i < 5; i ++) {
767       helper.deallocate_with_tests(p, blocksize);
768       helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize);
769       // We should get the same pointer back.
770       EXPECT_EQ(p2, p);
771     }
772 
773     // Measure again
774     helper.usage_numbers_with_test(&used2, &committed2, NULL);
775     EXPECT_EQ(used2, used1);
776     EXPECT_EQ(committed1, committed2);
777   }
778 }
779 
// Cycled block is the arena's top allocation.
TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_top_allocation) {
  test_repeatedly_allocate_and_deallocate(true);
}

// Cycled block has another allocation on top of it.
TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_nontop_allocation) {
  test_repeatedly_allocate_and_deallocate(false);
}