/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

#define assert_counter(expected_value, real_value, msg) \
  assert( (expected_value) == (real_value),             \
         "Counter mismatch (%s): expected " SIZE_FORMAT \
         ", but got: " SIZE_FORMAT ".", msg, expected_value, \
         real_value)
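
// Usage example (mirroring verify_metrics_locked() below):
//   assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");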

// SpaceManager methods

size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
  size_t chunk_sizes[] = {
      specialized_chunk_size(is_class_space),
      small_chunk_size(is_class_space),
      medium_chunk_size(is_class_space)
  };

  // Adjust up to one of the fixed chunk sizes ...
  for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
    if (requested <= chunk_sizes[i]) {
      return chunk_sizes[i];
    }
  }

  // ... or return the size as a humongous chunk.
  return requested;
}
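
// Example for the function above, assuming the usual non-class chunk sizes
// (SpecializedChunk = 128, SmallChunk = 512, MediumChunk = 8K words): a
// request for 100 words is adjusted up to 128 words, a request for 600 words
// up to 8K words, and a request for 10000 words is returned unchanged as a
// humongous chunk size.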

size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  return adjust_initial_chunk_size(requested, is_class());
}

size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  size_t requested;

  if (is_class()) {
    switch (type) {
    case Metaspace::BootMetaspaceType:              requested = Metaspace::first_class_chunk_word_size(); break;
    case Metaspace::UnsafeAnonymousMetaspaceType:   requested = ClassSpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType:        requested = ClassSpecializedChunk; break;
    default:                                        requested = ClassSmallChunk; break;
    }
  } else {
    switch (type) {
    case Metaspace::BootMetaspaceType:              requested = Metaspace::first_chunk_word_size(); break;
    case Metaspace::UnsafeAnonymousMetaspaceType:   requested = SpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType:        requested = SpecializedChunk; break;
    default:                                        requested = SmallChunk; break;
    }
  }

  // Adjust to one of the fixed chunk sizes (unless humongous)
  const size_t adjusted = adjust_initial_chunk_size(requested);

  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);

  return adjusted;
}
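
// For example, a non-class space manager created for a reflection or unsafe
// anonymous class loader starts out with a SpecializedChunk, a regular loader
// with a SmallChunk, while the boot loader's initial size comes from
// Metaspace::first_chunk_word_size().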

void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
        num_chunks_by_type(i), chunk_size_name(i));
  }

  chunk_manager()->locked_print_free_chunks(st);
}

size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // small_chunk_limit small chunks can be allocated.
  // After that a medium chunk is preferred.
  size_t chunk_word_size;

  // Special case for unsafe anonymous metadata space.
  // UnsafeAnonymous metadata space is usually small since it is used for
  // class loader data whose life cycle is governed by one class, such as an
  // unsafe anonymous class. The majority fall within the 1K - 2K range and
  // rarely go above 4K (64-bit JVM).
  // Instead of jumping to SmallChunk once the initial chunk is exhausted, keeping
  // allocations at SpecializedChunk size up to anon_and_delegating_metadata_specialize_chunk_limit (4)
  // reduces space waste from 60+% to around 30%.
  if ((_space_type == Metaspace::UnsafeAnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
      _mdtype == Metaspace::NonClassType &&
      num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
      word_size + Metachunk::overhead() <= SpecializedChunk) {
    return SpecializedChunk;
  }

  if (num_chunks_by_type(MediumIndex) == 0 &&
      num_chunks_by_type(SmallIndex) < small_chunk_limit) {
    chunk_word_size = (size_t) small_chunk_size();
    if (word_size + Metachunk::overhead() > small_chunk_size()) {
      chunk_word_size = medium_chunk_size();
    }
  } else {
    chunk_word_size = medium_chunk_size();
  }

  // Might still need a humongous chunk.  Enforce
  // humongous allocation sizes to be aligned up to
  // the smallest chunk size.
  size_t if_humongous_sized_chunk =
    align_up(word_size + Metachunk::overhead(),
             smallest_chunk_size());
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

  assert(!SpaceManager::is_humongous(word_size) ||
         chunk_word_size == if_humongous_sized_chunk,
         "Size calculation is wrong, word_size " SIZE_FORMAT
         " chunk_word_size " SIZE_FORMAT,
         word_size, chunk_word_size);
  Log(gc, metaspace, alloc) log;
  if (log.is_trace() && SpaceManager::is_humongous(word_size)) {
    log.trace("Metadata humongous allocation:");
    log.trace("  word_size " SIZE_FORMAT, word_size);
    log.trace("  chunk_word_size " SIZE_FORMAT, chunk_word_size);
    log.trace("    chunk overhead " SIZE_FORMAT, Metachunk::overhead());
  }
  return chunk_word_size;
}
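
// Worked example for the humongous path above (illustrative numbers, assuming
// smallest_chunk_size() == 128 and an overhead of, say, 8 words): a request
// for word_size = 100000 yields align_up(100008, 128) = 100096 words as the
// chunk size.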

void SpaceManager::track_metaspace_memory_usage() {
  if (is_init_completed()) {
    if (is_class()) {
      MemoryService::track_compressed_class_memory_usage();
    }
    MemoryService::track_metaspace_memory_usage();
  }
}

MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert_lock_strong(_lock);
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
    size_t words_left = 0;
    size_t words_used = 0;
    if (current_chunk() != NULL) {
      words_left = current_chunk()->free_word_size();
      words_used = current_chunk()->used_word_size();
    }
    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
                                       word_size, words_used, words_left);
  }

  // Get another chunk
  size_t chunk_word_size = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(chunk_word_size);

  MetaWord* mem = NULL;

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    // Add to this manager's list of chunks in use.
    // If the new chunk is humongous, it was created to serve a single large allocation. In that
    // case it usually makes no sense to make it the current chunk, since the next allocation would
    // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
    // good chunk which could be used for more normal allocations.
    bool make_current = true;
    if (next->get_chunk_type() == HumongousIndex &&
        current_chunk() != NULL) {
      make_current = false;
    }
    add_chunk(next, make_current);
    mem = next->allocate(word_size);
  }

  // Track metaspace memory usage statistics.
  track_metaspace_memory_usage();

  return mem;
}

void SpaceManager::print_on(outputStream* st) const {
  SpaceManagerStatistics stat;
  add_to_statistics(&stat); // will lock _lock.
  stat.print_on(st, 1*K, false);
}

SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Metaspace::MetaspaceType space_type,
                           Mutex* lock) :
  _lock(lock),
  _mdtype(mdtype),
  _space_type(space_type),
  _chunk_list(NULL),
  _current_chunk(NULL),
  _overhead_words(0),
  _capacity_words(0),
  _used_words(0),
  _block_freelists(NULL) {
  Metadebug::init_allocation_fail_alot_count();
  memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
}

void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {

  assert_lock_strong(MetaspaceExpand_lock);

  _capacity_words += new_chunk->word_size();
  _overhead_words += Metachunk::overhead();
  DEBUG_ONLY(new_chunk->verify());
  _num_chunks_by_type[new_chunk->get_chunk_type()] ++;

  // Adjust global counters:
  MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
  MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
}

void SpaceManager::account_for_allocation(size_t words) {
  // Note: we should be locked with the ClassLoaderData-specific metaspace lock.
  // We may or may not be locked with the global metaspace expansion lock.
  assert_lock_strong(lock());

  // Add to the per SpaceManager totals. This can be done non-atomically.
  _used_words += words;

  // Adjust global counters. This will be done atomically.
  MetaspaceUtils::inc_used(mdtype(), words);
}

void SpaceManager::account_for_spacemanager_death() {

  assert_lock_strong(MetaspaceExpand_lock);

  MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
  MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
  MetaspaceUtils::dec_used(mdtype(), _used_words);
}

SpaceManager::~SpaceManager() {

  // This takes this->_lock, which cannot be done while holding MetaspaceExpand_lock.
  DEBUG_ONLY(verify_metrics());

  MutexLockerEx fcl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);

  account_for_spacemanager_death();

  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    LogStream ls(log.trace());
    locked_print_chunks_in_use_on(&ls);
    if (block_freelists() != NULL) {
      block_freelists()->print_on(&ls);
    }
  }

  // Return all the chunks in use by this space manager
  // to the global list of free chunks. The chunk list is
  // NULL terminated.
  chunk_manager()->return_chunk_list(chunk_list());
#ifdef ASSERT
  _chunk_list = NULL;
  _current_chunk = NULL;
#endif

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    chunk_manager()->locked_verify(true);
  END_EVERY_NTH
#endif

  if (_block_freelists != NULL) {
    delete _block_freelists;
  }
}

void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(lock());
  // Allocations and deallocations are in units of raw_word_size.
  size_t raw_word_size = get_allocation_word_size(word_size);
  // Lazily create a block_freelist
  if (block_freelists() == NULL) {
    _block_freelists = new BlockFreelist();
  }
  block_freelists()->return_block(p, raw_word_size);
  DEBUG_ONLY(Atomic::inc(&(g_internal_statistics.num_deallocs)));
}

// Adds a chunk to the list of chunks in use.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert_lock_strong(_lock);
  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());

  if (make_current) {
    // If we are to make the chunk current, retire the old current chunk and replace
    // it with the new chunk.
    retire_current_chunk();
    set_current_chunk(new_chunk);
  }

  // Add the new chunk at the head of its respective chunk list.
  new_chunk->set_next(_chunk_list);
  _chunk_list = new_chunk;

  // Adjust counters.
  account_for_new_chunk(new_chunk);

  assert(new_chunk->is_empty(), "Not ready for reuse");
  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("SpaceManager::added chunk: ");
    ResourceMark rm;
    LogStream ls(log.trace());
    new_chunk->print_on(&ls);
    chunk_manager()->locked_print_free_chunks(&ls);
  }
}

void SpaceManager::retire_current_chunk() {
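  // Salvage the unused tail of the retiring chunk: formally allocate the
  // leftover words from the chunk and hand them to the block freelist, so
  // they can still serve future small allocations instead of being wasted.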
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= SmallBlocks::small_block_min_size()) {
      MetaWord* ptr = current_chunk()->allocate(remaining_words);
      deallocate(ptr, remaining_words);
      account_for_allocation(remaining_words);
    }
  }
}

Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);

  if (next == NULL) {
    next = vs_list()->get_new_chunk(chunk_word_size,
                                    medium_chunk_bunch());
  }

  Log(gc, metaspace, alloc) log;
  if (log.is_trace() && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    log.trace("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
  }

  return next;
}

MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t raw_word_size = get_allocation_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;

  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
    if (p != NULL) {
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
    }
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    verify_metrics_locked();
  END_EVERY_NTH
#endif

  return p;
}
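
// Note (an assumption based on the surrounding metaspace code, not spelled out
// here): allocate() is the external entry point used by the owning metaspace;
// deallocated blocks re-enter through deallocate() above and are preferred
// here once the block freelist grows past allocation_from_dictionary_limit.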

// Returns the address of the space allocated for "word_size".
// This method does not know about blocks (Metablocks).
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(lock());
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }

  if (result != NULL) {
    account_for_allocation(word_size);
  }

  return result;
}

void SpaceManager::verify() {
  Metachunk* curr = chunk_list();
  while (curr != NULL) {
    DEBUG_ONLY(do_verify_chunk(curr);)
    assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
    curr = curr->next();
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
}

void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
  assert_lock_strong(lock());
  Metachunk* chunk = chunk_list();
  while (chunk != NULL) {
    UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type());
    chunk_stat.add_num(1);
    chunk_stat.add_cap(chunk->word_size());
    chunk_stat.add_overhead(Metachunk::overhead());
    chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
    if (chunk != current_chunk()) {
      chunk_stat.add_waste(chunk->free_word_size());
    } else {
      chunk_stat.add_free(chunk->free_word_size());
    }
    chunk = chunk->next();
  }
  if (block_freelists() != NULL) {
    out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
  }
}

void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

#ifdef ASSERT
void SpaceManager::verify_metrics_locked() const {
  assert_lock_strong(lock());

  SpaceManagerStatistics stat;
  add_to_statistics_locked(&stat);

  UsedChunksStatistics chunk_stats = stat.totals();

  DEBUG_ONLY(chunk_stats.check_sanity());

  assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
  assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
  assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
}

void SpaceManager::verify_metrics() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  verify_metrics_locked();
}
#endif // ASSERT


} // namespace metaspace