/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_METASPACE_METASPACEARENA_HPP
#define SHARE_MEMORY_METASPACE_METASPACEARENA_HPP

#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/counters.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metachunkList.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"

class outputStream;
class Mutex;

namespace metaspace {

class ArenaGrowthPolicy;
class FreeBlocks;

struct ArenaStats;

// The MetaspaceArena is a growable metaspace memory pool belonging to a CLD;
// internally it consists of a list of metaspace chunks, of which the head chunk
// is the current chunk from which we allocate via pointer bump.
//
//  +---------------+
//  |  Arena        |
//  +---------------+
//        |
//        | _chunks                                  commit top
//        |                                          v
//   +----------+   +----------+   +----------+   +----------+
//   | retired  |-->| retired  |-->| retired  |-->| current  |
//   | chunk    |   | chunk    |   | chunk    |   | chunk    |
//   +----------+   +----------+   +----------+   +----------+
//                                                     ^
//                                                     used top
//
//   +------------+
//   | FreeBlocks | --> O -> O -> O -> O
//   +------------+
//
//

// When the current chunk is used up, MetaspaceArena requests a new chunk from
// the associated ChunkManager.
//
// MetaspaceArena also keeps a FreeBlocks structure to manage memory blocks which
// had been deallocated prematurely.
//

class MetaspaceArena : public CHeapObj<mtClass> {

  // Reference to an outside lock to use for synchronizing access to this arena.
  // This lock is normally owned by the CLD which owns the ClassLoaderMetaspace which
  // owns this arena.
  // Todo: This should be changed. Either the CLD should synchronize access to the
  // CLMS and its arenas itself, or the arena should have an own lock. The latter
  // would allow for more fine granular locking since it would allow access to
  // both class- and non-class arena in the CLMS independently.
  Mutex* const _lock;

  // Reference to the chunk manager to allocate chunks from.
  ChunkManager* const _chunk_manager;

  // Reference to the growth policy to use.
  const ArenaGrowthPolicy* const _growth_policy;

  // List of chunks. Head of the list is the current chunk.
  MetachunkList _chunks;

  // Allocation alignment, in words.
  const int _alignment_words;

  // Structure to take care of leftover/deallocated space in used chunks.
  // Owned by the Arena. Gets allocated on demand only.
  FreeBlocks* _fbl;

  Metachunk* current_chunk()              { return _chunks.first(); }
  const Metachunk* current_chunk() const  { return _chunks.first(); }

  // Reference to an outside counter to keep track of used space.
  SizeAtomicCounter* const _total_used_words_counter;

  // A name for purely debugging/logging purposes.
  const char* const _name;

#ifdef ASSERT
  // Allocation guards: When active, arena allocations are interleaved with
  // fence allocations. An overwritten fence indicates a buffer overrun in either
  // the preceding or the following user block. All fences are linked together;
  // validating the fences just means walking that linked list.
  // Note that for the Arena, fence blocks are just another form of user blocks.
  class Fence {
    static const uintx EyeCatcher =
      NOT_LP64(0x77698465) LP64_ONLY(0x7769846577698465ULL); // "META" resp "METAMETA"
    // Two eyecatchers to easily spot a corrupted _next pointer
    const uintx _eye1;
    const Fence* const _next;
    const uintx _eye2;
  public:
    Fence(const Fence* next) : _eye1(EyeCatcher), _next(next), _eye2(EyeCatcher) {}
    const Fence* next() const { return _next; }
    void verify() const;
  };
  const Fence* _first_fence;
#endif // ASSERT

  Mutex* lock() const                     { return _lock; }
  ChunkManager* chunk_manager() const     { return _chunk_manager; }

  // free block list
  FreeBlocks* fbl() const                 { return _fbl; }

  // Hand a prematurely deallocated block over to the free block list (_fbl)
  // so its space can be reused by later allocations.
  void add_allocation_to_fbl(MetaWord* p, size_t word_size);

  // Given a chunk, add its remaining free committed space to the free block list.
  void salvage_chunk(Metachunk* c);

  // Allocate a new chunk from the underlying chunk manager able to hold at least
  // requested word size.
  Metachunk* allocate_new_chunk(size_t requested_word_size);

  // Returns the level of the next chunk to be added, according to growth policy.
  chunklevel_t next_chunk_level() const;

  // Attempt to enlarge the current chunk to make it large enough to hold at least
  // requested_word_size additional words.
  //
  // On success, true is returned, false otherwise.
  bool attempt_enlarge_current_chunk(size_t requested_word_size);

  // Prematurely returns a metaspace allocation to the _block_freelists
  // because it is not needed anymore (requires CLD lock to be active).
  void deallocate_locked(MetaWord* p, size_t word_size);

  // Returns true if the area indicated by pointer and size have actually been allocated
  // from this arena.
  DEBUG_ONLY(bool is_valid_area(MetaWord* p, size_t word_size) const;)

  // Allocate from the arena proper, once dictionary allocations and fencing are sorted out.
  MetaWord* allocate_inner(size_t word_size);

public:

  MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy, int alignment_words,
                 Mutex* lock, SizeAtomicCounter* total_used_words_counter,
                 const char* name);

  ~MetaspaceArena();

  // Allocate memory from Metaspace.
  // 1) Attempt to allocate from the dictionary of deallocated blocks.
  // 2) Attempt to allocate from the current chunk.
  // 3) Attempt to enlarge the current chunk in place if it is too small.
  // 4) Attempt to get a new chunk and allocate from that chunk.
  // At any point, if we hit a commit limit, we return NULL.
  MetaWord* allocate(size_t word_size);

  // Prematurely returns a metaspace allocation to the _block_freelists because it is not
  // needed anymore.
  void deallocate(MetaWord* p, size_t word_size);

  // Update statistics. This walks all in-use chunks.
  void add_to_statistics(ArenaStats* out) const;

  // Convenience method to get the most important usage statistics.
  // For deeper analysis use add_to_statistics().
  void usage_numbers(size_t* p_used_words, size_t* p_committed_words, size_t* p_capacity_words) const;

  DEBUG_ONLY(void verify() const;)
  DEBUG_ONLY(void verify_locked() const;)
  DEBUG_ONLY(void verify_allocation_guards() const;)

  void print_on(outputStream* st) const;
  void print_on_locked(outputStream* st) const;

};

} // namespace metaspace

#endif // SHARE_MEMORY_METASPACE_METASPACEARENA_HPP