/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunklevel.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/counters.hpp"
#include "memory/metaspace/freeChunkList.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/rootChunkArea.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "sanitizers/address.hpp"
#include "sanitizers/leak.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

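// Log prefix for the UL/UL2 logging macros used throughout this file: every
// message is tagged with the address of this node and its base address.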
#define LOGFMT         "VsListNode @" PTR_FORMAT " base " PTR_FORMAT " "
#define LOGFMT_ARGS    p2i(this), p2i(_base)

#ifdef ASSERT
void check_pointer_is_aligned_to_commit_granule(const MetaWord* p) {
  assert(is_aligned(p, Settings::commit_granule_bytes()),
         "Pointer not aligned to commit granule size: " PTR_FORMAT ".",
         p2i(p));
}
void check_word_size_is_aligned_to_commit_granule(size_t word_size) {
  assert(is_aligned(word_size, Settings::commit_granule_words()),
         "Not aligned to commit granule size: " SIZE_FORMAT ".", word_size);
}
#endif

// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//    returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//    (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// Returns true on success, false if a commit limit was hit.
bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(Metaspace_lock);

  // First calculate how large the committed regions in this range are
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  // By how many words the commit charge would increase were we to
  //  commit the given address range completely.
  const size_t commit_increase_words = word_size - committed_words_in_range;

  UL2(debug, "committing range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (commit_increase_words == 0) {
    UL(debug, "... already fully committed.");
    return true; // Already fully committed, nothing to do.
  }

  // Before committing any more memory, check limits.
  if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
    UL(debug, "... cannot commit (limit).");
    return false;
  }

  // Commit...
  if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  }

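  // With AlwaysPreTouch, touch the newly committed pages now so the cost of
  // backing them with physical memory is paid up front rather than on first use.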
  if (AlwaysPreTouch) {
    os::pretouch_memory(p, p + word_size);
  }

  UL2(debug, "... committed " SIZE_FORMAT " additional words.", commit_increase_words);

  // ... tell commit limiter...
  _commit_limiter->increase_committed(commit_increase_words);

  // ... update counters in containing vslist ...
  _total_committed_words_counter->increment_by(commit_increase_words);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_committed(p, word_size);

#ifdef ASSERT
  // The committed-word count maintained by the CommitLimiter should equal the sum of committed
  // words in both the class and non-class vslist (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) {
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  InternalStats::inc_num_space_committed();
  return true;
}

// Given an address range, ensure it is committed.
//
// The range does not have to be aligned to granule size; however, the function will always
// commit whole granules.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//    returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//    (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// !! Careful:
//    calling ensure_range_is_committed on a range which contains both committed and uncommitted
//    areas will commit the whole area, thus erasing the content of the existing committed parts.
//    Make sure you never call this on an address range containing live data. !!
//
// Returns true on success, false if a commit limit was hit.
bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
  assert_lock_strong(Metaspace_lock);
  assert(p != nullptr && word_size > 0, "Sanity");
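  // Round the range outward to commit granule boundaries before handing it to
  // commit_range(). E.g., assuming a 64K commit granule, a small range in the
  // middle of a granule is widened to cover that whole 64K granule.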
  MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
  MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());
  return commit_range(p_start, p_end - p_start);
}


// Given an address range (which has to be aligned to commit granule size):
//  - uncommit it
//  - mark it as uncommitted in the commit mask
void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(Metaspace_lock);

  // First calculate how large the committed regions in this range are
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  UL2(debug, "uncommitting range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (committed_words_in_range == 0) {
    UL(debug, "... already fully uncommitted.");
    return; // Already fully uncommitted, nothing to do.
  }

  // Uncommit...
  if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
    // Note: this can actually happen, since uncommit may increase the number of mappings.
    fatal("Failed to uncommit metaspace.");
  }

  UL2(debug, "... uncommitted " SIZE_FORMAT " words.", committed_words_in_range);

  // ... tell commit limiter...
  _commit_limiter->decrease_committed(committed_words_in_range);

  // ... and global counters...
  _total_committed_words_counter->decrement_by(committed_words_in_range);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_uncommitted(p, word_size);

#ifdef ASSERT
  // The committed-word count maintained by the CommitLimiter should equal the sum of committed
  // words in both the class and non-class vslist (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif
  InternalStats::inc_num_space_uncommitted();
}

//// creation, destruction ////

VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, bool owns_rs, CommitLimiter* limiter,
                                   SizeCounter* reserve_counter, SizeCounter* commit_counter) :
  _next(nullptr),
  _rs(rs),
  _owns_rs(owns_rs),
  _base((MetaWord*)rs.base()),
  _word_size(rs.size() / BytesPerWord),
  _used_words(0),
  _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord),
  _root_chunk_area_lut((MetaWord*)rs.base(), rs.size() / BytesPerWord),
  _commit_limiter(limiter),
  _total_reserved_words_counter(reserve_counter),
  _total_committed_words_counter(commit_counter)
{
  UL2(debug, "born: [" PTR_FORMAT ".." PTR_FORMAT "), (word_size " SIZE_FORMAT ").",
      p2i(_rs.base()), p2i(_rs.end()), _word_size);

  // Update reserved counter in vslist
  _total_reserved_words_counter->increment_by(_word_size);

  assert_is_aligned(_base, chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert_is_aligned(_word_size, chunklevel::MAX_CHUNK_WORD_SIZE);

  // Poison the memory region. It will be unpoisoned later on a per-chunk basis for chunks
  // that are handed out to arenas.
  ASAN_POISON_MEMORY_REGION(rs.base(), rs.size());

  // Register the memory region with LSan; Metaspace contains lots of pointers to malloc'ed
  // memory.
  LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
}

// Create a node of a given size (it will create its own space).
VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
                                                CommitLimiter* limiter, SizeCounter* reserve_words_counter,
                                                SizeCounter* commit_words_counter)
{
  DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
  ReservedSpace rs(word_size * BytesPerWord,
                   Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
                   os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
  }
  MemTracker::record_virtual_memory_type(rs.base(), mtMetaspace);
  assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
  InternalStats::inc_num_vsnodes_births();
  return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter);
}

// Create a node over an existing space
VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, CommitLimiter* limiter,
                                                SizeCounter* reserve_words_counter, SizeCounter* commit_words_counter)
{
  InternalStats::inc_num_vsnodes_births();
  return new VirtualSpaceNode(rs, false, limiter, reserve_words_counter, commit_words_counter);
}

VirtualSpaceNode::~VirtualSpaceNode() {
  DEBUG_ONLY(verify_locked();)

  // Unregister memory region related to Metaspace.
  LSAN_UNREGISTER_ROOT_REGION(_rs.base(), _rs.size());

  // Undo the poisoning before potentially unmapping memory. This ensures that future mappings at
  // the same address do not unexpectedly fail with use-after-poison.
  ASAN_UNPOISON_MEMORY_REGION(_rs.base(), _rs.size());

  UL(debug, ": dies.");

  if (_owns_rs) {
    _rs.release();
  }

  // Update counters in vslist
  size_t committed = committed_words();
  _total_committed_words_counter->decrement_by(committed);
  _total_reserved_words_counter->decrement_by(_word_size);

  // ... and tell commit limiter
  _commit_limiter->decrease_committed(committed);

  InternalStats::inc_num_vsnodes_deaths();
}

//// Chunk allocation, splitting, merging /////

// Allocate a root chunk from this node. Will fail and return null if the node is full,
//  i.e. if we have used up the whole address space of this node's memory region.
//  (In case this node backs the compressed class space, this is how we hit
//   CompressedClassSpaceSize.)
// Note that this just returns reserved memory; the caller must take care of committing this
//  chunk before using it.
Metachunk* VirtualSpaceNode::allocate_root_chunk() {
  assert_lock_strong(Metaspace_lock);
  assert_is_aligned(free_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  if (free_words() >= chunklevel::MAX_CHUNK_WORD_SIZE) {

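    // Carve the next root-chunk-sized slice off this node: it starts at the
    // current top (_base + _used_words) and spans MAX_CHUNK_WORD_SIZE words.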
    MetaWord* loc = _base + _used_words;
    _used_words += chunklevel::MAX_CHUNK_WORD_SIZE;

    RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(loc);

    // Create a root chunk header and initialize it.
    Metachunk* c = rca->alloc_root_chunk_header(this);
    assert(c->base() == loc && c->vsnode() == this &&
           c->is_free(), "Sanity");
    DEBUG_ONLY(c->verify();)

    UL2(debug, "new root chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
    return c;
  }
  return nullptr; // Node is full.
}

// Given a chunk c, split it recursively until you get a chunk of the given target_level.
//
// The resulting target chunk resides at the same address as the original chunk.
// The resulting splinters are added to freelists.
void VirtualSpaceNode::split(chunklevel_t target_level, Metachunk* c, FreeChunkListVector* freelists) {
  assert_lock_strong(Metaspace_lock);
  // Get the area associated with this chunk and let it handle the splitting
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
  rca->split(target_level, c, freelists);
}

// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns address of
// the merged chunk; null otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-null value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* VirtualSpaceNode::merge(Metachunk* c, FreeChunkListVector* freelists) {
  assert(c != nullptr && c->is_free(), "Sanity");
  assert_lock_strong(Metaspace_lock);

  // Get the rca associated with this chunk and let it handle the merging
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
  Metachunk* c2 = rca->merge(c, freelists);
  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
  return c2;
}

// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
// enlarge it in place by claiming its trailing buddy.
//
// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
//
// If successful, the follower chunk will be removed from the freelists and the leader chunk c
// will double in size (its level decreases by one).
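//
// For example: if the in-use leader c has level n and its level-n follower buddy
// is currently free, the follower is taken off the freelist and c becomes a
// single level-(n-1) chunk spanning the address range of both buddies.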
//
// Returns true on success, false otherwise.
bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* freelists) {
  assert(c != nullptr && c->is_in_use() && !c->is_root_chunk(), "Sanity");
  assert_lock_strong(Metaspace_lock);

  // Get the rca associated with this chunk and let it handle the enlarging
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  bool rc = rca->attempt_enlarge_chunk(c, freelists);
  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
  if (rc) {
    InternalStats::inc_num_chunks_enlarged();
  }

  return rc;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t scale = K;

  st->print("base " PTR_FORMAT ": ", p2i(base()));
  st->print("reserved=");
  print_scaled_words(st, word_size(), scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words(), word_size(), scale);

  st->cr();
  _root_chunk_area_lut.print_on(st);
  _commit_mask.print_on(st);
}

// Returns size, in words, of committed space in this node alone.
// Note: iterates over commit mask and hence may be a tad expensive on large nodes.
size_t VirtualSpaceNode::committed_words() const {
  return _commit_mask.get_committed_size();
}

#ifdef ASSERT
void VirtualSpaceNode::verify() const {
  MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
  verify_locked();
}

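// Sink for the verification reads below; volatile so the compiler cannot
// optimize those reads away.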
volatile int test_access = 0;

// Verify counters and basic structure. Slow mode: verify all chunks in depth
void VirtualSpaceNode::verify_locked() const {
  assert_lock_strong(Metaspace_lock);
  assert(base() != nullptr, "Invalid base");
  assert(base() == (MetaWord*)_rs.base() &&
         word_size() == _rs.size() / BytesPerWord,
         "Sanity");
  assert_is_aligned(base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert(used_words() <= word_size(), "Sanity");
  // Since we only ever hand out root chunks from a vsnode, top should always be aligned
  // to root chunk size.
  assert_is_aligned(used_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  _commit_mask.verify();

  // Verify memory against commit mask.
  SOMETIMES(
    for (MetaWord* p = base(); p < base() + used_words(); p += os::vm_page_size()) {
      if (_commit_mask.is_committed_address(p)) {
        test_access += *(int*)p;
      }
    }
  )

  assert(committed_words() <= word_size(), "Sanity");
  assert_is_aligned(committed_words(), Settings::commit_granule_words());
  _root_chunk_area_lut.verify();
}

#endif

} // namespace metaspace