/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunklevel.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/counters.hpp"
#include "memory/metaspace/freeChunkList.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/rootChunkArea.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "sanitizers/address.hpp"
#include "sanitizers/leak.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

#define LOGFMT         "VsListNode @" PTR_FORMAT " base " PTR_FORMAT " "
#define LOGFMT_ARGS    p2i(this), p2i(_base)

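// Note: the UL/UL2 logging macros used throughout this file (see
// metaspaceCommon.hpp) prepend LOGFMT/LOGFMT_ARGS to every message. As a
// rough sketch of the expansion (details live in metaspaceCommon.hpp):
//
//   UL2(debug, "committed " SIZE_FORMAT " words.", n)
//
// logs something like
//
//   log_debug(metaspace)(LOGFMT "committed " SIZE_FORMAT " words.", LOGFMT_ARGS, n);
//
// so every line carries this node's address and base pointer.
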
#ifdef ASSERT
void check_pointer_is_aligned_to_commit_granule(const MetaWord* p) {
  assert(is_aligned(p, Settings::commit_granule_bytes()),
         "Pointer not aligned to commit granule size: " PTR_FORMAT ".",
         p2i(p));
}
void check_word_size_is_aligned_to_commit_granule(size_t word_size) {
  assert(is_aligned(word_size, Settings::commit_granule_words()),
         "Not aligned to commit granule size: " SIZE_FORMAT ".", word_size);
}
#endif

// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//   returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//   (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(Metaspace_lock);

  // First calculate how large the committed regions in this range are.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  // By how many words we would increase the commit charge
  //  were we to commit the given address range completely.
  const size_t commit_increase_words = word_size - committed_words_in_range;

  UL2(debug, "committing range " PTR_FORMAT ".." PTR_FORMAT " (" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (commit_increase_words == 0) {
    UL(debug, "... already fully committed.");
    return true; // Already fully committed, nothing to do.
  }

  // Before committing any more memory, check limits.
  if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
    UL(debug, "... cannot commit (limit).");
    return false;
  }

  // Commit...
  if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  }

  if (AlwaysPreTouch) {
    os::pretouch_memory(p, p + word_size);
  }

  UL2(debug, "... committed " SIZE_FORMAT " additional words.", commit_increase_words);

  // ... tell commit limiter...
  _commit_limiter->increase_committed(commit_increase_words);

  // ... update counters in containing vslist ...
  _total_committed_words_counter->increment_by(commit_increase_words);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_committed(p, word_size);

#ifdef ASSERT
  // The committed-words counter maintained by the CommitLimiter should equal the sum of
  // committed words in both class and non-class vslists (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) {
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  InternalStats::inc_num_space_committed();
  return true;
}
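
// Hypothetical usage sketch (for illustration only; real callers reach this
// function via ensure_range_is_committed() below, which granule-aligns the
// range first). "node" and "p" are placeholders:
//
//   MutexLocker ml(Metaspace_lock, Mutex::_no_safepoint_check_flag);
//   const size_t sz = Settings::commit_granule_words(); // one granule
//   if (!node->commit_range(p, sz)) {
//     // Hit a commit limit (GC threshold or MaxMetaspaceSize); the caller
//     // must back off, e.g. by triggering a GC or failing the allocation.
//   }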

// Given an address range, ensure it is committed.
//
// The range does not have to be aligned to granule size; however, the function will always
// commit whole granules.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//   returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//   (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// !! Careful:
//    Calling ensure_range_is_committed on a range which contains both committed and uncommitted
//    areas will commit the whole area, thus erasing the content of the existing committed parts.
//    Make sure you never call this on an address range containing live data. !!
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
  assert_lock_strong(Metaspace_lock);
  assert(p != nullptr && word_size > 0, "Sanity");
  MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
  MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());
  return commit_range(p_start, p_end - p_start);
}
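
// Worked example of the rounding above, assuming the default 64K commit
// granule and 8-byte words (so 8192 words per granule), with a granule-aligned
// node base: a request for 400 words starting at p = base + 8000 yields
// p_start = base (align_down) and p_end = base + 16384 (align_up), so two full
// granules get committed even though the request only straddles the first
// granule boundary by a couple hundred words.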

// Given an address range (which has to be aligned to commit granule size):
//  - uncommit it
//  - mark it as uncommitted in the commit mask
void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(Metaspace_lock);

  // First calculate how large the committed regions in this range are.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  UL2(debug, "uncommitting range " PTR_FORMAT ".." PTR_FORMAT " (" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (committed_words_in_range == 0) {
    UL(debug, "... already fully uncommitted.");
    return; // Already fully uncommitted, nothing to do.
  }

  // Uncommit...
  if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
    // Note: this can actually happen, since uncommit may increase the number of mappings.
    fatal("Failed to uncommit metaspace.");
  }

  UL2(debug, "... uncommitted " SIZE_FORMAT " words.", committed_words_in_range);

  // ... tell commit limiter...
  _commit_limiter->decrease_committed(committed_words_in_range);

  // ... and global counters...
  _total_committed_words_counter->decrement_by(committed_words_in_range);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_uncommitted(p, word_size);

#ifdef ASSERT
  // The committed-words counter maintained by the CommitLimiter should equal the sum of
  // committed words in both class and non-class vslists (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif
  InternalStats::inc_num_space_uncommitted();
}
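
// Hypothetical usage sketch ("node", "chunk_base" and "chunk_words" are
// placeholders): unlike commit_range(), uncommitting cannot fail against a
// limit; it only returns memory and decreases the counters.
//
//   MutexLocker ml(Metaspace_lock, Mutex::_no_safepoint_check_flag);
//   node->uncommit_range(chunk_base, chunk_words); // both granule-aligned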

//// creation, destruction ////

VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, bool owns_rs, CommitLimiter* limiter,
                                   SizeCounter* reserve_counter, SizeCounter* commit_counter) :
  _next(nullptr),
  _rs(rs),
  _owns_rs(owns_rs),
  _base((MetaWord*)rs.base()),
  _word_size(rs.size() / BytesPerWord),
  _used_words(0),
  _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord),
  _root_chunk_area_lut((MetaWord*)rs.base(), rs.size() / BytesPerWord),
  _commit_limiter(limiter),
  _total_reserved_words_counter(reserve_counter),
  _total_committed_words_counter(commit_counter)
{
  UL2(debug, "born (word_size " SIZE_FORMAT ").", _word_size);

  // Update reserved counter in vslist.
  _total_reserved_words_counter->increment_by(_word_size);

  assert_is_aligned(_base, chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert_is_aligned(_word_size, chunklevel::MAX_CHUNK_WORD_SIZE);

  // Poison the memory region. It will be unpoisoned later on a per-chunk basis for chunks
  // that are handed to arenas.
  ASAN_POISON_MEMORY_REGION(rs.base(), rs.size());

  // Register the memory region related to Metaspace; Metaspace contains lots of pointers to
  // malloc'ed memory.
  LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
}

// Create a node of a given size (it will create its own space).
VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
                                                CommitLimiter* limiter, SizeCounter* reserve_words_counter,
                                                SizeCounter* commit_words_counter)
{
  DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
  ReservedSpace rs(word_size * BytesPerWord,
                   Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
                   os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
  }
  MemTracker::record_virtual_memory_type(rs.base(), mtMetaspace);
  assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
  InternalStats::inc_num_vsnodes_births();
  return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter);
}
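
// Hypothetical creation sketch (in the VM, the owning VirtualSpaceList creates
// nodes; the size and the counter objects here are placeholders):
//
//   VirtualSpaceNode* node =
//     VirtualSpaceNode::create_node(chunklevel::MAX_CHUNK_WORD_SIZE * 8,
//                                   CommitLimiter::globalLimiter(),
//                                   &reserved_words, &committed_words);
//   // node owns its ReservedSpace and releases it in its destructor.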
266 
267 // Create a node over an existing space
268 VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, CommitLimiter* limiter,
269                                                 SizeCounter* reserve_words_counter, SizeCounter* commit_words_counter)
270 {
271   InternalStats::inc_num_vsnodes_births();
272   return new VirtualSpaceNode(rs, false, limiter, reserve_words_counter, commit_words_counter);
273 }
274 
275 VirtualSpaceNode::~VirtualSpaceNode() {
276   DEBUG_ONLY(verify_locked();)
277 
278   // Unregister memory region related to Metaspace.
279   LSAN_UNREGISTER_ROOT_REGION(_rs.base(), _rs.size());
280 
281   // Undo the poisoning before potentially unmapping memory. This ensures that future mappings at
282   // the same address do not unexpectedly fail with use-after-poison.
283   ASAN_UNPOISON_MEMORY_REGION(_rs.base(), _rs.size());
284 
285   UL(debug, ": dies.");
286 
287   if (_owns_rs) {
288     _rs.release();
289   }
290 
291   // Update counters in vslist
292   size_t committed = committed_words();
293   _total_committed_words_counter->decrement_by(committed);
294   _total_reserved_words_counter->decrement_by(_word_size);
295 
296   // ... and tell commit limiter
297   _commit_limiter->decrease_committed(committed);
298 
299   InternalStats::inc_num_vsnodes_deaths();
300 }

//// Chunk allocation, splitting, merging /////

// Allocate a root chunk from this node. Fails and returns null if the node is full,
//  i.e. if we have used up the whole address space of this node's memory region
//  (if this node backs the compressed class space, this is how we hit
//   CompressedClassSpaceSize).
// Note that this just returns reserved memory; the caller must take care of committing this
//  chunk before using it.
Metachunk* VirtualSpaceNode::allocate_root_chunk() {
  assert_lock_strong(Metaspace_lock);
  assert_is_aligned(free_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  if (free_words() >= chunklevel::MAX_CHUNK_WORD_SIZE) {

    MetaWord* loc = _base + _used_words;
    _used_words += chunklevel::MAX_CHUNK_WORD_SIZE;

    RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(loc);

    // Create a root chunk header and initialize it.
    Metachunk* c = rca->alloc_root_chunk_header(this);
    assert(c->base() == loc && c->vsnode() == this &&
           c->is_free(), "Sanity");
    DEBUG_ONLY(c->verify();)

    UL2(debug, "new root chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
    return c;
  }
  return nullptr; // Node is full.
}
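
// Hypothetical usage sketch ("node" is a placeholder): a fresh root chunk is
// reserved-only, so a caller must commit (at least the used part of) it before
// touching the memory, e.g.:
//
//   Metachunk* c = node->allocate_root_chunk();
//   if (c != nullptr &&
//       node->ensure_range_is_committed(c->base(), Settings::commit_granule_words())) {
//     // The first granule of the chunk is now committed and usable.
//   }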

// Given a chunk c, split it recursively until you get a chunk of the given target_level.
//
// The resulting target chunk resides at the same address as the original chunk.
// The resulting splinters are added to the freelists.
void VirtualSpaceNode::split(chunklevel_t target_level, Metachunk* c, FreeChunkListVector* freelists) {
  assert_lock_strong(Metaspace_lock);
  // Get the area associated with this chunk and let it handle the splitting.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
  rca->split(target_level, c, freelists);
}
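
// Worked example of the buddy-style split: for a free root chunk c (level 0,
// the largest level) and target_level 3, the chunk is halved three times. The
// target chunk of level 3 stays at c's original base; each halving leaves one
// free buddy ("splinter") behind, which goes to the freelists:
//
//   before: |------------------- c (level 0) ------------------|
//   after:  |c: l3|free l3|--- free l2 ---|------ free l1 -----|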

// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns the address of
// the merged chunk; null otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-null value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* VirtualSpaceNode::merge(Metachunk* c, FreeChunkListVector* freelists) {
  assert(c != nullptr && c->is_free(), "Sanity");
  assert_lock_strong(Metaspace_lock);

  // Get the rca associated with this chunk and let it handle the merging.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
  Metachunk* c2 = rca->merge(c, freelists);
  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
  return c2;
}
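
// Worked example: if c is a free level-3 chunk and its buddy (the other half
// of the enclosing level-2 range) is free as well, both are taken out of the
// freelists and fused into a single level-2 chunk; that chunk may then merge
// with its own buddy, and so on, potentially all the way back up to a root
// chunk:
//
//   before: |c: free l3|free l3|------ free l2 ------|
//   after:  |---------------- free l1 ---------------|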

// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
// enlarge it in place by claiming its trailing buddy.
//
// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
//
// If successful, the follower chunk will be removed from the freelists and the leader chunk c
// will double in size (its level decreases by one).
//
// Returns true on success, false otherwise.
bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* freelists) {
  assert(c != nullptr && c->is_in_use() && !c->is_root_chunk(), "Sanity");
  assert_lock_strong(Metaspace_lock);

  // Get the rca associated with this chunk and let it handle the enlarging.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  bool rc = rca->attempt_enlarge_chunk(c, freelists);
  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
  if (rc) {
    InternalStats::inc_num_chunks_enlarged();
  }

  return rc;
}
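
// Worked example: c is an in-use level-3 chunk and the leader of its buddy
// pair. If its trailing level-3 buddy is free, the buddy is removed from the
// freelists and c's level drops to 2, doubling it in place; an arena can thus
// grow its current chunk without relocating data:
//
//   before: |c: in-use l3|free l3|
//   after:  |--- c: in-use l2 ---|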

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t scale = K;

  st->print("base " PTR_FORMAT ": ", p2i(base()));
  st->print("reserved=");
  print_scaled_words(st, word_size(), scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words(), word_size(), scale);

  st->cr();
  _root_chunk_area_lut.print_on(st);
  _commit_mask.print_on(st);
}
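
// Illustrative output sketch (values made up; the exact number formatting is
// up to print_scaled_words() and friends, and the area map and commit mask
// printouts follow on subsequent lines):
//
//   base 0x0000000800000000: reserved=8192.00 KB, committed=1024.00 KB (12%), used=512.00 KB (6%)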

// Returns size, in words, of committed space in this node alone.
// Note: iterates over the commit mask and hence may be a tad expensive on large nodes.
size_t VirtualSpaceNode::committed_words() const {
  return _commit_mask.get_committed_size();
}

#ifdef ASSERT
void VirtualSpaceNode::verify() const {
  MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
  verify_locked();
}

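// test_access is a volatile sink for the verification probes below: having
// verify_locked() accumulate one int per probed address into it keeps the
// compiler from optimizing the loads away. A crash on one of these loads
// would indicate that the commit mask disagrees with the actually committed
// memory.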
volatile int test_access = 0;

// Verify counters and basic structure. Slow mode: verify all chunks in depth.
void VirtualSpaceNode::verify_locked() const {
  assert_lock_strong(Metaspace_lock);
  assert(base() != nullptr, "Invalid base");
  assert(base() == (MetaWord*)_rs.base() &&
         word_size() == _rs.size() / BytesPerWord,
         "Sanity");
  assert_is_aligned(base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert(used_words() <= word_size(), "Sanity");
  // Since we only ever hand out root chunks from a vsnode, top should always be aligned
  // to root chunk size.
  assert_is_aligned(used_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  _commit_mask.verify();

  // Verify memory against the commit mask.
  SOMETIMES(
    for (MetaWord* p = base(); p < base() + used_words(); p += os::vm_page_size()) {
      if (_commit_mask.is_committed_address(p)) {
        test_access += *(int*)p;
      }
    }
  )

  assert(committed_words() <= word_size(), "Sanity");
  assert_is_aligned(committed_words(), Settings::commit_granule_words());
  _root_chunk_area_lut.verify();
}

#endif

} // namespace metaspace