/*
 * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/nativeCallStackPrinter.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "utilities/ostream.hpp"

VirtualMemorySnapshot VirtualMemorySummary::_snapshot;

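// Atomically raise the recorded peak to at least `size`, retrying the CAS if
// another thread concurrently updated the peak.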
void VirtualMemory::update_peak(size_t size) {
  size_t peak_sz = peak_size();
  while (peak_sz < size) {
    size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
    if (old_sz == peak_sz) {
      break;
    } else {
      peak_sz = old_sz;
    }
  }
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

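// Ordering functions for the sorted committed and reserved region lists.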
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

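// Starting at `from`, return the last node whose region ends at or before
// `addr`, or null if there is no such node.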
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = nullptr;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

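// Expand the region in `node` to cover [addr, addr + size), provided the two
// ranges are adjacent and share the same call stack.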
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != nullptr) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == nullptr) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

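// Record a committed sub-range within this reserved region. Overlapping
// committed regions are first removed; the new range is then merged with
// adjacent regions committed from the same call stack, or inserted as a new
// committed region.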
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Does not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head());

  if (next != nullptr) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr);
      next = (prev != nullptr ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, mem_tag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

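// Remove [addr, addr + size) from the committed region held in `node`, either
// trimming one end of the region or splitting it in two. The range must be
// strictly contained in the region; counters are adjusted by the caller.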
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // keep this region for the lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == nullptr || node->next() == high_node, "Should be right after");
    return (high_node != nullptr);
  }
}

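// Remove all committed memory within [addr, addr + sz), updating the summary
// counters. The range may match, contain, or partially overlap any number of
// committed regions in the sorted list.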
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != nullptr, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
  CommittedMemoryRegion* crgn;

  while (head != nullptr) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // head already advanced; prev stays put
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, mem_tag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

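// Move the committed regions starting at or above `addr` from this reserved
// region into `rgn`; used when a reserved region is split.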
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != nullptr, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;

  while (head != nullptr) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != nullptr) {
    if (prev != nullptr) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(nullptr);
    }
  }

  rgn._committed_regions.set_head(head);
}

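// Sum of the sizes of all committed regions within this reserved region.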
size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != nullptr) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_mem_tag(MemTag new_mem_tag) {
  assert((mem_tag() == mtNone || mem_tag() == new_mem_tag),
         "Overwrite memory tag for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)mem_tag(), (unsigned)new_mem_tag);
  if (mem_tag() != new_mem_tag) {
    VirtualMemorySummary::move_reserved_memory(mem_tag(), new_mem_tag, size());
    VirtualMemorySummary::move_committed_memory(mem_tag(), new_mem_tag, committed_size());
    _mem_tag = new_mem_tag;
  }
}

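// For a thread stack region, return the lowest uncommitted address: committed
// guard pages at the bottom of the stack are skipped.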
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(mem_tag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != nullptr) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

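// Set up the tracker. The reserved region list is only needed for summary
// tracking and above.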
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  assert(_reserved_regions == nullptr, "only call once");
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != nullptr);
  }
  return true;
}

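// Record a newly reserved region. Re-reservations are tolerated in known
// benign cases (recursive os::reserve_memory() calls, stacks of exited JNI
// threads, CDS archive and heap mappings); any other overlap is fatal.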
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MemTag mem_tag) {
  assert(base_addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(base_addr, size, stack, mem_tag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", %zu)",
                rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == nullptr) {
    VirtualMemorySummary::record_reserved_memory(size, mem_tag);
    return _reserved_regions->add(rgn) != nullptr;
  } else {
    // Deal with recursive reservation
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->mem_tag() == mem_tag || reserved_rgn->mem_tag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_mem_tag(mem_tag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that exits without detaching from the VM leaks its JavaThread object.
      if (reserved_rgn->mem_tag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->mem_tag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->mem_tag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), mem_tag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->mem_tag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)",
                      reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) are part of the java heap.
      if (reserved_rgn->mem_tag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)",
                      reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.\n"
                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->mem_tag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)mem_tag);
      if (MemTracker::tracking_level() == NMT_detail) {
        tty->print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(tty);
        tty->print_cr("New region allocated from:");
        stack.print_on(tty);
      }
      ShouldNotReachHere();
      return false;
    }
  }
}

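// Tag the reserved region containing `addr`. A region's tag may only change
// from mtNone.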
void VirtualMemoryTracker::set_reserved_region_type(address addr, MemTag mem_tag) {
  assert(addr != nullptr, "Invalid address");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != nullptr) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->mem_tag() != mem_tag) {
      assert(reserved_rgn->mem_tag() == mtNone, "Overwrite memory tag (should be mtNone, is: \"%s\")",
             NMTUtil::tag_to_name(reserved_rgn->mem_tag()));
      reserved_rgn->set_mem_tag(mem_tag);
    }
  }
}

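// Record committed memory within an existing reserved region.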
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("Add committed region \'%s\', no reserved region found for (" INTPTR_FORMAT ", %zu)",
                  rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "Add committed region, No reserved region found");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                reserved_rgn->mem_tag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

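// Record uncommitted memory within an existing reserved region.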
bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", %zu)", p2i(addr), size);
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  const char* type_name = reserved_rgn->mem_tag_name();  // after remove, info is not complete
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                 type_name, p2i(addr), size, (result ? "Succeeded" : "Failed"));
  return result;
}

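// Release a whole reserved region: uncommit all memory within it, then remove
// it from the reserved region list.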
bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != nullptr, "Sanity check");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  // uncommit regions within the released region
  ReservedMemoryRegion backup(*rgn);
  bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  if (!result) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->mem_tag());
  result = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", %zu) from _reserved_regions %s",
                backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

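// Release part or all of a reserved region. Handles the exact-match case, CDS
// special cases, trimming at either end, and splitting the region in two.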
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", %zu)!",
                  p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->mem_tag() == mtClassShared) {
    if (reserved_rgn->contain_region(addr, size)) {
      // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
      // See special handling in VirtualMemoryTracker::add_reserved_region also.
      return true;
    }

    if (size > reserved_rgn->size()) {
      // The release spans the whole region from the archive space to the
      // class space, so release both regions altogether.
      ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                     (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
      assert(cls_rgn->mem_tag() == mtClass, "Must be class mem tag");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->mem_tag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
      *reserved_rgn->call_stack(), reserved_rgn->mem_tag());

    // use the original region for the lower part
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == nullptr) {
      return false;
    } else {
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping in
// two. The two new mappings inherit the call stack of the original mapping
// and are tagged with mem_tag and split_tag, respectively.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) {

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();
  MemTag original_tag = reserved_rgn->mem_tag();

  const char* name = reserved_rgn->mem_tag_name();
  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", %zu) with size %zu",
                name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, mem_tag);
  add_reserved_region(addr + split, size - split, original_stack, split_tag);

  return true;
}

// Iterate the range, finding committed regions within its bounds.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start) {
  }

  // Return true if a committed region was found.
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  const size_t current_size = end() - _current_start;
  if (os::committed_in_range(_current_start, current_size, committed_start, committed_size)) {
    assert(committed_start != nullptr, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    _current_start = committed_start + committed_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (MemTracker::NmtVirtualMemoryLocker::is_safe_to_use()) {
      assert_lock_strong(NmtVirtualMemory_lock);
    }
    if (rgn->mem_tag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (Alpine and AIX stack tops are not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != nullptr, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // Unaligned stack_size case: trim the region to fit the actual stack_size.
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

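// Visit all known reserved regions while holding the NMT lock. Stops and
// returns false as soon as a walker callback returns false.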
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::NmtVirtualMemoryLocker nvml;
  // Check that _reserved_regions hasn't been deleted.
  if (_reserved_regions != nullptr) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != nullptr) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

class PrintRegionWalker : public VirtualMemoryWalker {
private:
  const address               _p;
  outputStream*               _st;
  NativeCallStackPrinter      _stackprinter;
public:
  PrintRegionWalker(const void* p, outputStream* st) :
    _p((address)p), _st(st), _stackprinter(st) { }

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->contain_address(_p)) {
      _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
        p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::tag_to_enum_name(rgn->mem_tag()));
      if (MemTracker::tracking_level() == NMT_detail) {
        _stackprinter.print_stack(rgn->call_stack());
        _st->cr();
      }
      return false;
    }
    return true;
  }
};

// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  return !walk_virtual_memory(&walker);
}