/*
 * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/nativeCallStackPrinter.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "utilities/ostream.hpp"

VirtualMemorySnapshot VirtualMemorySummary::_snapshot;

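// Lock-free update of the running peak: retry the CAS until either we have
// published `size`, or another thread has already recorded a peak >= size.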
void VirtualMemory::update_peak(size_t size) {
  size_t peak_sz = peak_size();
  while (peak_sz < size) {
    size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
    if (old_sz == peak_sz) {
      break;
    } else {
      peak_sz = old_sz;
    }
  }
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}

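// All reserved regions tracked by NMT, kept sorted by base address
// (see compare_reserved_region_base below).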
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = nullptr;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != nullptr) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == nullptr) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

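// Track a committed sub-range of this reserved region. The committed list is
// kept sorted, non-overlapping, and maximally merged: overlapping pieces are
// first evicted, then the new range is coalesced with an adjacent neighbor
// that carries the same call stack. Illustrative sketch:
//
//   before:  |--prev--|           |--next--|
//   commit:           |---new----|
//   after:   |------prev+new-----|--next--|   (merged only if stacks match)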
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Does not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head());

  if (next != nullptr) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr);
      next = (prev != nullptr ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, mem_tag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    // The deleted range is at one end of this region; just shrink it.
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // The deleted range is in the middle: split this region in two.
    address top = rgn->end();
    // use this region for the lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == nullptr || node->next() == high_node, "Should be right after");
    return (high_node != nullptr);
  }
}

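// Uncommit [addr, addr + sz) from this reserved region. While walking the
// sorted list, each committed region relates to the deleted range in one of
// four ways: it is an exact match (unlink it), it lies fully inside the range
// (unlink it and keep walking), it contains the range's start (trim its top
// or split it), or it contains only the range's end (trim its bottom and stop).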
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != nullptr, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
  CommittedMemoryRegion* crgn;

  while (head != nullptr) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // don't update head or prev
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, mem_tag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

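// Move every committed region at or above `addr` from this reserved region
// into `rgn`. Used when a reserved region is split in two, so that the upper
// half takes along the committed regions that now belong to it.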
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != nullptr, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;

  while (head != nullptr) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != nullptr) {
    if (prev != nullptr) {
      // Detach the whole tail [head, ...) from this list; it moves to rgn.
      prev->set_next(nullptr);
    } else {
      _committed_regions.set_head(nullptr);
    }
  }

  rgn._committed_regions.set_head(head);
}

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != nullptr) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_mem_tag(MemTag new_mem_tag) {
  assert((mem_tag() == mtNone || mem_tag() == new_mem_tag),
         "Overwrite memory tag for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)mem_tag(), (unsigned)new_mem_tag);
  if (mem_tag() != new_mem_tag) {
    VirtualMemorySummary::move_reserved_memory(mem_tag(), new_mem_tag, size());
    VirtualMemorySummary::move_committed_memory(mem_tag(), new_mem_tag, committed_size());
    _mem_tag = new_mem_tag;
  }
}

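// For a thread stack, return the lowest address that is not committed: any
// committed region that stops short of the reserved top is taken to be guard
// pages and skipped, since the stack grows downward from the reserved top.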
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(mem_tag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != nullptr) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  assert(_reserved_regions == nullptr, "only call once");
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != nullptr);
  }
  return true;
}

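// Record a reserved region. The common case adds a new entry; the remaining
// branches handle a recursive re-reservation of the same range, a stale thread
// stack left behind by an undetached JNI thread, and mappings that fall inside
// an already-recorded CDS, Java heap, or code region.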
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MemTag mem_tag) {
  assert(base_addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(base_addr, size, stack, mem_tag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", %zu)",
                rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == nullptr) {
    VirtualMemorySummary::record_reserved_memory(size, mem_tag);
    return _reserved_regions->add(rgn) != nullptr;
  } else {
    // Deal with recursive reservation:
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->mem_tag() == mem_tag || reserved_rgn->mem_tag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_mem_tag(mem_tag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread may
      // exit without detaching from the VM, which leaks its JavaThread object.
      if (reserved_rgn->mem_tag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->mem_tag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->mem_tag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), mem_tag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->mem_tag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)",
                      reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) are part of the Java heap.
      if (reserved_rgn->mem_tag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)",
                      reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      if (reserved_rgn->mem_tag() == mtCode) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved code region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.\n"
                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->mem_tag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)mem_tag);
      if (MemTracker::tracking_level() == NMT_detail) {
        tty->print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(tty);
        tty->print_cr("New region allocated from:");
        stack.print_on(tty);
      }
      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MemTag mem_tag) {
  assert(addr != nullptr, "Invalid address");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != nullptr) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->mem_tag() != mem_tag) {
      assert(reserved_rgn->mem_tag() == mtNone, "Overwrite memory tag (should be mtNone, is: \"%s\")",
             NMTUtil::tag_to_name(reserved_rgn->mem_tag()));
      reserved_rgn->set_mem_tag(mem_tag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("Add committed region \'%s\', no reserved region found for (" INTPTR_FORMAT ", %zu)",
                  rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "Add committed region, no reserved region found");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                reserved_rgn->mem_tag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", %zu)", p2i(addr), size);
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  const char* type_name = reserved_rgn->mem_tag_name();  // after remove, info is not complete
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                 type_name, p2i(addr), size, (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != nullptr, "Sanity check");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  // uncommit regions within the released region
  ReservedMemoryRegion backup(*rgn);
  bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  if (!result) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->mem_tag());
  result = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", %zu) from _reserved_regions %s",
                backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", %zu)!",
                  p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->mem_tag() == mtClassShared) {
    if (reserved_rgn->contain_region(addr, size)) {
      // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
      // See also the special handling in VirtualMemoryTracker::add_reserved_region.
      return true;
    }

    if (size > reserved_rgn->size()) {
      // The release spans the whole range from the archive space to the class
      // space, so release the two recorded regions together.
      ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                     (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
      assert(cls_rgn->mem_tag() == mtClass, "Must be class mem tag");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->mem_tag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
      *reserved_rgn->call_stack(), reserved_rgn->mem_tag());

    // use the original region for the lower part
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == nullptr) {
      return false;
    } else {
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping into
// two. The two new mappings are registered under the original region's call
// stack, tagged with mem_tag and split_tag respectively.
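// Illustrative example: splitting [base, base + size) at `split` yields
//   [base, base + split)          tagged mem_tag
//   [base + split, base + size)   tagged split_tag
// with both halves inheriting the original region's call stack.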
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) {

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();

  const char* name = reserved_rgn->mem_tag_name();
  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", %zu) with size %zu",
                name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, mem_tag);
  add_reserved_region(addr + split, size - split, original_stack, split_tag);

  return true;
}

// Iterate over an address range and locate the committed regions within its bounds.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start) {
  }

  // Return true if a committed region is found.
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

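// Typical use (sketch):
//   RegionIterator itr(range_base, range_size);
//   address start;
//   size_t  len;
//   while (itr.next_committed(start, len)) {
//     // [start, start + len) is a committed subrange of the given range
//   }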
bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  const size_t current_size = end() - _current_start;
  if (os::committed_in_range(_current_start, current_size, committed_start, committed_size)) {
    assert(committed_start != nullptr, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    _current_start = committed_start + committed_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (MemTracker::NmtVirtualMemoryLocker::is_safe_to_use()) {
      assert_lock_strong(NmtVirtualMemory_lock);
    }
    if (rgn->mem_tag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (on Alpine and AIX the stack top is not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != nullptr, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // Unaligned stack_size case: trim the committed region to fit the actual stack_size.
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

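// Walk all reserved regions under the NMT virtual memory lock, calling
// walker->do_allocation_site() for each one. Stops and returns false as soon
// as a walker returns false (PrintRegionWalker uses this to signal "found").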
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::NmtVirtualMemoryLocker nvml;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != nullptr) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != nullptr) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

class PrintRegionWalker : public VirtualMemoryWalker {
private:
  const address               _p;
  outputStream*               _st;
  NativeCallStackPrinter      _stackprinter;
public:
  PrintRegionWalker(const void* p, outputStream* st) :
    _p((address)p), _st(st), _stackprinter(st) { }

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->contain_address(_p)) {
      _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
        p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::tag_to_enum_name(rgn->mem_tag()));
      if (MemTracker::tracking_level() == NMT_detail) {
        _stackprinter.print_stack(rgn->call_stack());
        _st->cr();
      }
      return false;
    }
    return true;
  }
};

// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  return !walk_virtual_memory(&walker);
}