/*
 * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/nativeCallStackPrinter.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"

VirtualMemorySnapshot VirtualMemorySummary::_snapshot;

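// Atomically raise the recorded peak to `size` if it is larger. The CAS loop
// retries with the freshly observed peak until either the store succeeds or
// another thread has already published a peak at least as large; relaxed
// ordering suffices because the peak is only a statistic, not a
// synchronization point.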
void VirtualMemory::update_peak(size_t size) {
  size_t peak_sz = peak_size();
  while (peak_sz < size) {
    size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
    if (old_sz == peak_sz) {
      break;
    } else {
      peak_sz = old_sz;
    }
  }
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

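// Walk the sorted committed-region list starting at `from` and return the last
// node whose region ends at or before `addr`, or nullptr if there is none.
// For example, with regions [a, b) and [c, d) and b <= addr < d, the [a, b)
// node is returned and [c, d) is its successor.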
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = nullptr;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != nullptr) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == nullptr) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

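// Record a newly committed sub-range of this reserved region. The committed
// list is kept sorted, non-overlapping and maximally merged:
//   1. locate the nodes bracketing [addr, addr + size),
//   2. return early if an identical region (same range and call stack) is
//      already recorded,
//   3. carve out all overlapping regions, then
//   4. merge with a neighbor whose range is adjacent and whose call stack
//      matches, or insert a fresh region otherwise.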
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Region not contained");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head());

  if (next != nullptr) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr);
      next = (prev != nullptr ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

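// Remove [addr, addr + size) from the committed region held by `node`; the
// range must be a proper sub-range of that region. If it touches the region's
// base or end, the region is simply shrunk; otherwise the region is split,
// e.g. removing [b, c) from [a, d) leaves [a, b) in place and inserts a new
// [c, d) node directly after it.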
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == nullptr || node->next() == high_node, "Should be right after");
    return (high_node != nullptr);
  }
}

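// Remove [addr, addr + sz) from this reserved region's committed list. The
// range may span several committed regions, so each node is handled by case:
// an exact match is unlinked; a fully covered region is unlinked and the scan
// continues; a region containing `addr` is trimmed at its top (or split, if
// it also contains the end); and a region containing only the end is trimmed
// at its base. Uncommitted sizes are reported to VirtualMemorySummary as they
// are found.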
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != nullptr, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
  CommittedMemoryRegion* crgn;

  while (head != nullptr) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // head already advanced; prev stays where it is
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

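// Move every committed region that lies at or above `addr` from this reserved
// region into `rgn`. The committed list is sorted by base address, so this
// amounts to cutting the list at the first node with base() >= addr and
// handing the tail over to the target region.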
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != nullptr, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;

  while (head != nullptr) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != nullptr) {
    if (prev != nullptr) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(nullptr);
    }
  }

  rgn._committed_regions.set_head(head);
}

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != nullptr) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f),
         "Overwrite memory type for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

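// For a thread stack region, return the lowest uncommitted address. Committed
// regions that end below the stack top are guard pages and are skipped; the
// committed range actually backing the stack is expected to reach the top.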
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != nullptr) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

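// Called once during NMT setup; the sorted region list is only needed when at
// least summary-level tracking is enabled.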
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  assert(_reserved_regions == nullptr, "only call once");
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != nullptr);
  }
  return true;
}

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                rgn.flag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == nullptr) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != nullptr;
  } else {
    // Deal with recursive reservation
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->flag() == flag || reserved_rgn->flag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // This can happen with thread stacks: a JNI thread that exits without
      // detaching from the VM leaks its JavaThread object, including the
      // stack region recorded here.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS heap region.
      if (reserved_rgn->flag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      if (reserved_rgn->flag() == mtCode) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved code region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
      if (MemTracker::tracking_level() == NMT_detail) {
        tty->print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(tty);
        tty->print_cr("New region allocated from:");
        stack.print_on(tty);
      }
      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != nullptr, "Invalid address");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != nullptr) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
             NMTUtil::flag_to_name(reserved_rgn->flag()));
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                  rgn.flag_name(), p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "Add committed region, No reserved region found");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                reserved_rgn->flag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size);
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  const char* flag_name = reserved_rgn->flag_name();  // after remove, info is not complete
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                flag_name, p2i(addr), size, (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != nullptr, "Sanity check");
  assert(_reserved_regions != nullptr, "Sanity check");

  // uncommit regions within the released region
  ReservedMemoryRegion backup(*rgn);
  bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  if (!result) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
  result = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")!",
                  p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared) {
    if (reserved_rgn->contain_region(addr, size)) {
      // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
      // See special handling in VirtualMemoryTracker::add_reserved_region also.
      return true;
    }

    if (size > reserved_rgn->size()) {
      // The release spans both the archive space and the class space,
      // so release the two recorded regions together.
      ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                     (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
      assert(cls_rgn->flag() == mtClass, "Must be class type");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
      *reserved_rgn->call_stack(), reserved_rgn->flag());

    // use original region for lower region
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == nullptr) {
      return false;
    } else {
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping in
//  two. The two new mappings are registered under the original mapping's call
//  stack and the given memory flags.
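//  For example, splitting [addr, addr + size) at `split` yields
//  [addr, addr + split) tagged `flag` and [addr + split, addr + size) tagged
//  `split_flag`.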
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();

  const char* name = reserved_rgn->flag_name();
  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT,
                name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, flag);
  add_reserved_region(addr + split, size - split, original_stack, split_flag);

  return true;
}


// Iterate the given range and find committed regions within its bounds.
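// A usage sketch (as in SnapshotThreadStackWalker below):
//   RegionIterator itr(base, size);
//   address start; size_t sz;
//   while (itr.next_committed(start, sz)) { /* process [start, start + sz) */ }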
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start) {
  }

  // return true if committed region is found
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  const size_t current_size = end() - _current_start;
  if (os::committed_in_range(_current_start, current_size, committed_start, committed_size)) {
    assert(committed_start != nullptr, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    _current_start = committed_start + committed_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (Alpine and AIX stack top is not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != nullptr, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // unaligned stack_size case: correct the region to fit the actual stack_size
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

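// Visit every reserved region under ThreadCritical. The walk stops early if
// the walker returns false for a region.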
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != nullptr, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != nullptr) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != nullptr) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

class PrintRegionWalker : public VirtualMemoryWalker {
private:
  const address               _p;
  outputStream*               _st;
  NativeCallStackPrinter      _stackprinter;
public:
  PrintRegionWalker(const void* p, outputStream* st) :
    _p((address)p), _st(st), _stackprinter(st) { }

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->contain_address(_p)) {
      _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
        p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::flag_to_enum_name(rgn->flag()));
      if (MemTracker::tracking_level() == NMT_detail) {
        _stackprinter.print_stack(rgn->call_stack());
        _st->cr();
      }
      return false;
    }
    return true;
  }
};

// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  return !walk_virtual_memory(&walker);
}