/*
 * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"

VirtualMemorySnapshot VirtualMemorySummary::_snapshot;

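// Atomically raise the recorded peak to at least "size". The CAS loop
// re-reads the current peak whenever it loses a race, so concurrent
// updaters can only ever raise the value, never lower it.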
void VirtualMemory::update_peak(size_t size) {
  size_t peak_sz = peak_size();
  while (peak_sz < size) {
    size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
    if (old_sz == peak_sz) {
      break;
    } else {
      peak_sz = old_sz;
    }
  }
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Only if thread stacks are backed by virtual memory
  if (ThreadStackTracker::track_as_vm()) {
    // Snapshot current thread stacks
    VirtualMemoryTracker::snapshot_thread_stacks();
  }
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

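// Committed regions may only be folded together if they are adjacent and
// were committed from the same call stack; merging across different stacks
// would lose allocation-site information in detail reports.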
static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

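// Returns the last node in the sorted list whose region ends at or before
// "addr", i.e. the node after which a region starting at "addr" would be
// inserted; nullptr if no such node exists.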
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = nullptr;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

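// Expand node's region to also cover the given range (or, in the second
// overload, the region held by another node) if the two are mergeable;
// returns true on success.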
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != nullptr) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == nullptr) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

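// Record a committed range inside this reserved region: find the insertion
// point in the sorted list, remove any overlapping committed regions first,
// then merge the new range with its neighbors where possible, or insert it
// as a new region otherwise.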
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Does not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head());

  if (next != nullptr) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr);
      next = (prev != nullptr ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

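// Carve [addr, addr + size) out of the single committed region held by
// "node". If the range touches either end of the region, the region is
// shrunk; if it is strictly interior, the region is split in two.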
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Cannot be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for the lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == nullptr || node->next() == high_node, "Should be right after");
    return (high_node != nullptr);
  }
}

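// Remove [addr, addr + sz) from the committed regions of this reserved
// region. The deleted range may span several committed regions, so the walk
// handles an exact match, full containment of a committed region, and
// partial overlap at either end of the deleted range.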
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != nullptr, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
  CommittedMemoryRegion* crgn;

  while (head != nullptr) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // don't update head or prev
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

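// Move all committed regions that start at or above "addr" from this
// reserved region into "rgn". Used when a reserved region is split in two.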
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != nullptr, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;

  while (head != nullptr) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != nullptr) {
    if (prev != nullptr) {
      // Terminate the lower list at prev; the tail starting at "head"
      // now belongs exclusively to "rgn".
      prev->set_next(nullptr);
    } else {
      _committed_regions.set_head(nullptr);
    }
  }

  rgn._committed_regions.set_head(head);
}

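// Sum of the sizes of all committed regions within this reserved region.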
size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != nullptr) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f),
         "Overwrite memory type for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

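// Return the uncommitted bottom of this thread stack region: walk the
// committed regions upwards from the stack base, skipping committed ranges
// low in the stack (e.g. guard pages), until the range backing the live
// stack top is reached.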
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != nullptr) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  assert(_reserved_regions == nullptr, "only call once");
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != nullptr);
  }
  return true;
}

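// Record a newly reserved region. If an entry for the same or an overlapping
// range already exists, the overlap is either benign (recursive reservation,
// CDS mappings, the stack of an exited JNI thread) and is folded into the
// existing entry, or it is reported as a tracking error (ShouldNotReachHere).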
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                rgn.flag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == nullptr) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != nullptr;
  } else {
    // Deal with recursive reservation
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->flag() == flag || reserved_rgn->flag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that exits without detaching from the VM leaks its JavaThread
      // object.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS heap region.
      if (reserved_rgn->flag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      if (reserved_rgn->flag() == mtCode) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved code region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
      if (MemTracker::tracking_level() == NMT_detail) {
        tty->print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(tty);
        tty->print_cr("New region allocated from:");
        stack.print_on(tty);
      }
      ShouldNotReachHere();
      return false;
    }
  }
}


void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != nullptr, "Invalid address");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != nullptr) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
             NMTUtil::flag_to_name(reserved_rgn->flag()));
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                  rgn.flag_name(), p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "Add committed region, No reserved region found");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                reserved_rgn->flag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size);
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  const char* flag_name = reserved_rgn->flag_name();  // after remove, info is not complete
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                flag_name, p2i(addr), size, (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != nullptr, "Sanity check");
  assert(_reserved_regions != nullptr, "Sanity check");

  // uncommit regions within the released region
  ReservedMemoryRegion backup(*rgn);
  bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  if (!result) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
  result = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")!",
                  p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared) {
    if (reserved_rgn->contain_region(addr, size)) {
      // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
      // See special handling in VirtualMemoryTracker::add_reserved_region also.
      return true;
    }

    if (size > reserved_rgn->size()) {
      // The release spans both the archive space and the class space, so
      // release the two regions together.
      ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                     (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
      assert(cls_rgn->flag() == mtClass, "Must be class type");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
      *reserved_rgn->call_stack(), reserved_rgn->flag());

    // use the original region for the lower part
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == nullptr) {
      return false;
    } else {
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping in
//  two. The two newly created mappings are registered under the call stack
//  of the original section and the supplied memory flags.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();

  const char* name = reserved_rgn->flag_name();
  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT,
                name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, flag);
  add_reserved_region(addr + split, size - split, original_stack, split_flag);

  return true;
}


// Iterate the range, find committed regions within its bounds.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start) {
  }

  // return true if committed region is found
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};
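
// A sketch of the intended use (mirrors SnapshotThreadStackWalker below):
//
//   RegionIterator itr(range_start, range_size);
//   address start;
//   size_t  size;
//   while (itr.next_committed(start, size)) {
//     // [start, start + size) is a committed subrange of the given range
//   }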

bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t current_size = end() - _current_start;
  if (os::committed_in_range(_current_start, current_size, committed_start, committed_size)) {
    assert(committed_start != nullptr, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, os::vm_page_size()), "Must be");

    _current_start = committed_start + committed_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (Alpine and AIX stack top is not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != nullptr, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // unaligned stack_size case: correct the region to fit the actual stack_size
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != nullptr, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != nullptr) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != nullptr) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

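// Walker that prints details of the region containing _p, then stops the
// walk by returning false.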
class PrintRegionWalker : public VirtualMemoryWalker {
private:
  const address               _p;
  outputStream*               _st;
public:
  PrintRegionWalker(const void* p, outputStream* st) :
    _p((address)p), _st(st) { }

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->contain_address(_p)) {
      _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
        p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::flag_to_enum_name(rgn->flag()));
      if (MemTracker::tracking_level() == NMT_detail) {
        rgn->call_stack()->print_on(_st);
        _st->cr();
      }
      return false;
    }
    return true;
  }
};

// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  return !walk_virtual_memory(&walker);
}