/*
 * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"

VirtualMemorySnapshot VirtualMemorySummary::_snapshot;

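// Raise _peak_size to at least `size`. Lock-free: the CAS is retried until
// either this thread publishes a new peak or another thread has already
// raised the peak past `size`.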
void VirtualMemory::update_peak(size_t size) {
  size_t peak_sz = peak_size();
  while (peak_sz < size) {
    size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed);
    if (old_sz == peak_sz) {
      break;
    } else {
      peak_sz = old_sz;
    }
  }
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Only if thread stack is backed by virtual memory
  if (ThreadStackTracker::track_as_vm()) {
    // Snapshot current thread stacks
    VirtualMemoryTracker::snapshot_thread_stacks();
  }
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

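// Comparators used to keep the committed and reserved region lists sorted.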
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

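// Starting at `from`, walk the sorted list and return the last node whose
// region ends at or before `addr`, or nullptr if no region precedes it.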
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = nullptr;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // This region does not fully precede addr; we have searched past the insertion point.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

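// If the region in `node` is adjacent to [addr, addr + size) and was committed
// from the same call stack, expand it to also cover the new range.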
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != nullptr) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == nullptr) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Must contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head());

  if (next != nullptr) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr);
      next = (prev != nullptr ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

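// Carve [addr, addr + size) out of the committed region held by `node`.
// If the range touches either end of the region, the region just shrinks;
// otherwise the region is split and the upper part is added as a new node.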
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Cannot be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == nullptr || node->next() == high_node, "Should be right after");
    return (high_node != nullptr);
  }
}

bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != nullptr, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
  CommittedMemoryRegion* crgn;

  while (head != nullptr) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // head already advanced past the removed node; prev stays
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

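// Detach all committed regions whose base address is at or above `addr` from
// this reserved region's list and hand them over to `rgn`. Used when a
// reserved region is split in two.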
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != nullptr, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = nullptr;

  while (head != nullptr) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != nullptr) {
    if (prev != nullptr) {
      prev->set_next(nullptr); // terminate this list here; head and its successors move to rgn
    } else {
      _committed_regions.set_head(nullptr);
    }
  }

  rgn._committed_regions.set_head(head);
}

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  while (head != nullptr) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f),
         "Overwrite memory type for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

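// Return the lowest address of the uncommitted part of this thread stack
// region; committed guard pages at the low end of the stack are skipped.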
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != nullptr) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = committed_top;
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  assert(_reserved_regions == nullptr, "only call once");
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != nullptr);
  }
  return true;
}

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                rgn.flag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == nullptr) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != nullptr;
  } else {
    // Deal with recursive reservation
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->flag() == flag || reserved_rgn->flag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that exits without detaching from the VM leaks its JavaThread object.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                      reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
      if (MemTracker::tracking_level() == NMT_detail) {
        tty->print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(tty);
        tty->print_cr("New region allocated from:");
        stack.print_on(tty);
      }
      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != nullptr, "Invalid address");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != nullptr) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
             NMTUtil::flag_to_name(reserved_rgn->flag()));
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("Add committed region \'%s\', no reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                  rgn.flag_name(), p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "Add committed region, no reserved region found");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                reserved_rgn->flag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size);
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  const char* flag_name = reserved_rgn->flag_name();  // after remove, info is not complete
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                flag_name, p2i(addr), size, (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != nullptr, "Sanity check");
  assert(_reserved_regions != nullptr, "Sanity check");

  // uncommit regions within the released region
  ReservedMemoryRegion backup(*rgn);
  bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  if (!result) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
  result = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s",
                backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
  return result;
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")!",
                  p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared) {
    if (reserved_rgn->contain_region(addr, size)) {
      // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
      // See special handling in VirtualMemoryTracker::add_reserved_region also.
      return true;
    }

    if (size > reserved_rgn->size()) {
      // This happens when releasing the whole region spanning from the archive
      // space to the class space, so we release both altogether.
      ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                     (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
      assert(cls_rgn->flag() == mtClass, "Must be class type");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
      *reserved_rgn->call_stack(), reserved_rgn->flag());

    // use original region for lower region
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == nullptr) {
      return false;
    } else {
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping into
//  two. The newly created two mappings are registered under the call stack of
//  the original section; their memory flags are given by the flag and
//  split_flag arguments.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != nullptr, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();

  const char* name = reserved_rgn->flag_name();
  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT,
                name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, flag);
  add_reserved_region(addr + split, size - split, original_stack, split_flag);

  return true;
}


// Iterate over the given address range and find committed regions within its bounds.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start) {
  }

  // return true if a committed region is found
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

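// Find the next committed sub-range at or after the current cursor position.
// On success, report it via the out parameters and advance the cursor past it.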
bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  const size_t current_size = end() - _current_start;
  if (os::committed_in_range(_current_start, current_size, committed_start, committed_size)) {
    assert(committed_start != nullptr, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    _current_start = committed_start + committed_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (Alpine and AIX stack top is not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != nullptr, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // unaligned stack_size case: correct the region to fit the actual stack_size
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

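// Iterate over all reserved regions under ThreadCritical, invoking the walker
// for each; stops early and returns false if the walker returns false.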
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != nullptr, "Sanity check");
  ThreadCritical tc;
  // Check that _reserved_regions hasn't been deleted.
  if (_reserved_regions != nullptr) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != nullptr) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

class PrintRegionWalker : public VirtualMemoryWalker {
private:
  const address               _p;
  outputStream*               _st;
public:
  PrintRegionWalker(const void* p, outputStream* st) :
    _p((address)p), _st(st) { }

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->contain_address(_p)) {
      _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
        p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::flag_to_enum_name(rgn->flag()));
      if (MemTracker::tracking_level() == NMT_detail) {
        rgn->call_stack()->print_on(_st);
        _st->cr();
      }
      return false;
    }
    return true;
  }
};

// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  return !walk_virtual_memory(&walker);
}