1 /* 2 * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 #include "precompiled.hpp" 25 #include "logging/log.hpp" 26 #include "memory/metaspaceStats.hpp" 27 #include "memory/metaspaceUtils.hpp" 28 #include "nmt/memTracker.hpp" 29 #include "nmt/nativeCallStackPrinter.hpp" 30 #include "nmt/threadStackTracker.hpp" 31 #include "nmt/virtualMemoryTracker.hpp" 32 #include "runtime/os.hpp" 33 #include "runtime/threadCritical.hpp" 34 #include "utilities/ostream.hpp" 35 36 VirtualMemorySnapshot VirtualMemorySummary::_snapshot; 37 38 void VirtualMemory::update_peak(size_t size) { 39 size_t peak_sz = peak_size(); 40 while (peak_sz < size) { 41 size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed); 42 if (old_sz == peak_sz) { 43 break; 44 } else { 45 peak_sz = old_sz; 46 } 47 } 48 } 49 50 void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) { 51 // Snapshot current thread stacks 52 VirtualMemoryTracker::snapshot_thread_stacks(); 53 as_snapshot()->copy_to(s); 54 } 55 56 SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions; 57 58 int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) { 59 return r1.compare(r2); 60 } 61 62 int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) { 63 return r1.compare(r2); 64 } 65 66 static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) { 67 return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack); 68 } 69 70 static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) { 71 // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions. 72 return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack); 73 } 74 75 static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) { 76 LinkedListNode<CommittedMemoryRegion>* preceding = nullptr; 77 78 for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) { 79 CommittedMemoryRegion* rgn = node->data(); 80 81 // We searched past the region start. 82 if (rgn->end() > addr) { 83 break; 84 } 85 86 preceding = node; 87 } 88 89 return preceding; 90 } 91 92 static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) { 93 if (node != nullptr) { 94 CommittedMemoryRegion* rgn = node->data(); 95 96 if (is_mergeable_with(rgn, addr, size, stack)) { 97 rgn->expand_region(addr, size); 98 return true; 99 } 100 } 101 102 return false; 103 } 104 105 static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) { 106 if (other == nullptr) { 107 return false; 108 } 109 110 CommittedMemoryRegion* rgn = other->data(); 111 return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack()); 112 } 113 114 bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) { 115 assert(addr != nullptr, "Invalid address"); 116 assert(size > 0, "Invalid size"); 117 assert(contain_region(addr, size), "Not contain this region"); 118 119 // Find the region that fully precedes the [addr, addr + size) region. 120 LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr); 121 LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head()); 122 123 if (next != nullptr) { 124 // Ignore request if region already exists. 125 if (is_same_as(next->data(), addr, size, stack)) { 126 return true; 127 } 128 129 // The new region is after prev, and either overlaps with the 130 // next region (and maybe more regions), or overlaps with no region. 131 if (next->data()->overlap_region(addr, size)) { 132 // Remove _all_ overlapping regions, and parts of regions, 133 // in preparation for the addition of this new region. 134 remove_uncommitted_region(addr, size); 135 136 // The remove could have split a region into two and created a 137 // new prev region. Need to reset the prev and next pointers. 138 prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr); 139 next = (prev != nullptr ? prev->next() : _committed_regions.head()); 140 } 141 } 142 143 // At this point the previous overlapping regions have been 144 // cleared, and the full region is guaranteed to be inserted. 145 VirtualMemorySummary::record_committed_memory(size, mem_tag()); 146 147 // Try to merge with prev and possibly next. 148 if (try_merge_with(prev, addr, size, stack)) { 149 if (try_merge_with(prev, next)) { 150 // prev was expanded to contain the new region 151 // and next, need to remove next from the list 152 _committed_regions.remove_after(prev); 153 } 154 155 return true; 156 } 157 158 // Didn't merge with prev, try with next. 159 if (try_merge_with(next, addr, size, stack)) { 160 return true; 161 } 162 163 // Couldn't merge with any regions - create a new region. 164 return add_committed_region(CommittedMemoryRegion(addr, size, stack)); 165 } 166 167 bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node, 168 address addr, size_t size) { 169 assert(addr != nullptr, "Invalid address"); 170 assert(size > 0, "Invalid size"); 171 172 CommittedMemoryRegion* rgn = node->data(); 173 assert(rgn->contain_region(addr, size), "Has to be contained"); 174 assert(!rgn->same_region(addr, size), "Can not be the same region"); 175 176 if (rgn->base() == addr || 177 rgn->end() == addr + size) { 178 rgn->exclude_region(addr, size); 179 return true; 180 } else { 181 // split this region 182 address top =rgn->end(); 183 // use this region for lower part 184 size_t exclude_size = rgn->end() - addr; 185 rgn->exclude_region(addr, exclude_size); 186 187 // higher part 188 address high_base = addr + size; 189 size_t high_size = top - high_base; 190 191 CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack()); 192 LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn); 193 assert(high_node == nullptr || node->next() == high_node, "Should be right after"); 194 return (high_node != nullptr); 195 } 196 197 return false; 198 } 199 200 bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) { 201 assert(addr != nullptr, "Invalid address"); 202 assert(sz > 0, "Invalid size"); 203 204 CommittedMemoryRegion del_rgn(addr, sz, *call_stack()); 205 address end = addr + sz; 206 207 LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head(); 208 LinkedListNode<CommittedMemoryRegion>* prev = nullptr; 209 CommittedMemoryRegion* crgn; 210 211 while (head != nullptr) { 212 crgn = head->data(); 213 214 if (crgn->same_region(addr, sz)) { 215 VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag()); 216 _committed_regions.remove_after(prev); 217 return true; 218 } 219 220 // del_rgn contains crgn 221 if (del_rgn.contain_region(crgn->base(), crgn->size())) { 222 VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag()); 223 head = head->next(); 224 _committed_regions.remove_after(prev); 225 continue; // don't update head or prev 226 } 227 228 // Found addr in the current crgn. There are 2 subcases: 229 if (crgn->contain_address(addr)) { 230 231 // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn) 232 if (crgn->contain_address(end - 1)) { 233 VirtualMemorySummary::record_uncommitted_memory(sz, mem_tag()); 234 return remove_uncommitted_region(head, addr, sz); // done! 235 } else { 236 // (2) Did not find del_rgn's end in crgn. 237 size_t size = crgn->end() - del_rgn.base(); 238 crgn->exclude_region(addr, size); 239 VirtualMemorySummary::record_uncommitted_memory(size, mem_tag()); 240 } 241 242 } else if (crgn->contain_address(end - 1)) { 243 // Found del_rgn's end, but not its base addr. 244 size_t size = del_rgn.end() - crgn->base(); 245 crgn->exclude_region(crgn->base(), size); 246 VirtualMemorySummary::record_uncommitted_memory(size, mem_tag()); 247 return true; // should be done if the list is sorted properly! 248 } 249 250 prev = head; 251 head = head->next(); 252 } 253 254 return true; 255 } 256 257 void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) { 258 assert(addr != nullptr, "Invalid address"); 259 260 // split committed regions 261 LinkedListNode<CommittedMemoryRegion>* head = 262 _committed_regions.head(); 263 LinkedListNode<CommittedMemoryRegion>* prev = nullptr; 264 265 while (head != nullptr) { 266 if (head->data()->base() >= addr) { 267 break; 268 } 269 prev = head; 270 head = head->next(); 271 } 272 273 if (head != nullptr) { 274 if (prev != nullptr) { 275 prev->set_next(head->next()); 276 } else { 277 _committed_regions.set_head(nullptr); 278 } 279 } 280 281 rgn._committed_regions.set_head(head); 282 } 283 284 size_t ReservedMemoryRegion::committed_size() const { 285 size_t committed = 0; 286 LinkedListNode<CommittedMemoryRegion>* head = 287 _committed_regions.head(); 288 while (head != nullptr) { 289 committed += head->data()->size(); 290 head = head->next(); 291 } 292 return committed; 293 } 294 295 void ReservedMemoryRegion::set_mem_tag(MemTag new_mem_tag) { 296 assert((mem_tag() == mtNone || mem_tag() == new_mem_tag), 297 "Overwrite memory tag for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.", 298 p2i(base()), p2i(end()), (unsigned)mem_tag(), (unsigned)new_mem_tag); 299 if (mem_tag() != new_mem_tag) { 300 VirtualMemorySummary::move_reserved_memory(mem_tag(), new_mem_tag, size()); 301 VirtualMemorySummary::move_committed_memory(mem_tag(), new_mem_tag, committed_size()); 302 _mem_tag = new_mem_tag; 303 } 304 } 305 306 address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const { 307 assert(mem_tag() == mtThreadStack, "Only for thread stack"); 308 LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head(); 309 address bottom = base(); 310 address top = base() + size(); 311 while (head != nullptr) { 312 address committed_top = head->data()->base() + head->data()->size(); 313 if (committed_top < top) { 314 // committed stack guard pages, skip them 315 bottom = head->data()->base() + head->data()->size(); 316 head = head->next(); 317 } else { 318 assert(top == committed_top, "Sanity"); 319 break; 320 } 321 } 322 323 return bottom; 324 } 325 326 bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) { 327 assert(_reserved_regions == nullptr, "only call once"); 328 if (level >= NMT_summary) { 329 _reserved_regions = new (std::nothrow, mtNMT) 330 SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>(); 331 return (_reserved_regions != nullptr); 332 } 333 return true; 334 } 335 336 bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, 337 const NativeCallStack& stack, MemTag mem_tag) { 338 assert(base_addr != nullptr, "Invalid address"); 339 assert(size > 0, "Invalid size"); 340 assert(_reserved_regions != nullptr, "Sanity check"); 341 ReservedMemoryRegion rgn(base_addr, size, stack, mem_tag); 342 ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 343 344 log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")", 345 rgn.mem_tag_name(), p2i(rgn.base()), rgn.size()); 346 if (reserved_rgn == nullptr) { 347 VirtualMemorySummary::record_reserved_memory(size, mem_tag); 348 return _reserved_regions->add(rgn) != nullptr; 349 } else { 350 // Deal with recursive reservation 351 // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory() 352 // See JDK-8198226. 353 if (reserved_rgn->same_region(base_addr, size) && 354 (reserved_rgn->mem_tag() == mem_tag || reserved_rgn->mem_tag() == mtNone)) { 355 reserved_rgn->set_call_stack(stack); 356 reserved_rgn->set_mem_tag(mem_tag); 357 return true; 358 } else { 359 assert(reserved_rgn->overlap_region(base_addr, size), "Must be"); 360 361 // Overlapped reservation. 362 // It can happen when the regions are thread stacks, as JNI 363 // thread does not detach from VM before exits, and leads to 364 // leak JavaThread object 365 if (reserved_rgn->mem_tag() == mtThreadStack) { 366 guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached"); 367 // Overwrite with new region 368 369 // Release old region 370 VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->mem_tag()); 371 VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->mem_tag()); 372 373 // Add new region 374 VirtualMemorySummary::record_reserved_memory(rgn.size(), mem_tag); 375 376 *reserved_rgn = rgn; 377 return true; 378 } 379 380 // CDS mapping region. 381 // CDS reserves the whole region for mapping CDS archive, then maps each section into the region. 382 // NMT reports CDS as a whole. 383 if (reserved_rgn->mem_tag() == mtClassShared) { 384 log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")", 385 reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size()); 386 assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region"); 387 return true; 388 } 389 390 // Mapped CDS string region. 391 // The string region(s) is part of the java heap. 392 if (reserved_rgn->mem_tag() == mtJavaHeap) { 393 log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")", 394 reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size()); 395 assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region"); 396 return true; 397 } 398 399 if (reserved_rgn->mem_tag() == mtCode) { 400 assert(reserved_rgn->contain_region(base_addr, size), "Reserved code region should contain this mapping region"); 401 return true; 402 } 403 404 // Print some more details. Don't use UL here to avoid circularities. 405 tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.\n" 406 " new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.", 407 p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->mem_tag(), 408 p2i(base_addr), p2i(base_addr + size), (unsigned)mem_tag); 409 if (MemTracker::tracking_level() == NMT_detail) { 410 tty->print_cr("Existing region allocated from:"); 411 reserved_rgn->call_stack()->print_on(tty); 412 tty->print_cr("New region allocated from:"); 413 stack.print_on(tty); 414 } 415 ShouldNotReachHere(); 416 return false; 417 } 418 } 419 } 420 421 void VirtualMemoryTracker::set_reserved_region_type(address addr, MemTag mem_tag) { 422 assert(addr != nullptr, "Invalid address"); 423 assert(_reserved_regions != nullptr, "Sanity check"); 424 425 ReservedMemoryRegion rgn(addr, 1); 426 ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 427 if (reserved_rgn != nullptr) { 428 assert(reserved_rgn->contain_address(addr), "Containment"); 429 if (reserved_rgn->mem_tag() != mem_tag) { 430 assert(reserved_rgn->mem_tag() == mtNone, "Overwrite memory tag (should be mtNone, is: \"%s\")", 431 NMTUtil::tag_to_name(reserved_rgn->mem_tag())); 432 reserved_rgn->set_mem_tag(mem_tag); 433 } 434 } 435 } 436 437 bool VirtualMemoryTracker::add_committed_region(address addr, size_t size, 438 const NativeCallStack& stack) { 439 assert(addr != nullptr, "Invalid address"); 440 assert(size > 0, "Invalid size"); 441 assert(_reserved_regions != nullptr, "Sanity check"); 442 443 ReservedMemoryRegion rgn(addr, size); 444 ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 445 446 if (reserved_rgn == nullptr) { 447 log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")", 448 rgn.mem_tag_name(), p2i(rgn.base()), rgn.size()); 449 } 450 assert(reserved_rgn != nullptr, "Add committed region, No reserved region found"); 451 assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); 452 bool result = reserved_rgn->add_committed_region(addr, size, stack); 453 log_debug(nmt)("Add committed region \'%s\'(" INTPTR_FORMAT ", " SIZE_FORMAT ") %s", 454 reserved_rgn->mem_tag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed")); 455 return result; 456 } 457 458 bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) { 459 assert(addr != nullptr, "Invalid address"); 460 assert(size > 0, "Invalid size"); 461 assert(_reserved_regions != nullptr, "Sanity check"); 462 463 ReservedMemoryRegion rgn(addr, size); 464 ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 465 assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size); 466 assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); 467 const char* type_name = reserved_rgn->mem_tag_name(); // after remove, info is not complete 468 bool result = reserved_rgn->remove_uncommitted_region(addr, size); 469 log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s", 470 type_name, p2i(addr), size, (result ? " Succeeded" : "Failed")); 471 return result; 472 } 473 474 bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) { 475 assert(rgn != nullptr, "Sanity check"); 476 assert(_reserved_regions != nullptr, "Sanity check"); 477 478 // uncommit regions within the released region 479 ReservedMemoryRegion backup(*rgn); 480 bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size()); 481 log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s", 482 backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed")); 483 if (!result) { 484 return false; 485 } 486 487 VirtualMemorySummary::record_released_memory(rgn->size(), rgn->mem_tag()); 488 result = _reserved_regions->remove(*rgn); 489 log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s" , 490 backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed")); 491 return result; 492 } 493 494 bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) { 495 assert(addr != nullptr, "Invalid address"); 496 assert(size > 0, "Invalid size"); 497 assert(_reserved_regions != nullptr, "Sanity check"); 498 499 ReservedMemoryRegion rgn(addr, size); 500 ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 501 502 if (reserved_rgn == nullptr) { 503 log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")!", 504 p2i(rgn.base()), rgn.size()); 505 } 506 assert(reserved_rgn != nullptr, "No reserved region"); 507 if (reserved_rgn->same_region(addr, size)) { 508 return remove_released_region(reserved_rgn); 509 } 510 511 // uncommit regions within the released region 512 if (!reserved_rgn->remove_uncommitted_region(addr, size)) { 513 return false; 514 } 515 516 if (reserved_rgn->mem_tag() == mtClassShared) { 517 if (reserved_rgn->contain_region(addr, size)) { 518 // This is an unmapped CDS region, which is part of the reserved shared 519 // memory region. 520 // See special handling in VirtualMemoryTracker::add_reserved_region also. 521 return true; 522 } 523 524 if (size > reserved_rgn->size()) { 525 // This is from release the whole region spanning from archive space to class space, 526 // so we release them altogether. 527 ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(), 528 (size - reserved_rgn->size())); 529 ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn); 530 assert(cls_rgn != nullptr, "Class space region not recorded?"); 531 assert(cls_rgn->mem_tag() == mtClass, "Must be class mem tag"); 532 remove_released_region(reserved_rgn); 533 remove_released_region(cls_rgn); 534 return true; 535 } 536 } 537 538 VirtualMemorySummary::record_released_memory(size, reserved_rgn->mem_tag()); 539 540 assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); 541 if (reserved_rgn->base() == addr || 542 reserved_rgn->end() == addr + size) { 543 reserved_rgn->exclude_region(addr, size); 544 return true; 545 } else { 546 address top = reserved_rgn->end(); 547 address high_base = addr + size; 548 ReservedMemoryRegion high_rgn(high_base, top - high_base, 549 *reserved_rgn->call_stack(), reserved_rgn->mem_tag()); 550 551 // use original region for lower region 552 reserved_rgn->exclude_region(addr, top - addr); 553 LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn); 554 if (new_rgn == nullptr) { 555 return false; 556 } else { 557 reserved_rgn->move_committed_regions(addr, *new_rgn->data()); 558 return true; 559 } 560 } 561 } 562 563 // Given an existing memory mapping registered with NMT, split the mapping in 564 // two. The newly created two mappings will be registered under the call 565 // stack and the memory tags of the original section. 566 bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) { 567 568 ReservedMemoryRegion rgn(addr, size); 569 ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 570 assert(reserved_rgn->same_region(addr, size), "Must be identical region"); 571 assert(reserved_rgn != nullptr, "No reserved region"); 572 assert(reserved_rgn->committed_size() == 0, "Splitting committed region?"); 573 574 NativeCallStack original_stack = *reserved_rgn->call_stack(); 575 MemTag original_tag = reserved_rgn->mem_tag(); 576 577 const char* name = reserved_rgn->mem_tag_name(); 578 remove_released_region(reserved_rgn); 579 log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT, 580 name, p2i(rgn.base()), rgn.size(), split); 581 // Now, create two new regions. 582 add_reserved_region(addr, split, original_stack, mem_tag); 583 add_reserved_region(addr + split, size - split, original_stack, split_tag); 584 585 return true; 586 } 587 588 589 // Iterate the range, find committed region within its bound. 590 class RegionIterator : public StackObj { 591 private: 592 const address _start; 593 const size_t _size; 594 595 address _current_start; 596 public: 597 RegionIterator(address start, size_t size) : 598 _start(start), _size(size), _current_start(start) { 599 } 600 601 // return true if committed region is found 602 bool next_committed(address& start, size_t& size); 603 private: 604 address end() const { return _start + _size; } 605 }; 606 607 bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) { 608 if (end() <= _current_start) return false; 609 610 const size_t page_sz = os::vm_page_size(); 611 const size_t current_size = end() - _current_start; 612 if (os::committed_in_range(_current_start, current_size, committed_start, committed_size)) { 613 assert(committed_start != nullptr, "Must be"); 614 assert(committed_size > 0 && is_aligned(committed_size, os::vm_page_size()), "Must be"); 615 616 _current_start = committed_start + committed_size; 617 return true; 618 } else { 619 return false; 620 } 621 } 622 623 // Walk all known thread stacks, snapshot their committed ranges. 624 class SnapshotThreadStackWalker : public VirtualMemoryWalker { 625 public: 626 SnapshotThreadStackWalker() {} 627 628 bool do_allocation_site(const ReservedMemoryRegion* rgn) { 629 if (rgn->mem_tag() == mtThreadStack) { 630 address stack_bottom = rgn->thread_stack_uncommitted_bottom(); 631 address committed_start; 632 size_t committed_size; 633 size_t stack_size = rgn->base() + rgn->size() - stack_bottom; 634 // Align the size to work with full pages (Alpine and AIX stack top is not page aligned) 635 size_t aligned_stack_size = align_up(stack_size, os::vm_page_size()); 636 637 ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn); 638 NativeCallStack ncs; // empty stack 639 640 RegionIterator itr(stack_bottom, aligned_stack_size); 641 DEBUG_ONLY(bool found_stack = false;) 642 while (itr.next_committed(committed_start, committed_size)) { 643 assert(committed_start != nullptr, "Should not be null"); 644 assert(committed_size > 0, "Should not be 0"); 645 // unaligned stack_size case: correct the region to fit the actual stack_size 646 if (stack_bottom + stack_size < committed_start + committed_size) { 647 committed_size = stack_bottom + stack_size - committed_start; 648 } 649 region->add_committed_region(committed_start, committed_size, ncs); 650 DEBUG_ONLY(found_stack = true;) 651 } 652 #ifdef ASSERT 653 if (!found_stack) { 654 log_debug(thread)("Thread exited without proper cleanup, may leak thread object"); 655 } 656 #endif 657 } 658 return true; 659 } 660 }; 661 662 void VirtualMemoryTracker::snapshot_thread_stacks() { 663 SnapshotThreadStackWalker walker; 664 walk_virtual_memory(&walker); 665 } 666 667 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { 668 assert(_reserved_regions != nullptr, "Sanity check"); 669 ThreadCritical tc; 670 // Check that the _reserved_regions haven't been deleted. 671 if (_reserved_regions != nullptr) { 672 LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head(); 673 while (head != nullptr) { 674 const ReservedMemoryRegion* rgn = head->peek(); 675 if (!walker->do_allocation_site(rgn)) { 676 return false; 677 } 678 head = head->next(); 679 } 680 } 681 return true; 682 } 683 684 class PrintRegionWalker : public VirtualMemoryWalker { 685 private: 686 const address _p; 687 outputStream* _st; 688 NativeCallStackPrinter _stackprinter; 689 public: 690 PrintRegionWalker(const void* p, outputStream* st) : 691 _p((address)p), _st(st), _stackprinter(st) { } 692 693 bool do_allocation_site(const ReservedMemoryRegion* rgn) { 694 if (rgn->contain_address(_p)) { 695 _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s", 696 p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::tag_to_enum_name(rgn->mem_tag())); 697 if (MemTracker::tracking_level() == NMT_detail) { 698 _stackprinter.print_stack(rgn->call_stack()); 699 _st->cr(); 700 } 701 return false; 702 } 703 return true; 704 } 705 }; 706 707 // If p is contained within a known memory region, print information about it to the 708 // given stream and return true; false otherwise. 709 bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) { 710 PrintRegionWalker walker(p, st); 711 return !walk_virtual_memory(&walker); 712 713 }