/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2023, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/timerTrace.hpp"
#include "services/heapDumper.hpp"
#include "services/heapDumperCompression.hpp"
#include "services/threadService.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#ifdef LINUX
#include "os_linux.hpp"
#endif

/*
 * HPROF binary format - description copied from:
 *   src/share/demo/jvmti/hprof/hprof_io.c
 *
 *
 *  header    "JAVA PROFILE 1.0.2" (0-terminated)
 *
 *  u4        size of identifiers. Identifiers are used to represent
 *            UTF8 strings, objects, stack traces, etc. They usually
 *            have the same size as host pointers.
 * u4         high word
 * u4         low word    number of milliseconds since 0:00 GMT, 1/1/70
 * [record]*  a sequence of records.
 *
 *
 * Record format:
 *
 * u1         a TAG denoting the type of the record
 * u4         number of *microseconds* since the time stamp in the
 *            header. (wraps around in a little more than an hour)
 * u4         number of bytes *remaining* in the record. Note that
 *            this number excludes the tag and the length field itself.
 * [u1]*      BODY of the record (a sequence of bytes)
 *
 *
 * The following TAGs are supported:
 *
 * TAG           BODY       notes
 *----------------------------------------------------------
 * HPROF_UTF8               a UTF8-encoded name
 *
 *               id         name ID
 *               [u1]*      UTF8 characters (no trailing zero)
 *
 * HPROF_LOAD_CLASS         a newly loaded class
 *
 *                u4        class serial number (> 0)
 *                id        class object ID
 *                u4        stack trace serial number
 *                id        class name ID
 *
 * HPROF_UNLOAD_CLASS       an unloading class
 *
 *                u4        class serial_number
 *
 * HPROF_FRAME              a Java stack frame
 *
 *                id        stack frame ID
 *                id        method name ID
 *                id        method signature ID
 *                id        source file name ID
 *                u4        class serial number
 *                i4        line number. >0: normal
 *                                       -1: unknown
 *                                       -2: compiled method
 *                                       -3: native method
 *
 * HPROF_TRACE              a Java stack trace
 *
 *               u4         stack trace serial number
 *               u4         thread serial number
 *               u4         number of frames
 *               [id]*      stack frame IDs
 *
 *
 * HPROF_ALLOC_SITES        a set of heap allocation sites, obtained after GC
 *
 *               u2         flags 0x0001: incremental vs. complete
 *                                0x0002: sorted by allocation vs. live
 *                                0x0004: whether to force a GC
 *               u4         cutoff ratio
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *               u4         number of sites that follow
 *              [u1         is_array: 0:  normal object
 *                                    2:  object array
 *                                    4:  boolean array
 *                                    5:  char array
 *                                    6:  float array
 *                                    7:  double array
 *                                    8:  byte array
 *                                    9:  short array
 *                                    10: int array
 *                                    11: long array
 *               u4         class serial number (may be zero during startup)
 *               u4         stack trace serial number
 *               u4         number of bytes alive
 *               u4         number of instances alive
 *               u4         number of bytes allocated
 *               u4]*       number of instances allocated
 *
 * HPROF_START_THREAD       a newly started thread.
 *
 *               u4         thread serial number (> 0)
 *               id         thread object ID
 *               u4         stack trace serial number
 *               id         thread name ID
 *               id         thread group name ID
 *               id         thread group parent name ID
 *
 * HPROF_END_THREAD         a terminating thread.
 *
 *               u4         thread serial number
 *
 * HPROF_HEAP_SUMMARY       heap summary
 *
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *
 * HPROF_HEAP_DUMP          denotes a heap dump
 *
 *               [heap dump sub-records]*
 *
 *                          There are four kinds of heap dump sub-records:
 *
 *               u1         sub-record type
 *
 *               HPROF_GC_ROOT_UNKNOWN         unknown root
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_OBJ      thread object
 *
 *                          id         thread object ID  (may be 0 for a
 *                                     thread newly attached through JNI)
 *                          u4         thread sequence number
 *                          u4         stack trace sequence number
 *
 *               HPROF_GC_ROOT_JNI_GLOBAL      JNI global ref root
 *
 *                          id         object ID
 *                          id         JNI global ref ID
 *
 *               HPROF_GC_ROOT_JNI_LOCAL       JNI local ref
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_JAVA_FRAME      Java stack frame
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_NATIVE_STACK    Native stack
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_STICKY_CLASS    System class
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_BLOCK    Reference from thread block
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_MONITOR_USED    Busy monitor
 *
 *                          id         object ID
 *
 *               HPROF_GC_CLASS_DUMP           dump of a class object
 *
 *                          id         class object ID
 *                          u4         stack trace serial number
 *                          id         super class object ID
 *                          id         class loader object ID
 *                          id         signers object ID
 *                          id         protection domain object ID
 *                          id         reserved
 *                          id         reserved
 *
 *                          u4         instance size (in bytes)
 *
 *                          u2         size of constant pool
 *                          [u2,       constant pool index,
 *                           ty,       type
 *                                     2:  object
 *                                     4:  boolean
 *                                     5:  char
 *                                     6:  float
 *                                     7:  double
 *                                     8:  byte
 *                                     9:  short
 *                                     10: int
 *                                     11: long
 *                           vl]*      and value
 *
 *                          u2         number of static fields
 *                          [id,       static field name,
 *                           ty,       type,
 *                           vl]*      and value
 *
 *                          u2         number of inst. fields (not inc. super)
 *                          [id,       instance field name,
 *                           ty]*      type
 *
 *               HPROF_GC_INSTANCE_DUMP        dump of a normal object
 *
 *                          id         object ID
 *                          u4         stack trace serial number
 *                          id         class object ID
 *                          u4         number of bytes that follow
 *                          [vl]*      instance field values (class, followed
 *                                     by super, super's super ...)
 *
 *               HPROF_GC_OBJ_ARRAY_DUMP       dump of an object array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          id         array class ID
 *                          [id]*      elements
 *
 *               HPROF_GC_PRIM_ARRAY_DUMP      dump of a primitive array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          u1         element type
 *                                     4:  boolean array
 *                                     5:  char array
 *                                     6:  float array
 *                                     7:  double array
 *                                     8:  byte array
 *                                     9:  short array
 *                                     10: int array
 *                                     11: long array
 *                          [u1]*      elements
 *
 * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 *
 *                u4        total number of samples
 *                u4        # of traces
 *               [u4        # of samples
 *                u4]*      stack trace serial number
 *
 * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 *
 *                u4        0x00000001: alloc traces on/off
 *                          0x00000002: cpu sampling on/off
 *                u2        stack trace depth
 *
 *
 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 * be generated as a sequence of heap dump segments. This sequence is
 * terminated by an end record. The additional tags allowed by format
 * "JAVA PROFILE 1.0.2" are:
 *
 * HPROF_HEAP_DUMP_SEGMENT  denotes a heap dump segment
 *
 *               [heap dump sub-records]*
 *               The same sub-record types allowed by HPROF_HEAP_DUMP
 *
 * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 *
 */
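
// For illustration only: with 8-byte identifiers, a HPROF_UTF8 record for the
// 4-character string "main" would be laid out as follows (the ID value below
// is a made-up example):
//
//   u1    0x01                 HPROF_UTF8 tag
//   u4    0x00000000           microseconds since the header time stamp
//   u4    0x0000000C           bytes remaining (8-byte name ID + 4 characters)
//   id    0x00007f8e12345678   name ID (the Symbol* address, see write_symbolID)
//   [u1]  'm' 'a' 'i' 'n'      UTF8 characters, no trailing zero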


// HPROF tags

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};

// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200
};

// Supports I/O operations for a dump
// Base class for dump and parallel dump
class AbstractDumpWriter : public CHeapObj<mtInternal> {
 protected:
  enum {
    io_buffer_max_size = 1*M,
    dump_segment_header_size = 9
  };

  char* _buffer;    // internal buffer
  size_t _size;
  size_t _pos;

  bool _in_dump_segment;    // Are we currently in a dump segment?
  bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
  DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
  DEBUG_ONLY(bool _sub_record_ended;)  // True if end_sub_record() has been called for the current sub-record.

  char* buffer() const          { return _buffer; }
  size_t buffer_size() const    { return _size; }
  void set_position(size_t pos) { _pos = pos; }

  // Can be called if we have enough room in the buffer.
  void write_fast(const void* s, size_t len);

  // Returns true if we have enough room in the buffer for 'len' bytes.
  bool can_write_fast(size_t len);

  void write_address(address a);

 public:
  AbstractDumpWriter() :
    _buffer(nullptr),
    _size(io_buffer_max_size),
    _pos(0),
    _in_dump_segment(false) { }

  // Total number of bytes written to the disk
  virtual julong bytes_written() const = 0;
  // Returns non-null if an error occurred
  virtual char const* error() const = 0;

  size_t position() const { return _pos; }
  // writer functions
  virtual void write_raw(const void* s, size_t len);
  void write_u1(u1 x);
  void write_u2(u2 x);
  void write_u4(u4 x);
  void write_u8(u8 x);
  void write_objectID(oop o);
  void write_rootID(oop* p);
  void write_symbolID(Symbol* s);
  void write_classID(Klass* k);
  void write_id(u4 x);

  // Start a new sub-record. Starts a new heap dump segment if needed.
  void start_sub_record(u1 tag, u4 len);
  // Ends the current sub-record.
  void end_sub_record();
  // Finishes the current dump segment if not already finished.
  void finish_dump_segment();
  // Flush internal buffer to persistent storage
  virtual void flush() = 0;
};

void AbstractDumpWriter::write_fast(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  assert(buffer_size() - position() >= len, "Must fit");
  debug_only(_sub_record_left -= len);
  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

bool AbstractDumpWriter::can_write_fast(size_t len) {
  return buffer_size() - position() >= len;
}

// write raw bytes
void AbstractDumpWriter::write_raw(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  debug_only(_sub_record_left -= len);

  // flush buffer to make room.
  while (len > buffer_size() - position()) {
    assert(!_in_dump_segment || _is_huge_sub_record,
           "Cannot overflow in non-huge sub-record.");
    size_t to_write = buffer_size() - position();
    memcpy(buffer() + position(), s, to_write);
    s = (void*) ((char*) s + to_write);
    len -= to_write;
    set_position(position() + to_write);
    flush();
  }

  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

// Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
#define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
                                      else write_raw((p), (len)); } while (0)

void AbstractDumpWriter::write_u1(u1 x) {
  WRITE_KNOWN_TYPE(&x, 1);
}

void AbstractDumpWriter::write_u2(u2 x) {
  u2 v;
  Bytes::put_Java_u2((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 2);
}

void AbstractDumpWriter::write_u4(u4 x) {
  u4 v;
  Bytes::put_Java_u4((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 4);
}

void AbstractDumpWriter::write_u8(u8 x) {
  u8 v;
  Bytes::put_Java_u8((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 8);
}

void AbstractDumpWriter::write_address(address a) {
#ifdef _LP64
  write_u8((u8)a);
#else
  write_u4((u4)a);
#endif
}

void AbstractDumpWriter::write_objectID(oop o) {
  write_address(cast_from_oop<address>(o));
}

void AbstractDumpWriter::write_rootID(oop* p) {
  write_address((address)p);
}

void AbstractDumpWriter::write_symbolID(Symbol* s) {
  write_address((address)((uintptr_t)s));
}

void AbstractDumpWriter::write_id(u4 x) {
#ifdef _LP64
  write_u8((u8) x);
#else
  write_u4(x);
#endif
}

// We use java mirror as the class ID
void AbstractDumpWriter::write_classID(Klass* k) {
  write_objectID(k->java_mirror());
}

void AbstractDumpWriter::finish_dump_segment() {
  if (_in_dump_segment) {
    assert(_sub_record_left == 0, "Last sub-record not written completely");
    assert(_sub_record_ended, "sub-record must have ended");

    // Fix up the dump segment length if we haven't written a huge sub-record last
    // (in which case the segment length was already set to the correct value initially).
    if (!_is_huge_sub_record) {
      assert(position() > dump_segment_header_size, "Dump segment should have some content");
      Bytes::put_Java_u4((address) (buffer() + 5),
                         (u4) (position() - dump_segment_header_size));
    } else {
      // Finished processing the huge sub-record.
      // Set _is_huge_sub_record to false so the parallel dump writer can flush data to file.
      _is_huge_sub_record = false;
    }

    _in_dump_segment = false;
    flush();
  }
}

void AbstractDumpWriter::start_sub_record(u1 tag, u4 len) {
  if (!_in_dump_segment) {
    if (position() > 0) {
      flush();
    }

    assert(position() == 0 && buffer_size() > dump_segment_header_size, "Must be at the start");

    write_u1(HPROF_HEAP_DUMP_SEGMENT);
    write_u4(0); // timestamp
    // Will be fixed up later if we add more sub-records. If this is a huge sub-record,
    // this is already the correct length, since we don't add more sub-records.
    write_u4(len);
    assert(Bytes::get_Java_u4((address)(buffer() + 5)) == len, "Inconsistent size!");
    _in_dump_segment = true;
    _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
  } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
    // This object will not fit in completely or the last sub-record was huge.
    // Finish the current segment and try again.
    finish_dump_segment();
    start_sub_record(tag, len);

    return;
  }

  debug_only(_sub_record_left = len);
  debug_only(_sub_record_ended = false);

  write_u1(tag);
}

void AbstractDumpWriter::end_sub_record() {
  assert(_in_dump_segment, "must be in dump segment");
  assert(_sub_record_left == 0, "sub-record not written completely");
  assert(!_sub_record_ended, "Must not have ended yet");
  debug_only(_sub_record_ended = true);
}
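
// Illustrative sketch of how the sub-record machinery above is used (this is
// the pattern the dumper classes below follow; HPROF_GC_ROOT_UNKNOWN is chosen
// here only because its body is a single object ID):
//
//   u4 size = 1 + sizeof(address);   // sub-record tag + object ID
//   writer->start_sub_record(HPROF_GC_ROOT_UNKNOWN, size);
//   writer->write_objectID(o);
//   writer->end_sub_record();
//
// start_sub_record() opens a HPROF_HEAP_DUMP_SEGMENT if none is active and, in
// debug builds, records 'size' in _sub_record_left; end_sub_record() then
// asserts that exactly that many bytes were written.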

// Supports I/O operations for a dump

class DumpWriter : public AbstractDumpWriter {
 private:
  FileWriter* _writer;
  AbstractCompressor* _compressor;
  size_t _bytes_written;
  char* _error;
  // Compression support
  char* _out_buffer;
  size_t _out_size;
  size_t _out_pos;
  char* _tmp_buffer;
  size_t _tmp_size;

 private:
  void do_compress();

 public:
  DumpWriter(const char* path, bool overwrite, AbstractCompressor* compressor);
  ~DumpWriter();
  julong bytes_written() const override        { return (julong) _bytes_written; }
  char const* error() const override           { return _error; }
  void set_error(const char* error)            { _error = (char*)error; }
  bool has_error() const                       { return _error != nullptr; }
  const char* get_file_path() const            { return _writer->get_file_path(); }
  AbstractCompressor* compressor()             { return _compressor; }
  bool is_overwrite() const                    { return _writer->is_overwrite(); }

  void flush() override;

 private:
  // internals for DumpMerger
  friend class DumpMerger;
  void set_bytes_written(julong bytes_written) { _bytes_written = bytes_written; }
  int get_fd() const                           { return _writer->get_fd(); }
  void set_compressor(AbstractCompressor* p)   { _compressor = p; }
};

DumpWriter::DumpWriter(const char* path, bool overwrite, AbstractCompressor* compressor) :
  AbstractDumpWriter(),
  _writer(new (std::nothrow) FileWriter(path, overwrite)),
  _compressor(compressor),
  _bytes_written(0),
  _error(nullptr),
  _out_buffer(nullptr),
  _out_size(0),
  _out_pos(0),
  _tmp_buffer(nullptr),
  _tmp_size(0) {
  _error = (char*)_writer->open_writer();
  if (_error == nullptr) {
    _buffer = (char*)os::malloc(io_buffer_max_size, mtInternal);
    if (compressor != nullptr) {
      _error = (char*)_compressor->init(io_buffer_max_size, &_out_size, &_tmp_size);
      if (_error == nullptr) {
        if (_out_size > 0) {
          _out_buffer = (char*)os::malloc(_out_size, mtInternal);
        }
        if (_tmp_size > 0) {
          _tmp_buffer = (char*)os::malloc(_tmp_size, mtInternal);
        }
      }
    }
  }
  // initialize internal buffer
  _pos = 0;
  _size = io_buffer_max_size;
}

DumpWriter::~DumpWriter() {
  if (_buffer != nullptr) {
    os::free(_buffer);
  }
  if (_out_buffer != nullptr) {
    os::free(_out_buffer);
  }
  if (_tmp_buffer != nullptr) {
    os::free(_tmp_buffer);
  }
  if (_writer != nullptr) {
    delete _writer;
  }
  _bytes_written = -1;
}

// flush any buffered bytes to the file
void DumpWriter::flush() {
  if (_pos <= 0) {
    return;
  }
  if (has_error()) {
    _pos = 0;
    return;
  }
  char* result = nullptr;
  if (_compressor == nullptr) {
    result = (char*)_writer->write_buf(_buffer, _pos);
    _bytes_written += _pos;
  } else {
    do_compress();
    if (!has_error()) {
      result = (char*)_writer->write_buf(_out_buffer, _out_pos);
      _bytes_written += _out_pos;
    }
  }
  _pos = 0; // reset pos to make internal buffer available

  if (result != nullptr) {
    set_error(result);
  }
}

void DumpWriter::do_compress() {
  const char* msg = _compressor->compress(_buffer, _pos, _out_buffer, _out_size,
                                          _tmp_buffer, _tmp_size, &_out_pos);

  if (msg != nullptr) {
    set_error(msg);
  }
}

class DumperClassCacheTable;
class DumperClassCacheTableEntry;

// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // returns the size of the instance of the given class
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given object
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, cds, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name(),
                             p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name());
      }
    }
  }
};

// Hash table of klasses to the klass metadata. This should greatly improve the
// heap dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
 private:
  GrowableArray<char> _sigs_start;
  GrowableArray<int> _offsets;
  u4 _instance_size;
  int _entries;

 public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};

class DumperClassCacheTable {
 private:
  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  static constexpr size_t TABLE_SIZE = 1031;

  // Maintain the cache for N classes. This limits memory footprint
  // impact, regardless of how many classes we have in the dump.
  // This also improves lookup performance by keeping the statically
  // sized table from overloading.
  static constexpr int CACHE_TOP = 256;

  typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
                            TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
  PtrTable* _ptrs;

  // Single-slot cache to handle the major case of objects of the same
  // class back-to-back, e.g. from T[].
  InstanceKlass* _last_ik;
  DumperClassCacheTableEntry* _last_entry;

  void unlink_all(PtrTable* table) {
    class CleanupEntry: StackObj {
     public:
      bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
        delete entry;
        return true;
      }
    } cleanup;
    table->unlink(&cleanup);
  }

 public:
  DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
    if (_last_ik == ik) {
      return _last_entry;
    }

    DumperClassCacheTableEntry* entry;
    DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
    if (from_cache == nullptr) {
      entry = new DumperClassCacheTableEntry();
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          Symbol* sig = fld.signature();
          entry->_sigs_start.push(sig->char_at(0));
          entry->_offsets.push(fld.offset());
          entry->_entries++;
          entry->_instance_size += DumperSupport::sig2size(sig);
        }
      }

      if (_ptrs->number_of_entries() >= CACHE_TOP) {
        // We do not track the individual hit rates for table entries.
        // Purge the entire table, and let the cache catch up with new
        // distribution.
        unlink_all(_ptrs);
      }

      _ptrs->put(ik, entry);
    } else {
      entry = *from_cache;
    }

    // Remember for single-slot cache.
    _last_ik = ik;
    _last_entry = entry;

    return entry;
  }

  DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}

  ~DumperClassCacheTable() {
    unlink_all(_ptrs);
    delete _ptrs;
  }
};
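
// Usage sketch (illustrative only; this mirrors what dump_instance() further
// below does with the cache):
//
//   DumperClassCacheTable cache;
//   DumperClassCacheTableEntry* entry = cache.lookup_or_create(ik);
//   for (int idx = 0; idx < entry->field_count(); idx++) {
//     // entry->sig_start(idx) and entry->offset(idx) describe field idx
//   }
//
// The single-slot cache reduces the common case of many consecutive objects
// of the same class (e.g. the elements of a T[]) to a pointer comparison.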

// write a header of the given type
void DumperSupport::write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
  writer->write_u1(tag);
  writer->write_u4(0); // current ticks
  writer->write_u4(len);
}

// returns hprof tag for the given type signature
hprofTag DumperSupport::sig2tag(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_ARRAY    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_BYTE     : return HPROF_BYTE;
    case JVM_SIGNATURE_CHAR     : return HPROF_CHAR;
    case JVM_SIGNATURE_FLOAT    : return HPROF_FLOAT;
    case JVM_SIGNATURE_DOUBLE   : return HPROF_DOUBLE;
    case JVM_SIGNATURE_INT      : return HPROF_INT;
    case JVM_SIGNATURE_LONG     : return HPROF_LONG;
    case JVM_SIGNATURE_SHORT    : return HPROF_SHORT;
    case JVM_SIGNATURE_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

hprofTag DumperSupport::type2tag(BasicType type) {
  switch (type) {
    case T_BYTE     : return HPROF_BYTE;
    case T_CHAR     : return HPROF_CHAR;
    case T_FLOAT    : return HPROF_FLOAT;
    case T_DOUBLE   : return HPROF_DOUBLE;
    case T_INT      : return HPROF_INT;
    case T_LONG     : return HPROF_LONG;
    case T_SHORT    : return HPROF_SHORT;
    case T_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

u4 DumperSupport::sig2size(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS:
    case JVM_SIGNATURE_ARRAY: return sizeof(address);
    case JVM_SIGNATURE_BOOLEAN:
    case JVM_SIGNATURE_BYTE: return 1;
    case JVM_SIGNATURE_SHORT:
    case JVM_SIGNATURE_CHAR: return 2;
    case JVM_SIGNATURE_INT:
    case JVM_SIGNATURE_FLOAT: return 4;
    case JVM_SIGNATURE_LONG:
    case JVM_SIGNATURE_DOUBLE: return 8;
    default: ShouldNotReachHere(); /* to shut up compiler */ return 0;
  }
}

template<typename T, typename F> T bit_cast(F from) { // replace with the real thing when we can use c++20
  T to;
  static_assert(sizeof(to) == sizeof(from), "must be of the same size");
  memcpy(&to, &from, sizeof(to));
  return to;
}

// dump a jfloat
void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
  if (g_isnan(f)) {
    writer->write_u4(0x7fc00000); // collapsing NaNs
  } else {
    writer->write_u4(bit_cast<u4>(f));
  }
}

// dump a jdouble
void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
  if (g_isnan(d)) {
    writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
  } else {
    writer->write_u8(bit_cast<u8>(d));
  }
}

// dumps the raw value of the given field
void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
  switch (type) {
    case JVM_SIGNATURE_CLASS :
    case JVM_SIGNATURE_ARRAY : {
      oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
      o = mask_dormant_archived_object(o, obj);
      assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
      writer->write_objectID(o);
      break;
    }
    case JVM_SIGNATURE_BYTE : {
      jbyte b = obj->byte_field(offset);
      writer->write_u1(b);
      break;
    }
    case JVM_SIGNATURE_CHAR : {
      jchar c = obj->char_field(offset);
      writer->write_u2(c);
      break;
    }
    case JVM_SIGNATURE_SHORT : {
      jshort s = obj->short_field(offset);
      writer->write_u2(s);
      break;
    }
    case JVM_SIGNATURE_FLOAT : {
      jfloat f = obj->float_field(offset);
      dump_float(writer, f);
      break;
    }
    case JVM_SIGNATURE_DOUBLE : {
      jdouble d = obj->double_field(offset);
      dump_double(writer, d);
      break;
    }
    case JVM_SIGNATURE_INT : {
      jint i = obj->int_field(offset);
      writer->write_u4(i);
      break;
    }
    case JVM_SIGNATURE_LONG : {
      jlong l = obj->long_field(offset);
      writer->write_u8(l);
      break;
    }
    case JVM_SIGNATURE_BOOLEAN : {
      jboolean b = obj->bool_field(offset);
      writer->write_u1(b);
      break;
    }
    default : {
      ShouldNotReachHere();
      break;
    }
  }
}

// returns the size of the instance of the given class
u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
  if (class_cache_entry != nullptr) {
    return class_cache_entry->instance_size();
  } else {
    u4 size = 0;
    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        size += sig2size(fld.signature());
      }
    }
    return size;
  }
}

u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
  field_count = 0;
  u4 size = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (fldc.access_flags().is_static()) {
      field_count++;
      size += sig2size(fldc.signature());
    }
  }

  // Add in resolved_references which is referenced by the cpCache
  // The resolved_references is an array per InstanceKlass holding the
  // strings and other oops resolved from the constant pool.
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    field_count++;
    size += sizeof(address);

    // Add in the resolved_references of the used previous versions of the class
    // in the case of RedefineClasses
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      field_count++;
      size += sizeof(address);
      prev = prev->previous_versions();
    }
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
}

// dumps static fields of the given class
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }
}

// dump the raw values of the instance fields of the given object
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {
  assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
  for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
    dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
  }
}

// gets the count of the instance fields for a given class
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
  u2 field_count = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (!fldc.access_flags().is_static()) field_count++;
  }

  return field_count;
}

// dumps the definition of the instance fields for a given class
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type
    }
  }
}

// creates HPROF_GC_INSTANCE_DUMP record for the given object
void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());

  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  u4 is = instance_size(ik, cache_entry);
  u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;

  writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
  writer->write_objectID(o);
  writer->write_u4(STACK_TRACE_ID);

  // class ID
  writer->write_classID(ik);

  // number of bytes that follow
  writer->write_u4(is);

  // field values
  dump_instance_fields(writer, o, cache_entry);

  writer->end_sub_record();
}

// creates HPROF_GC_CLASS_DUMP record for the given instance class
void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // We can safepoint and do a heap dump at a point where we have a Klass,
  // but no java mirror class has been set up for it. So we need to check
  // that the class is at least loaded, to avoid a crash from a null mirror.
  if (!ik->is_loaded()) {
    return;
  }

  u2 static_fields_count = 0;
  u4 static_size = get_static_fields_size(ik, static_fields_count);
  u2 instance_fields_count = get_instance_fields_count(ik);
  u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
  u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);

  writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);

  // class ID
  writer->write_classID(ik);
  writer->write_u4(STACK_TRACE_ID);

  // super class ID
  InstanceKlass* java_super = ik->java_super();
  if (java_super == nullptr) {
    writer->write_objectID(oop(nullptr));
  } else {
    writer->write_classID(java_super);
  }

  writer->write_objectID(ik->class_loader());
  writer->write_objectID(ik->signers());
  writer->write_objectID(ik->protection_domain());

  // reserved
  writer->write_objectID(oop(nullptr));
  writer->write_objectID(oop(nullptr));

  // instance size
  writer->write_u4(DumperSupport::instance_size(ik));

  // size of constant pool - ignored by HAT 1.1
  writer->write_u2(0);

  // static fields
  writer->write_u2(static_fields_count);
  dump_static_fields(writer, ik);

  // description of instance fields
  writer->write_u2(instance_fields_count);
  dump_instance_field_descriptors(writer, ik);

  writer->end_sub_record();
}

// creates HPROF_GC_CLASS_DUMP record for the given array class
void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
  if (k->is_objArray_klass()) {
    Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
    assert(bk != nullptr, "checking");
    if (bk->is_instance_klass()) {
      ik = InstanceKlass::cast(bk);
    }
  }

  u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + 2;
  writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
  writer->write_classID(k);
  writer->write_u4(STACK_TRACE_ID);

  // super class of array classes is java.lang.Object
  InstanceKlass* java_super = k->java_super();
  assert(java_super != nullptr, "checking");
  writer->write_classID(java_super);

  writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
  writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
  writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());

  writer->write_objectID(oop(nullptr)); // reserved
  writer->write_objectID(oop(nullptr));
  writer->write_u4(0);  // instance size
  writer->write_u2(0);  // constant pool
  writer->write_u2(0);  // static fields
  writer->write_u2(0);  // instance fields

  writer->end_sub_record();
}

// HPROF uses a u4 as its record length field,
// which means we need to truncate arrays that are too long.
int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
  BasicType type = ArrayKlass::cast(array->klass())->element_type();
  assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");

  int length = array->length();

  int type_size;
  if (type == T_OBJECT) {
    type_size = sizeof(address);
  } else {
    type_size = type2aelembytes(type);
  }

  size_t length_in_bytes = (size_t)length * type_size;
  uint max_bytes = max_juint - header_size;

  if (length_in_bytes > max_bytes) {
    length = max_bytes / type_size;
    length_in_bytes = (size_t)length * type_size;

    warning("cannot dump array of type %s[] with length %d; truncating to length %d",
            type2name_tab[type], array->length(), length);
  }
  return length;
}
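
// Worked example of the truncation above (illustrative only): on a 64-bit VM
// a long[] with 2^29 = 536,870,912 elements has 2^32 bytes of element data,
// which exceeds max_bytes = max_juint - header_size (header_size is 18 for a
// HPROF_GC_PRIM_ARRAY_DUMP, see dump_prim_array() below). Such an array would
// be truncated to (4,294,967,295 - 18) / 8 = 536,870,909 elements.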

// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
  // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
  short header_size = 1 + 2 * 4 + 2 * sizeof(address);
  int length = calculate_array_max_length(writer, array, header_size);
  u4 size = checked_cast<u4>(header_size + length * sizeof(address));

  writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
  writer->write_objectID(array);
  writer->write_u4(STACK_TRACE_ID);
  writer->write_u4(length);

  // array class ID
  writer->write_classID(array->klass());

  // [id]* elements
  for (int index = 0; index < length; index++) {
    oop o = array->obj_at(index);
    o = mask_dormant_archived_object(o, array);
    writer->write_objectID(o);
  }

  writer->end_sub_record();
}

#define WRITE_ARRAY(Array, Type, Size, Length) \
  for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }

// creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
  BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
  // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
  short header_size = 2 * 1 + 2 * 4 + sizeof(address);

  int length = calculate_array_max_length(writer, array, header_size);
  int type_size = type2aelembytes(type);
  u4 length_in_bytes = (u4)length * type_size;
  u4 size = header_size + length_in_bytes;

  writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
  writer->write_objectID(array);
  writer->write_u4(STACK_TRACE_ID);
  writer->write_u4(length);
  writer->write_u1(type2tag(type));

  // nothing to copy
  if (length == 0) {
    writer->end_sub_record();
    return;
  }

  // If the byte ordering is big endian then we can copy most types directly

  switch (type) {
    case T_INT : {
      if (Endian::is_Java_byte_ordering_different()) {
        WRITE_ARRAY(array, int, u4, length);
      } else {
        writer->write_raw(array->int_at_addr(0), length_in_bytes);
      }
      break;
    }
    case T_BYTE : {
      writer->write_raw(array->byte_at_addr(0), length_in_bytes);
      break;
    }
    case T_CHAR : {
      if (Endian::is_Java_byte_ordering_different()) {
        WRITE_ARRAY(array, char, u2, length);
      } else {
        writer->write_raw(array->char_at_addr(0), length_in_bytes);
      }
      break;
    }
    case T_SHORT : {
      if (Endian::is_Java_byte_ordering_different()) {
        WRITE_ARRAY(array, short, u2, length);
      } else {
        writer->write_raw(array->short_at_addr(0), length_in_bytes);
      }
      break;
    }
    case T_BOOLEAN : {
      if (Endian::is_Java_byte_ordering_different()) {
        WRITE_ARRAY(array, bool, u1, length);
      } else {
        writer->write_raw(array->bool_at_addr(0), length_in_bytes);
      }
      break;
    }
    case T_LONG : {
      if (Endian::is_Java_byte_ordering_different()) {
        WRITE_ARRAY(array, long, u8, length);
      } else {
        writer->write_raw(array->long_at_addr(0), length_in_bytes);
      }
      break;
    }

    // Handle floats/doubles specially to ensure that NaNs are
    // written correctly. TODO: check if we can avoid this on processors
    // that use IEEE 754.

    case T_FLOAT : {
      for (int i = 0; i < length; i++) {
        dump_float(writer, array->float_at(i));
      }
      break;
    }
    case T_DOUBLE : {
      for (int i = 0; i < length; i++) {
        dump_double(writer, array->double_at(i));
      }
      break;
    }
    default : ShouldNotReachHere();
  }

  writer->end_sub_record();
}

// create a HPROF_FRAME record for the given Method* and bci
void DumperSupport::dump_stack_frame(AbstractDumpWriter* writer,
                                     int frame_serial_num,
                                     int class_serial_num,
                                     Method* m,
                                     int bci) {
  int line_number;
  if (m->is_native()) {
    line_number = -3; // native frame
  } else {
    line_number = m->line_number_from_bci(bci);
  }

  write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
  writer->write_id(frame_serial_num);       // frame serial number
  writer->write_symbolID(m->name());        // method's name
  writer->write_symbolID(m->signature());   // method's signature

  assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
  writer->write_symbolID(m->method_holder()->source_file_name()); // source file name
  writer->write_u4(class_serial_num);       // class serial number
  writer->write_u4((u4) line_number);       // line number
}
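
// The HPROF_FRAME record length above, 4*oopSize + 2*sizeof(u4), breaks down
// per the format description at the top of this file: four IDs (frame serial
// number, method name, method signature, source file name) plus two u4 values
// (class serial number and line number). Note the frame serial number is
// written with write_id(), so it occupies oopSize bytes like the symbol IDs.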

// Support class used to generate HPROF_UTF8 records from the entries in the
// SymbolTable.

class SymbolTableDumper : public SymbolClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() const { return _writer; }
 public:
  SymbolTableDumper(AbstractDumpWriter* writer) { _writer = writer; }
  void do_symbol(Symbol** p);
};

void SymbolTableDumper::do_symbol(Symbol** p) {
  ResourceMark rm;
  Symbol* sym = *p;
  int len = sym->utf8_length();
  if (len > 0) {
    char* s = sym->as_utf8();
    DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
    writer()->write_symbolID(sym);
    writer()->write_raw(s, len);
  }
}

// Support class used to generate HPROF_GC_CLASS_DUMP records

class ClassDumper : public KlassClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() const { return _writer; }

 public:
  ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      DumperSupport::dump_instance_class(writer(), k);
    } else {
      DumperSupport::dump_array_class(writer(), k);
    }
  }
};

// Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records

class JNILocalsDumper : public OopClosure {
 private:
  AbstractDumpWriter* _writer;
  u4 _thread_serial_num;
  int _frame_num;
  AbstractDumpWriter* writer() const { return _writer; }
 public:
  JNILocalsDumper(AbstractDumpWriter* writer, u4 thread_serial_num) {
    _writer = writer;
    _thread_serial_num = thread_serial_num;
    _frame_num = -1;  // default - empty stack
  }
  void set_frame_number(int n) { _frame_num = n; }
  void do_oop(oop* obj_p);
  void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};

void JNILocalsDumper::do_oop(oop* obj_p) {
  // ignore null handles
  oop o = *obj_p;
  if (o != nullptr) {
    u4 size = 1 + sizeof(address) + 4 + 4;
    writer()->start_sub_record(HPROF_GC_ROOT_JNI_LOCAL, size);
    writer()->write_objectID(o);
    writer()->write_u4(_thread_serial_num);
    writer()->write_u4((u4)_frame_num);
    writer()->end_sub_record();
  }
}


// Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records

class JNIGlobalsDumper : public OopClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() const { return _writer; }

 public:
  JNIGlobalsDumper(AbstractDumpWriter* writer) {
    _writer = writer;
  }
  void do_oop(oop* obj_p);
  void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};

void JNIGlobalsDumper::do_oop(oop* obj_p) {
  oop o = NativeAccess<AS_NO_KEEPALIVE>::oop_load(obj_p);

  // ignore null handles
  if (o == nullptr) return;
  // we ignore global refs to symbols and other internal objects
  if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
    u4 size = 1 + 2 * sizeof(address);
    writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size);
    writer()->write_objectID(o);
    writer()->write_rootID(obj_p);      // global ref ID
    writer()->end_sub_record();
  }
}

// Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records

class StickyClassDumper : public KlassClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() const { return _writer; }
 public:
  StickyClassDumper(AbstractDumpWriter* writer) {
    _writer = writer;
  }
  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      u4 size = 1 + sizeof(address);
      writer()->start_sub_record(HPROF_GC_ROOT_STICKY_CLASS, size);
      writer()->write_classID(ik);
      writer()->end_sub_record();
    }
  }
};

// Support class used to generate HPROF_GC_ROOT_JAVA_FRAME records.

class JavaStackRefDumper : public StackObj {
 private:
  AbstractDumpWriter* _writer;
  u4 _thread_serial_num;
  int _frame_num;
  AbstractDumpWriter* writer() const { return _writer; }
 public:
  JavaStackRefDumper(AbstractDumpWriter* writer, u4 thread_serial_num)
      : _writer(writer), _thread_serial_num(thread_serial_num), _frame_num(-1) // default - empty stack
  {
  }

  void set_frame_number(int n) { _frame_num = n; }

  void dump_java_stack_refs(StackValueCollection* values);
};

void JavaStackRefDumper::dump_java_stack_refs(StackValueCollection* values) {
  for (int index = 0; index < values->size(); index++) {
    if (values->at(index)->type() == T_OBJECT) {
      oop o = values->obj_at(index)();
      if (o != nullptr) {
        u4 size = 1 + sizeof(address) + 4 + 4;
        writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
        writer()->write_objectID(o);
        writer()->write_u4(_thread_serial_num);
        writer()->write_u4((u4)_frame_num);
        writer()->end_sub_record();
      }
    }
  }
}
1653 oop carrier_thread = java_lang_VirtualThread::carrier_thread(vt); 1654 if (carrier_thread == nullptr) { 1655 return false; 1656 } 1657 JavaThread* java_thread = java_lang_Thread::thread(carrier_thread); 1658 return java_thread->is_vthread_mounted(); 1659 } 1660 1661 ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop); 1662 1663 // affects frame_count 1664 void add_oom_frame(Method* oome_constructor) { 1665 assert(_start_frame_serial_num == 0, "add_oom_frame cannot be called after init_serial_nums"); 1666 _oome_constructor = oome_constructor; 1667 } 1668 1669 void init_serial_nums(volatile int* thread_counter, volatile int* frame_counter) { 1670 assert(_start_frame_serial_num == 0, "already initialized"); 1671 _thread_serial_num = Atomic::fetch_then_add(thread_counter, 1); 1672 _start_frame_serial_num = Atomic::fetch_then_add(frame_counter, frame_count()); 1673 } 1674 1675 bool oom_thread() const { 1676 return _oome_constructor != nullptr; 1677 } 1678 1679 int frame_count() const { 1680 return _frames->length() + (oom_thread() ? 1 : 0); 1681 } 1682 1683 u4 thread_serial_num() const { 1684 return (u4)_thread_serial_num; 1685 } 1686 1687 u4 stack_trace_serial_num() const { 1688 return (u4)(_thread_serial_num + STACK_TRACE_ID); 1689 } 1690 1691 // writes HPROF_TRACE and HPROF_FRAME records 1692 // returns number of dumped frames 1693 void dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map); 1694 1695 // writes HPROF_GC_ROOT_THREAD_OBJ subrecord 1696 void dump_thread_obj(AbstractDumpWriter* writer); 1697 1698 // Walk the stack of the thread. 1699 // Dumps a HPROF_GC_ROOT_JAVA_FRAME subrecord for each local 1700 // Dumps a HPROF_GC_ROOT_JNI_LOCAL subrecord for each JNI local 1701 void dump_stack_refs(AbstractDumpWriter* writer); 1702 1703 }; 1704 1705 ThreadDumper::ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop) 1706 : _thread_type(thread_type), _java_thread(java_thread), _thread_oop(thread_oop), 1707 _oome_constructor(nullptr), 1708 _thread_serial_num(0), _start_frame_serial_num(0) 1709 { 1710 // sanity checks 1711 if (_thread_type == ThreadType::UnmountedVirtual) { 1712 assert(_java_thread == nullptr, "sanity"); 1713 assert(_thread_oop != nullptr, "sanity"); 1714 } else { 1715 assert(_java_thread != nullptr, "sanity"); 1716 assert(_thread_oop != nullptr, "sanity"); 1717 } 1718 1719 _frames = new (mtServiceability) GrowableArray<StackFrameInfo*>(10, mtServiceability); 1720 bool stop_at_vthread_entry = _thread_type == ThreadType::MountedVirtual; 1721 1722 // vframes are resource allocated 1723 Thread* current_thread = Thread::current(); 1724 ResourceMark rm(current_thread); 1725 HandleMark hm(current_thread); 1726 1727 for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) { 1728 if (stop_at_vthread_entry && vf->is_vthread_entry()) { 1729 break; 1730 } 1731 if (vf->is_java_frame()) { 1732 javaVFrame* jvf = javaVFrame::cast(vf); 1733 _frames->append(new StackFrameInfo(jvf, false)); 1734 } else { 1735 // ignore non-Java frames 1736 } 1737 } 1738 } 1739 1740 void ThreadDumper::dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map) { 1741 assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_nums are not initialized"); 1742 1743 // write HPROF_FRAME records for this thread's stack trace 1744 int depth = _frames->length(); 1745 int frame_serial_num = _start_frame_serial_num; 1746 1747 if (oom_thread()) { 1748 // OOM thread 1749 // write fake frame that 
void ThreadDumper::dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map) {
  assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_nums are not initialized");

  // write HPROF_FRAME records for this thread's stack trace
  int depth = _frames->length();
  int frame_serial_num = _start_frame_serial_num;

  if (oom_thread()) {
    // OOM thread: write a fake frame that makes it look like the thread that
    // caused the OOME is in the OutOfMemoryError zero-parameter constructor
    int oome_serial_num = klass_map->find(_oome_constructor->method_holder());
    // the class serial number starts from 1
    assert(oome_serial_num > 0, "OutOfMemoryError class not found");
    DumperSupport::dump_stack_frame(writer, ++frame_serial_num, oome_serial_num, _oome_constructor, 0);
    depth++;
  }

  for (int j = 0; j < _frames->length(); j++) {
    StackFrameInfo* frame = _frames->at(j);
    Method* m = frame->method();
    int class_serial_num = klass_map->find(m->method_holder());
    // the class serial number starts from 1
    assert(class_serial_num > 0, "class not found");
    DumperSupport::dump_stack_frame(writer, ++frame_serial_num, class_serial_num, m, frame->bci());
  }

  // write HPROF_TRACE record for the thread
  DumperSupport::write_header(writer, HPROF_TRACE, checked_cast<u4>(3 * sizeof(u4) + depth * oopSize));
  writer->write_u4(stack_trace_serial_num()); // stack trace serial number
  writer->write_u4(thread_serial_num());      // thread serial number
  writer->write_u4((u4)depth);                // frame count (including oom frame)
  for (int j = 1; j <= depth; j++) {
    writer->write_id(_start_frame_serial_num + j);
  }
}

void ThreadDumper::dump_thread_obj(AbstractDumpWriter* writer) {
  assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");

  u4 size = 1 + sizeof(address) + 4 + 4;
  writer->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
  writer->write_objectID(_thread_oop);
  writer->write_u4(thread_serial_num());      // thread serial number
  writer->write_u4(stack_trace_serial_num()); // stack trace serial number
  writer->end_sub_record();
}
void ThreadDumper::dump_stack_refs(AbstractDumpWriter* writer) {
  assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");

  JNILocalsDumper blk(writer, thread_serial_num());
  if (_thread_type == ThreadType::Platform) {
    if (!_java_thread->has_last_Java_frame()) {
      // no last java frame but there may be JNI locals
      _java_thread->active_handles()->oops_do(&blk);
      return;
    }
  }

  JavaStackRefDumper java_ref_dumper(writer, thread_serial_num());

  // vframes are resource allocated
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark hm(current_thread);

  bool stop_at_vthread_entry = _thread_type == ThreadType::MountedVirtual;
  frame* last_entry_frame = nullptr;
  bool is_top_frame = true;
  int depth = 0;
  if (oom_thread()) {
    depth++;
  }

  for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) {
    if (stop_at_vthread_entry && vf->is_vthread_entry()) {
      break;
    }

    if (vf->is_java_frame()) {
      javaVFrame* jvf = javaVFrame::cast(vf);
      if (!(jvf->method()->is_native())) {
        java_ref_dumper.set_frame_number(depth);
        java_ref_dumper.dump_java_stack_refs(jvf->locals());
        java_ref_dumper.dump_java_stack_refs(jvf->expressions());
      } else {
        // native frame
        blk.set_frame_number(depth);
        if (is_top_frame) {
          // JNI locals for the top frame
          assert(_java_thread != nullptr, "impossible for unmounted vthread");
          _java_thread->active_handles()->oops_do(&blk);
        } else {
          if (last_entry_frame != nullptr) {
            // JNI locals for the entry frame
            assert(last_entry_frame->is_entry_frame(), "checking");
            last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
          }
        }
      }
      last_entry_frame = nullptr;
      // increment only for Java frames
      depth++;
    } else {
      // externalVFrame - if this is an entry frame we report its JNI locals
      // when we find the corresponding javaVFrame
      frame* fr = vf->frame_pointer();
      assert(fr != nullptr, "sanity check");
      if (fr->is_entry_frame()) {
        last_entry_frame = fr;
      }
    }
    is_top_frame = false;
  }
  assert(depth == frame_count(), "total number of Java frames not matched");
}

vframe* ThreadDumper::get_top_frame() const {
  if (_thread_type == ThreadType::UnmountedVirtual) {
    ContinuationWrapper cont(java_lang_VirtualThread::continuation(_thread_oop));
    if (cont.is_empty()) {
      return nullptr;
    }
    assert(!cont.is_mounted(), "sanity check");
    stackChunkOop chunk = cont.last_nonempty_chunk();
    if (chunk == nullptr || chunk->is_empty()) {
      return nullptr;
    }

    RegisterMap reg_map(cont.continuation(), RegisterMap::UpdateMap::include);
    frame fr = chunk->top_frame(&reg_map);
    vframe* vf = vframe::new_vframe(&fr, &reg_map, nullptr); // don't need JavaThread
    return vf;
  }

  RegisterMap reg_map(_java_thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  switch (_thread_type) {
  case ThreadType::Platform:
    if (!_java_thread->has_last_Java_frame()) {
      return nullptr;
    }
    return _java_thread->is_vthread_mounted()
           ? _java_thread->carrier_last_java_vframe(&reg_map)
           : _java_thread->platform_thread_last_java_vframe(&reg_map);

  case ThreadType::MountedVirtual:
    return _java_thread->last_java_vframe(&reg_map);

  default: // make compilers happy
    break;
  }
  ShouldNotReachHere();
  return nullptr;
}

// Callback to dump thread-related data for unmounted virtual threads;
// implemented by VM_HeapDumper.
class UnmountedVThreadDumper {
 public:
  virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
};
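// Illustrative sketch (not part of the dumper): HeapObjectDumper below is an
// ObjectClosure that the heap-walk machinery invokes once per live object. A
// minimal closure of the same shape, assuming it runs at a safepoint with a
// parsable heap, might look like this:
#if 0
class ObjectCounter : public ObjectClosure {
 public:
  size_t _count = 0;
  void do_object(oop obj) override { _count++; }
};
// Usage: ObjectCounter counter; Universe::heap()->object_iterate(&counter);
#endif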
// Support class used when iterating over the heap.
class HeapObjectDumper : public ObjectClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() { return _writer; }
  UnmountedVThreadDumper* _vthread_dumper;

  DumperClassCacheTable _class_cache;

 public:
  HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper)
      : _writer(writer), _vthread_dumper(vthread_dumper) {}

  // called for each object in the heap
  void do_object(oop o);
};

void HeapObjectDumper::do_object(oop o) {
  // skip classes as these are emitted as HPROF_GC_CLASS_DUMP records
  if (o->klass() == vmClasses::Class_klass()) {
    if (!java_lang_Class::is_primitive(o)) {
      return;
    }
  }

  if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
    return;
  }

  if (o->is_instance()) {
    // create a HPROF_GC_INSTANCE record for each object
    DumperSupport::dump_instance(writer(), o, &_class_cache);
    // If we encounter an unmounted virtual thread it needs to be dumped explicitly
    // (mounted virtual threads are dumped with their carriers).
    if (java_lang_VirtualThread::is_instance(o)
        && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
      _vthread_dumper->dump_vthread(o, writer());
    }
  } else if (o->is_objArray()) {
    // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
    DumperSupport::dump_object_array(writer(), objArrayOop(o));
  } else if (o->is_typeArray()) {
    // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
    DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
  }
}
// The dumper controller for parallel heap dump
class DumperController : public CHeapObj<mtInternal> {
 private:
  Monitor* _lock;
  Mutex* _global_writer_lock;

  const uint _dumper_number;
  uint _complete_number;

  bool _started; // VM dumper started and acquired global writer lock

 public:
  DumperController(uint number) :
    // _lock and _global_writer_lock are used for synchronization between GC worker
    // threads inside the safepoint, so we lock with _no_safepoint_check_flag.
    // signal_start() acquires _lock while the global writer is locked,
    // so its rank must be less than the _global_writer_lock rank.
    _lock(new (std::nothrow) PaddedMonitor(Mutex::nosafepoint - 1, "DumperController_lock")),
    _global_writer_lock(new (std::nothrow) Mutex(Mutex::nosafepoint, "DumpWriter_lock")),
    _dumper_number(number),
    _complete_number(0),
    _started(false)
  {}

  ~DumperController() {
    delete _lock;
    delete _global_writer_lock;
  }

  // parallel (non VM) dumpers must wait until the VM dumper acquires the global writer lock
  void wait_for_start_signal() {
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    while (_started == false) {
      ml.wait();
    }
  }

  void signal_start() {
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    _started = true;
    ml.notify_all();
  }

  void lock_global_writer() {
    _global_writer_lock->lock_without_safepoint_check();
  }

  void unlock_global_writer() {
    _global_writer_lock->unlock();
  }

  void dumper_complete(DumpWriter* local_writer, DumpWriter* global_writer) {
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    _complete_number++;
    // propagate local error to global if any
    if (local_writer->has_error()) {
      global_writer->set_error(local_writer->error());
    }
    ml.notify();
  }

  void wait_all_dumpers_complete() {
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    while (_complete_number != _dumper_number) {
      ml.wait();
    }
  }
};
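// Illustrative sketch (not part of the dumper): the intended DumperController
// handshake, mirroring how VM_HeapDumper::work() further below uses it. The VM
// dumper holds the global writer lock while writing non-heap records; parallel
// workers block on the start signal until that lock has been taken.
#if 0
void vm_dumper_example(DumperController* controller) {
  controller->lock_global_writer();  // serialize non-heap records first
  controller->signal_start();        // release the waiting workers
  // ... write non-heap records to the global writer ...
  controller->unlock_global_writer();
  controller->wait_all_dumpers_complete();
}
void worker_example(DumperController* controller, DumpWriter* segment_writer, DumpWriter* global_writer) {
  controller->wait_for_start_signal();
  // ... write heap records to the local segment file ...
  controller->dumper_complete(segment_writer, global_writer);
}
#endif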
// DumpMerger merges separate dump files into a complete one
class DumpMerger : public StackObj {
 private:
  DumpWriter* _writer;
  const char* _path;
  bool _has_error;
  int _dump_seq;

 private:
  void merge_file(const char* path);
  void merge_done();
  void set_error(const char* msg);

 public:
  DumpMerger(const char* path, DumpWriter* writer, int dump_seq) :
    _writer(writer),
    _path(path),
    _has_error(_writer->has_error()),
    _dump_seq(dump_seq) {}

  void do_merge();

  // returns path for the parallel DumpWriter (resource allocated)
  static char* get_writer_path(const char* base_path, int seq);

};

char* DumpMerger::get_writer_path(const char* base_path, int seq) {
  // approximate required buffer size
  size_t buf_size = strlen(base_path)
                  + 2  // ".p"
                  + 10 // number (that's enough for 2^32 parallel dumpers)
                  + 1; // '\0'

  char* path = NEW_RESOURCE_ARRAY(char, buf_size);
  memset(path, 0, buf_size);

  os::snprintf(path, buf_size, "%s.p%d", base_path, seq);

  return path;
}

void DumpMerger::merge_done() {
  // Writes the HPROF_HEAP_DUMP_END record.
  if (!_has_error) {
    DumperSupport::end_of_dump(_writer);
    _writer->flush();
  }
  _dump_seq = 0; // reset
}

void DumpMerger::set_error(const char* msg) {
  assert(msg != nullptr, "sanity check");
  log_error(heapdump)("%s (file: %s)", msg, _path);
  _writer->set_error(msg);
  _has_error = true;
}

#ifdef LINUX
// Merge segmented heap files via sendfile; it's more efficient than a
// read+write combination, which would require transferring data to and from
// user space.
void DumpMerger::merge_file(const char* path) {
  TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));

  int segment_fd = os::open(path, O_RDONLY, 0);
  if (segment_fd == -1) {
    set_error("Can not open segmented heap file during merging");
    return;
  }

  struct stat st;
  if (os::stat(path, &st) != 0) {
    ::close(segment_fd);
    set_error("Can not get segmented heap file size during merging");
    return;
  }

  // A successful call to sendfile may write fewer bytes than requested; the
  // caller should be prepared to retry the call if there were unsent bytes.
  jlong offset = 0;
  while (offset < st.st_size) {
    int ret = os::Linux::sendfile(_writer->get_fd(), segment_fd, &offset, st.st_size);
    if (ret == -1) {
      ::close(segment_fd);
      set_error("Failed to merge segmented heap file");
      return;
    }
  }

  // Since the sendfile variant does not go through the global writer's write
  // method, bytes_written would not be updated either; explicitly accumulate
  // it for the global writer here.
  julong accum = _writer->bytes_written() + st.st_size;
  _writer->set_bytes_written(accum);
  ::close(segment_fd);
}
#else
// Generic implementation using read+write
void DumpMerger::merge_file(const char* path) {
  TraceTime timer("Merge segmented heap file", TRACETIME_LOG(Info, heapdump));

  fileStream segment_fs(path, "rb");
  if (!segment_fs.is_open()) {
    set_error("Can not open segmented heap file during merging");
    return;
  }

  jlong total = 0;
  size_t cnt = 0;

  // Use the _writer buffer for reading.
  while ((cnt = segment_fs.read(_writer->buffer(), 1, _writer->buffer_size())) != 0) {
    _writer->set_position(cnt);
    _writer->flush();
    total += cnt;
  }

  if (segment_fs.fileSize() != total) {
    set_error("Merged heap dump is incomplete");
  }
}
#endif
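// Illustrative note (not part of the dumper): get_writer_path() appends
// ".p<seq>" to the base dump path, so with a hypothetical base path
// "java_pid1234.hprof" and three dumpers the merge loop below would process
// "java_pid1234.hprof.p0" (the VM dumper, dumper id 0), "java_pid1234.hprof.p1"
// and "java_pid1234.hprof.p2".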
void DumpMerger::do_merge() {
  TraceTime timer("Merge heap files complete", TRACETIME_LOG(Info, heapdump));

  // Since the contents of the segmented heap files were already zipped, we don't
  // need to zip them again during merging.
  AbstractCompressor* saved_compressor = _writer->compressor();
  _writer->set_compressor(nullptr);

  // Merge the content of the remaining files into the base file. Regardless of
  // whether the merge process is successful or not, the segmented files will be
  // deleted.
  for (int i = 0; i < _dump_seq; i++) {
    ResourceMark rm;
    const char* path = get_writer_path(_path, i);
    if (!_has_error) {
      merge_file(path);
    }
    // delete the segment file regardless of merge success
    if (remove(path) != 0) {
      log_info(heapdump)("Removal of segment file (%d) failed (%d)", i, errno);
    }
  }

  // restore compressor for further use
  _writer->set_compressor(saved_compressor);
  merge_done();
}

// The VM operation that performs the heap dump
class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
 private:
  static VM_HeapDumper* _global_dumper;
  static DumpWriter* _global_writer;
  DumpWriter* _local_writer;
  JavaThread* _oome_thread;
  Method* _oome_constructor;
  bool _gc_before_heap_dump;
  GrowableArray<Klass*>* _klass_map;

  ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
  int _thread_dumpers_count;
  volatile int _thread_serial_num;
  volatile int _frame_serial_num;

  volatile int _dump_seq;
  // parallel heap dump support
  uint _num_dumper_threads;
  DumperController* _dumper_controller;
  ParallelObjectIterator* _poi;

  // Dumper id of the VM dumper thread.
  static const int VMDumperId = 0;
  // The VM dumper dumps both heap and non-heap data; other dumpers dump heap-only data.
  static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
  // the 1st dumper calling get_next_dumper_id becomes the VM dumper
  int get_next_dumper_id() {
    return Atomic::fetch_then_add(&_dump_seq, 1);
  }

  // accessors and setters
  static VM_HeapDumper* dumper() { assert(_global_dumper != nullptr, "Error"); return _global_dumper; }
  static DumpWriter* writer()    { assert(_global_writer != nullptr, "Error"); return _global_writer; }

  void set_global_dumper() {
    assert(_global_dumper == nullptr, "Error");
    _global_dumper = this;
  }
  void set_global_writer() {
    assert(_global_writer == nullptr, "Error");
    _global_writer = _local_writer;
  }
  void clear_global_dumper() { _global_dumper = nullptr; }
  void clear_global_writer() { _global_writer = nullptr; }

  bool skip_operation() const;

  // writes a HPROF_LOAD_CLASS record to the global writer
  static void do_load_class(Klass* k);

  // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
  void dump_threads(AbstractDumpWriter* writer);

  void add_class_serial_number(Klass* k, int serial_num) {
    _klass_map->at_put_grow(serial_num, k);
  }

  bool is_oom_thread(JavaThread* thread) const {
    return thread == _oome_thread && _oome_constructor != nullptr;
  }

  // HPROF_TRACE and HPROF_FRAME records for platform and mounted virtual threads
  void dump_stack_traces(AbstractDumpWriter* writer);

 public:
  VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome, uint num_dump_threads) :
    VM_GC_Operation(0 /* total collections, dummy, ignored */,
                    GCCause::_heap_dump /* GC Cause */,
                    0 /* total full collections, dummy, ignored */,
                    gc_before_heap_dump),
    WorkerTask("dump heap") {
    _local_writer = writer;
    _gc_before_heap_dump = gc_before_heap_dump;
    _klass_map = new (mtServiceability) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, mtServiceability);

    _thread_dumpers = nullptr;
    _thread_dumpers_count = 0;
    _thread_serial_num = 1;
    _frame_serial_num = 1;

    _dump_seq = VMDumperId;
    _num_dumper_threads = num_dump_threads;
    _dumper_controller = nullptr;
    _poi = nullptr;
    if (oome) {
      assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
      // get the OutOfMemoryError zero-parameter constructor
      InstanceKlass* oome_ik = vmClasses::OutOfMemoryError_klass();
      _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
                                               vmSymbols::void_method_signature());
      // get the thread throwing the OOME when generating the heap dump at OOME
      _oome_thread = JavaThread::current();
    } else {
      _oome_thread = nullptr;
      _oome_constructor = nullptr;
    }
  }

  ~VM_HeapDumper() {
    if (_thread_dumpers != nullptr) {
      for (int i = 0; i < _thread_dumpers_count; i++) {
        delete _thread_dumpers[i];
      }
      FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
    }

    if (_dumper_controller != nullptr) {
      delete _dumper_controller;
      _dumper_controller = nullptr;
    }
    delete _klass_map;
  }

  int dump_seq() { return _dump_seq; }
  bool is_parallel_dump() { return _num_dumper_threads > 1; }
  void prepare_parallel_dump(WorkerThreads* workers);

  VMOp_Type type() const { return VMOp_HeapDumper; }
  virtual bool doit_prologue();
  void doit();
  void work(uint worker_id);

  // UnmountedVThreadDumper implementation
  void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
};

VM_HeapDumper* VM_HeapDumper::_global_dumper = nullptr;
DumpWriter*    VM_HeapDumper::_global_writer = nullptr;

bool VM_HeapDumper::skip_operation() const {
  return false;
}

// fixes up the current dump record and writes the HPROF_HEAP_DUMP_END record
void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
  writer->finish_dump_segment();

  writer->write_u1(HPROF_HEAP_DUMP_END);
  writer->write_u4(0);
  writer->write_u4(0);
}

// writes a HPROF_LOAD_CLASS record for the class
void VM_HeapDumper::do_load_class(Klass* k) {
  static u4 class_serial_num = 0;

  // length of the HPROF_LOAD_CLASS record
  u4 remaining = 2 * oopSize + 2 * sizeof(u4);

  DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);

  // class serial number is just a number
  writer()->write_u4(++class_serial_num);

  // class ID
  writer()->write_classID(k);

  // add the Klass* and class serial number pair
  dumper()->add_class_serial_number(k, class_serial_num);

  writer()->write_u4(STACK_TRACE_ID);

  // class name ID
  Symbol* name = k->name();
  writer()->write_symbolID(name);
}

// Write a HPROF_GC_ROOT_THREAD_OBJ record for platform/carrier and mounted virtual threads.
// Then walk the stack so that locals and JNI locals are dumped.
void VM_HeapDumper::dump_threads(AbstractDumpWriter* writer) {
  for (int i = 0; i < _thread_dumpers_count; i++) {
    _thread_dumpers[i]->dump_thread_obj(writer);
    _thread_dumpers[i]->dump_stack_refs(writer);
  }
}
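// Illustrative sketch (not part of the dumper): the HPROF_LOAD_CLASS record
// length computed in do_load_class() above breaks down into two IDs and two
// u4 fields. A hypothetical restatement of that arithmetic:
#if 0
static u4 load_class_record_length() {
  return 2 * oopSize      // id: class object ID + id: class name ID
       + 2 * sizeof(u4);  // u4: class serial number + u4: stack trace serial number
}
#endif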
bool VM_HeapDumper::doit_prologue() {
  if (_gc_before_heap_dump && UseZGC) {
    // ZGC cannot perform a synchronous GC cycle from within the VM thread,
    // so ZCollectedHeap::collect_as_vm_thread() is a no-op. To respect the
    // _gc_before_heap_dump flag a synchronous GC cycle is performed from
    // the caller thread in the prologue.
    Universe::heap()->collect(GCCause::_heap_dump);
  }
  return VM_GC_Operation::doit_prologue();
}

void VM_HeapDumper::prepare_parallel_dump(WorkerThreads* workers) {
  uint num_active_workers = workers != nullptr ? workers->active_workers() : 0;
  uint num_requested_dump_threads = _num_dumper_threads;
  // check if we can dump in parallel based on requested and active threads
  if (num_active_workers <= 1 || num_requested_dump_threads <= 1) {
    _num_dumper_threads = 1;
  } else {
    _num_dumper_threads = clamp(num_requested_dump_threads, 2U, num_active_workers);
  }
  _dumper_controller = new (std::nothrow) DumperController(_num_dumper_threads);
  bool can_parallel = _num_dumper_threads > 1;
  log_info(heapdump)("Requested dump threads %u, active dump threads %u, "
                     "actual dump threads %u, parallelism %s",
                     num_requested_dump_threads, num_active_workers,
                     _num_dumper_threads, can_parallel ? "true" : "false");
}

// The VM operation that dumps the heap. The dump consists of the following
// records:
//
//  HPROF_HEADER
//  [HPROF_UTF8]*
//  [HPROF_LOAD_CLASS]*
//  [[HPROF_FRAME]*|HPROF_TRACE]*
//  [HPROF_GC_CLASS_DUMP]*
//  [HPROF_HEAP_DUMP_SEGMENT]*
//  HPROF_HEAP_DUMP_END
//
// The HPROF_TRACE records represent the stack traces of the threads present
// when the heap dump is generated, plus a "dummy trace" record which does not
// include any frames. The dummy trace record is referenced as the alloc site
// of objects whose allocation site is unknown.
//
// Each HPROF_HEAP_DUMP_SEGMENT record has a length followed by sub-records.
// To allow the heap dump to be generated in a single pass we remember the position
// of the dump length and fix it up after all sub-records have been written.
// To generate the sub-records we iterate over the heap, writing
// HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
// records as we go. Once that is done we write records for some of the GC
// roots.

void VM_HeapDumper::doit() {

  CollectedHeap* ch = Universe::heap();

  ch->ensure_parsability(false); // must happen, even if collection does
                                 // not happen (e.g. due to GCLocker)

  if (_gc_before_heap_dump) {
    if (GCLocker::is_active()) {
      warning("GC locker is held; pre-heapdump GC was skipped");
    } else {
      ch->collect_as_vm_thread(GCCause::_heap_dump);
    }
  }

  // At this point we should be the only dumper active, so
  // the following should be safe.
  set_global_dumper();
  set_global_writer();

  WorkerThreads* workers = ch->safepoint_workers();
  prepare_parallel_dump(workers);

  if (!is_parallel_dump()) {
    work(VMDumperId);
  } else {
    ParallelObjectIterator poi(_num_dumper_threads);
    _poi = &poi;
    workers->run_task(this, _num_dumper_threads);
    _poi = nullptr;
  }

  // Now we clear the global variables, so that a future dumper can run.
  clear_global_dumper();
  clear_global_writer();
}
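// Illustrative sketch (not part of the dumper): VM_HeapDumper doubles as a
// WorkerTask, so in the parallel case each GC worker enters work(worker_id).
// A minimal task of the same shape, assuming a WorkerThreads* is available:
#if 0
class HelloTask : public WorkerTask {
 public:
  HelloTask() : WorkerTask("hello task") {}
  void work(uint worker_id) override {
    log_info(heapdump)("worker %u running", worker_id);
  }
};
// Usage: HelloTask task; workers->run_task(&task, num_workers);
#endif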
void VM_HeapDumper::work(uint worker_id) {
  // The VM dumper works on all non-heap data dumping and part of the heap iteration.
  int dumper_id = get_next_dumper_id();

  if (is_vm_dumper(dumper_id)) {
    // lock the global writer; it will be unlocked after the VM dumper finishes with non-heap data
    _dumper_controller->lock_global_writer();
    _dumper_controller->signal_start();
  } else {
    _dumper_controller->wait_for_start_signal();
  }

  if (is_vm_dumper(dumper_id)) {
    TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
    // Write the file header - we always use 1.0.2
    const char* header = "JAVA PROFILE 1.0.2";

    // the header is a few bytes long - no chance to overflow int
    writer()->write_raw(header, strlen(header) + 1); // NUL terminated
    writer()->write_u4(oopSize);
    // timestamp is current time in ms
    writer()->write_u8(os::javaTimeMillis());
    // HPROF_UTF8 records
    SymbolTableDumper sym_dumper(writer());
    SymbolTable::symbols_do(&sym_dumper);

    // write HPROF_LOAD_CLASS records
    {
      LockedClassesDo locked_load_classes(&do_load_class);
      ClassLoaderDataGraph::classes_do(&locked_load_classes);
    }

    // write HPROF_FRAME and HPROF_TRACE records;
    // this must be called after _klass_map is built when iterating the classes above
    dump_stack_traces(writer());

    // unlock the global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
    _dumper_controller->unlock_global_writer();
  }

  // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here

  ResourceMark rm;
  // share the global compressor; the local DumpWriter is not responsible for its life cycle
  DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
                            writer()->is_overwrite(), writer()->compressor());
  if (!segment_writer.has_error()) {
    if (is_vm_dumper(dumper_id)) {
      // dump some non-heap subrecords to the heap dump segment
      TraceTime timer("Dump non-objects (part 2)", TRACETIME_LOG(Info, heapdump));
      // writes HPROF_GC_CLASS_DUMP records
      ClassDumper class_dumper(&segment_writer);
      ClassLoaderDataGraph::classes_do(&class_dumper);

      // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
      dump_threads(&segment_writer);

      // HPROF_GC_ROOT_JNI_GLOBAL
      JNIGlobalsDumper jni_dumper(&segment_writer);
      JNIHandles::oops_do(&jni_dumper);
      // technically not jni roots, but global roots
      // for things like preallocated throwable backtraces
      Universe::vm_global()->oops_do(&jni_dumper);
      // HPROF_GC_ROOT_STICKY_CLASS
      // These should be classes in the null class loader data, and not all classes
      // if !ClassUnloading.
      StickyClassDumper sticky_class_dumper(&segment_writer);
      ClassLoaderData::the_null_class_loader_data()->classes_do(&sticky_class_dumper);
    }

    // Heap iteration.
    // Writes HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP and
    // HPROF_GC_PRIM_ARRAY_DUMP records.
    // After each sub-record is written check_segment_length will be invoked
    // to check if the current segment exceeds a threshold. If so, a new
    // segment is started.
    // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP records are the vast
    // bulk of the heap dump.
"Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump)); 2518 HeapObjectDumper obj_dumper(&segment_writer, this); 2519 if (!is_parallel_dump()) { 2520 Universe::heap()->object_iterate(&obj_dumper); 2521 } else { 2522 // == Parallel dump 2523 _poi->object_iterate(&obj_dumper, worker_id); 2524 } 2525 2526 segment_writer.finish_dump_segment(); 2527 segment_writer.flush(); 2528 } 2529 2530 _dumper_controller->dumper_complete(&segment_writer, writer()); 2531 2532 if (is_vm_dumper(dumper_id)) { 2533 _dumper_controller->wait_all_dumpers_complete(); 2534 2535 // flush global writer 2536 writer()->flush(); 2537 2538 // At this point, all fragments of the heapdump have been written to separate files. 2539 // We need to merge them into a complete heapdump and write HPROF_HEAP_DUMP_END at that time. 2540 } 2541 } 2542 2543 void VM_HeapDumper::dump_stack_traces(AbstractDumpWriter* writer) { 2544 // write a HPROF_TRACE record without any frames to be referenced as object alloc sites 2545 DumperSupport::write_header(writer, HPROF_TRACE, 3 * sizeof(u4)); 2546 writer->write_u4((u4)STACK_TRACE_ID); 2547 writer->write_u4(0); // thread number 2548 writer->write_u4(0); // frame count 2549 2550 // max number if every platform thread is carrier with mounted virtual thread 2551 _thread_dumpers = NEW_C_HEAP_ARRAY(ThreadDumper*, Threads::number_of_threads() * 2, mtInternal); 2552 2553 for (JavaThreadIteratorWithHandle jtiwh; JavaThread * thread = jtiwh.next(); ) { 2554 if (ThreadDumper::should_dump_pthread(thread)) { 2555 bool add_oom_frame = is_oom_thread(thread); 2556 2557 oop mounted_vt = thread->is_vthread_mounted() ? thread->vthread() : nullptr; 2558 if (mounted_vt != nullptr && !ThreadDumper::should_dump_vthread(mounted_vt)) { 2559 mounted_vt = nullptr; 2560 } 2561 2562 // mounted vthread (if any) 2563 if (mounted_vt != nullptr) { 2564 ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::MountedVirtual, thread, mounted_vt); 2565 _thread_dumpers[_thread_dumpers_count++] = thread_dumper; 2566 if (add_oom_frame) { 2567 thread_dumper->add_oom_frame(_oome_constructor); 2568 // we add oom frame to the VT stack, don't add it to the carrier thread stack 2569 add_oom_frame = false; 2570 } 2571 thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num); 2572 thread_dumper->dump_stack_traces(writer, _klass_map); 2573 } 2574 2575 // platform or carrier thread 2576 ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::Platform, thread, thread->threadObj()); 2577 _thread_dumpers[_thread_dumpers_count++] = thread_dumper; 2578 if (add_oom_frame) { 2579 thread_dumper->add_oom_frame(_oome_constructor); 2580 } 2581 thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num); 2582 thread_dumper->dump_stack_traces(writer, _klass_map); 2583 } 2584 } 2585 } 2586 2587 void VM_HeapDumper::dump_vthread(oop vt, AbstractDumpWriter* segment_writer) { 2588 // unmounted vthread has no JavaThread 2589 ThreadDumper thread_dumper(ThreadDumper::ThreadType::UnmountedVirtual, nullptr, vt); 2590 thread_dumper.init_serial_nums(&_thread_serial_num, &_frame_serial_num); 2591 2592 // write HPROF_TRACE/HPROF_FRAME records to global writer 2593 _dumper_controller->lock_global_writer(); 2594 thread_dumper.dump_stack_traces(writer(), _klass_map); 2595 _dumper_controller->unlock_global_writer(); 2596 2597 // write HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecord 2598 // to segment writer 2599 
void VM_HeapDumper::dump_vthread(oop vt, AbstractDumpWriter* segment_writer) {
  // an unmounted vthread has no JavaThread
  ThreadDumper thread_dumper(ThreadDumper::ThreadType::UnmountedVirtual, nullptr, vt);
  thread_dumper.init_serial_nums(&_thread_serial_num, &_frame_serial_num);

  // write HPROF_TRACE/HPROF_FRAME records to the global writer
  _dumper_controller->lock_global_writer();
  thread_dumper.dump_stack_traces(writer(), _klass_map);
  _dumper_controller->unlock_global_writer();

  // write HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecords
  // to the segment writer
  thread_dumper.dump_thread_obj(segment_writer);
  thread_dumper.dump_stack_refs(segment_writer);
}

// dump the heap to the given path
int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
  assert(path != nullptr && strlen(path) > 0, "path missing");

  // print message in interactive case
  if (out != nullptr) {
    out->print_cr("Dumping heap to %s ...", path);
    timer()->start();
  }

  if (_oome && num_dump_threads > 1) {
    // Each additional parallel writer requires several MB of internal memory
    // (DumpWriter buffer, DumperClassCacheTable, GZipCompressor buffers).
    // For the OOM handling we may already be limited in memory.
    // Let's ensure we have at least 20MB per thread.
    julong max_threads = os::free_memory() / (20 * M);
    if (num_dump_threads > max_threads) {
      num_dump_threads = MAX2<uint>(1, (uint)max_threads);
    }
  }

  // create JFR event
  EventHeapDump event;

  AbstractCompressor* compressor = nullptr;

  if (compression > 0) {
    compressor = new (std::nothrow) GZipCompressor(compression);

    if (compressor == nullptr) {
      set_error("Could not allocate gzip compressor");
      return -1;
    }
  }

  DumpWriter writer(path, overwrite, compressor);

  if (writer.error() != nullptr) {
    set_error(writer.error());
    if (out != nullptr) {
      out->print_cr("Unable to create %s: %s", path,
                    (error() != nullptr) ? error() : "reason unknown");
    }
    return -1;
  }

  // generate the segmented heap dump into separate files
  VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
  VMThread::execute(&dumper);

  // record any error that the writer may have encountered
  set_error(writer.error());

  // The heap dump process is done in two phases:
  //
  // Phase 1: Concurrent threads directly write heap data to multiple heap files.
  //          This is done by VM_HeapDumper, which is performed within a safepoint.
  //
  // Phase 2: Merge the multiple heap files into one complete heap dump file.
  //          This is done by DumpMerger, which is performed outside a safepoint.

  DumpMerger merger(path, &writer, dumper.dump_seq());
  // Performing the heap dump file merge in the current thread prevents us
  // from occupying the VM thread, which in turn would delay GC and other
  // VM operations.
  merger.do_merge();
  if (writer.error() != nullptr) {
    set_error(writer.error());
  }

  // emit JFR event
  if (error() == nullptr) {
    event.set_destination(path);
    event.set_gcBeforeDump(_gc_before_heap_dump);
    event.set_size(writer.bytes_written());
    event.set_onOutOfMemoryError(_oome);
    event.set_overwrite(overwrite);
    event.set_compression(compression);
    event.commit();
  } else {
    log_debug(cds, heap)("Error %s while dumping heap", error());
  }

  // print message in interactive case
  if (out != nullptr) {
    timer()->stop();
    if (error() == nullptr) {
      out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
                    writer.bytes_written(), timer()->seconds());
    } else {
      out->print_cr("Dump file is incomplete: %s", writer.error());
    }
  }

  if (compressor != nullptr) {
    delete compressor;
  }
  return (writer.error() == nullptr) ? 0 : -1;
}
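// Illustrative sketch (not part of the dumper): a hypothetical in-VM caller of
// HeapDumper::dump() above. The path and argument values here are made-up
// examples; the real entry points (jcmd, HeapDumpOnOutOfMemoryError) go
// through the wrappers further below.
#if 0
void dump_example() {
  HeapDumper dumper(true /* GC before heap dump */, false /* not an OOME dump */);
  int res = dumper.dump("/tmp/example.hprof", tty,
                        0     /* no gzip compression */,
                        false /* don't overwrite an existing file */,
                        1     /* single dump thread */);
  // res is 0 on success, -1 if the writer reported an error
}
#endif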
// stop the timer (if still active), and free any error string we might be holding
HeapDumper::~HeapDumper() {
  if (timer()->is_active()) {
    timer()->stop();
  }
  set_error(nullptr);
}

// returns the error string (resource allocated), or null
char* HeapDumper::error_as_C_string() const {
  if (error() != nullptr) {
    char* str = NEW_RESOURCE_ARRAY(char, strlen(error()) + 1);
    strcpy(str, error());
    return str;
  } else {
    return nullptr;
  }
}

// set the error string
void HeapDumper::set_error(char const* error) {
  if (_error != nullptr) {
    os::free(_error);
  }
  if (error == nullptr) {
    _error = nullptr;
  } else {
    _error = os::strdup(error);
    assert(_error != nullptr, "allocation failure");
  }
}

// Called by out-of-memory error reporting by a single Java thread
// outside of a JVM safepoint
void HeapDumper::dump_heap_from_oome() {
  HeapDumper::dump_heap(true);
}

// Called by error reporting by a single Java thread outside of a JVM safepoint,
// or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
// callers are strictly serialized and guaranteed not to interfere below. For more
// general use, however, this method will need modification to prevent
// interference when updating the static variables base_path and dump_file_seq below.
void HeapDumper::dump_heap() {
  HeapDumper::dump_heap(false);
}
void HeapDumper::dump_heap(bool oome) {
  static char base_path[JVM_MAXPATHLEN] = {'\0'};
  static uint dump_file_seq = 0;
  char* my_path;
  const int max_digit_chars = 20;

  const char* dump_file_name = "java_pid";
  const char* dump_file_ext = HeapDumpGzipLevel > 0 ? ".hprof.gz" : ".hprof";

  // The dump file defaults to java_pid<pid>.hprof in the current working
  // directory. HeapDumpPath=<file> can be used to specify an alternative
  // dump file name or a directory where the dump file is created.
  if (dump_file_seq == 0) { // first time in, we initialize base_path
    // Calculate the potentially longest base path and check if we have enough
    // allocated statically.
    const size_t total_length =
      (HeapDumpPath == nullptr ? 0 : strlen(HeapDumpPath)) +
      strlen(os::file_separator()) + max_digit_chars +
      strlen(dump_file_name) + strlen(dump_file_ext) + 1;
    if (total_length > sizeof(base_path)) {
      warning("Cannot create heap dump file. HeapDumpPath is too long.");
      return;
    }

    bool use_default_filename = true;
    if (HeapDumpPath == nullptr || HeapDumpPath[0] == '\0') {
      // HeapDumpPath=<file> not specified
    } else {
      strcpy(base_path, HeapDumpPath);
      // check if the path is a directory (must exist)
      DIR* dir = os::opendir(base_path);
      if (dir == nullptr) {
        use_default_filename = false;
      } else {
        // HeapDumpPath specified a directory. We append a file separator
        // (if needed).
        os::closedir(dir);
        size_t fs_len = strlen(os::file_separator());
        if (strlen(base_path) >= fs_len) {
          char* end = base_path;
          end += (strlen(base_path) - fs_len);
          if (strcmp(end, os::file_separator()) != 0) {
            strcat(base_path, os::file_separator());
          }
        }
      }
    }
    // If HeapDumpPath wasn't a file name then we append the default name
    if (use_default_filename) {
      const size_t dlen = strlen(base_path); // if a heap dump dir was specified
      jio_snprintf(&base_path[dlen], sizeof(base_path) - dlen, "%s%d%s",
                   dump_file_name, os::current_process_id(), dump_file_ext);
    }
    const size_t len = strlen(base_path) + 1;
    my_path = (char*)os::malloc(len, mtInternal);
    if (my_path == nullptr) {
      warning("Cannot create heap dump file. Out of system memory.");
      return;
    }
    strncpy(my_path, base_path, len);
  } else {
    // Append a sequence number id for dumps following the first
    const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and '\0'
    my_path = (char*)os::malloc(len, mtInternal);
    if (my_path == nullptr) {
      warning("Cannot create heap dump file. Out of system memory.");
      return;
    }
    jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
  }
  dump_file_seq++; // increment seq number for next time we dump

  HeapDumper dumper(false /* no GC before heap dump */,
                    oome  /* pass along out-of-memory-error flag */);
  dumper.dump(my_path, tty, HeapDumpGzipLevel);
  os::free(my_path);
}
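// Illustrative note (not part of the dumper): with default settings and a
// hypothetical process id of 1234, dump_heap() above produces
// "java_pid1234.hprof" ("java_pid1234.hprof.gz" when HeapDumpGzipLevel > 0)
// for the first dump, and "java_pid1234.hprof.1", "java_pid1234.hprof.2", ...
// for subsequent dumps in the same VM lifetime.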