/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2023, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "services/heapDumperCompression.hpp"
#include "services/threadService.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#ifdef LINUX
#include "os_linux.hpp"
#endif

/*
 * HPROF binary format - description copied from:
 *   src/share/demo/jvmti/hprof/hprof_io.c
 *
 *
 *  header    "JAVA PROFILE 1.0.2" (0-terminated)
 *
 *  u4        size of identifiers. Identifiers are used to represent
 *            UTF8 strings, objects, stack traces, etc. They usually
 *            have the same size as host pointers.
 *  u4        high word
 *  u4        low word    number of milliseconds since 0:00 GMT, 1/1/70
 *  [record]* a sequence of records.
 *
 *
 * Record format:
 *
 * u1         a TAG denoting the type of the record
 * u4         number of *microseconds* since the time stamp in the
 *            header. (wraps around in a little more than an hour)
 * u4         number of bytes *remaining* in the record. Note that
 *            this number excludes the tag and the length field itself.
 * [u1]*      BODY of the record (a sequence of bytes)
 *
 *
 * The following TAGs are supported:
 *
 * TAG           BODY       notes
 *----------------------------------------------------------
 * HPROF_UTF8               a UTF8-encoded name
 *
 *               id         name ID
 *               [u1]*      UTF8 characters (no trailing zero)
 *
 * HPROF_LOAD_CLASS         a newly loaded class
 *
 *                u4        class serial number (> 0)
 *                id        class object ID
 *                u4        stack trace serial number
 *                id        class name ID
 *
 * HPROF_UNLOAD_CLASS       an unloading class
 *
 *                u4        class serial_number
 *
 * HPROF_FRAME              a Java stack frame
 *
 *                id        stack frame ID
 *                id        method name ID
 *                id        method signature ID
 *                id        source file name ID
 *                u4        class serial number
 *                i4        line number. >0: normal
 *                                       -1: unknown
 *                                       -2: compiled method
 *                                       -3: native method
 *
 * HPROF_TRACE              a Java stack trace
 *
 *               u4         stack trace serial number
 *               u4         thread serial number
 *               u4         number of frames
 *               [id]*      stack frame IDs
 *
 *
 * HPROF_ALLOC_SITES        a set of heap allocation sites, obtained after GC
 *
 *               u2         flags 0x0001: incremental vs. complete
 *                                0x0002: sorted by allocation vs. live
 *                                0x0004: whether to force a GC
 *               u4         cutoff ratio
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *               u4         number of sites that follow
 *               [u1        is_array: 0:  normal object
 *                                    2:  object array
 *                                    4:  boolean array
 *                                    5:  char array
 *                                    6:  float array
 *                                    7:  double array
 *                                    8:  byte array
 *                                    9:  short array
 *                                    10: int array
 *                                    11: long array
 *                u4        class serial number (may be zero during startup)
 *                u4        stack trace serial number
 *                u4        number of bytes alive
 *                u4        number of instances alive
 *                u4        number of bytes allocated
 *                u4]*      number of instances allocated
 *
 * HPROF_START_THREAD       a newly started thread.
 *
 *               u4         thread serial number (> 0)
 *               id         thread object ID
 *               u4         stack trace serial number
 *               id         thread name ID
 *               id         thread group name ID
 *               id         thread group parent name ID
 *
 * HPROF_END_THREAD         a terminating thread.
 *
 *               u4         thread serial number
 *
 * HPROF_HEAP_SUMMARY       heap summary
 *
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *
 * HPROF_HEAP_DUMP          denotes a heap dump
 *
 *               [heap dump sub-records]*
 *
 *                          The following kinds of heap dump sub-records
 *                          are supported:
 *
 *               u1         sub-record type
 *
 *               HPROF_GC_ROOT_UNKNOWN         unknown root
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_OBJ      thread object
 *
 *                          id         thread object ID  (may be 0 for a
 *                                     thread newly attached through JNI)
 *                          u4         thread sequence number
 *                          u4         stack trace sequence number
 *
 *               HPROF_GC_ROOT_JNI_GLOBAL      JNI global ref root
 *
 *                          id         object ID
 *                          id         JNI global ref ID
 *
 *               HPROF_GC_ROOT_JNI_LOCAL       JNI local ref
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_JAVA_FRAME      Java stack frame
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_NATIVE_STACK    Native stack
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_STICKY_CLASS    System class
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_BLOCK    Reference from thread block
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_MONITOR_USED    Busy monitor
 *
 *                          id         object ID
 *
 *               HPROF_GC_CLASS_DUMP           dump of a class object
 *
 *                          id         class object ID
 *                          u4         stack trace serial number
 *                          id         super class object ID
 *                          id         class loader object ID
 *                          id         signers object ID
 *                          id         protection domain object ID
 *                          id         reserved
 *                          id         reserved
 *
 *                          u4         instance size (in bytes)
 *
 *                          u2         size of constant pool
 *                          [u2,       constant pool index,
 *                           ty,       type
 *                                     2:  object
 *                                     4:  boolean
 *                                     5:  char
 *                                     6:  float
 *                                     7:  double
 *                                     8:  byte
 *                                     9:  short
 *                                     10: int
 *                                     11: long
 *                           vl]*      and value
 *
 *                          u2         number of static fields
 *                          [id,       static field name,
 *                           ty,       type,
 *                           vl]*      and value
 *
 *                          u2         number of inst. fields (not inc. super)
 *                          [id,       instance field name,
 *                           ty]*      type
 *
 *               HPROF_GC_INSTANCE_DUMP        dump of a normal object
 *
 *                          id         object ID
 *                          u4         stack trace serial number
 *                          id         class object ID
 *                          u4         number of bytes that follow
 *                          [vl]*      instance field values (class, followed
 *                                     by super, super's super ...)
 *
 *               HPROF_GC_OBJ_ARRAY_DUMP       dump of an object array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          id         array class ID
 *                          [id]*      elements
 *
 *               HPROF_GC_PRIM_ARRAY_DUMP      dump of a primitive array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          u1         element type
 *                                     4:  boolean array
 *                                     5:  char array
 *                                     6:  float array
 *                                     7:  double array
 *                                     8:  byte array
 *                                     9:  short array
 *                                     10: int array
 *                                     11: long array
 *                          [u1]*      elements
 *
 * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 *
 *                u4        total number of samples
 *                u4        # of traces
 *               [u4        # of samples
 *                u4]*      stack trace serial number
 *
 * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 *
 *                u4        0x00000001: alloc traces on/off
 *                          0x00000002: cpu sampling on/off
 *                u2        stack trace depth
 *
 *
 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 * be generated as a sequence of heap dump segments. This sequence is
 * terminated by an end record. The additional tags allowed by format
 * "JAVA PROFILE 1.0.2" are:
 *
 * HPROF_HEAP_DUMP_SEGMENT  denotes a heap dump segment
 *
 *               [heap dump sub-records]*
 *               The same sub-record types allowed by HPROF_HEAP_DUMP
 *
 * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 *
 */
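
// As an illustration of the record layout above (not part of the copied spec):
// with 8-byte identifiers, a HPROF_UTF8 record for the 4-character name "main"
// occupies 9 + 12 bytes:
//
//   u1    0x01       tag (HPROF_UTF8)
//   u4    <delta>    microseconds since the header timestamp
//   u4    12         bytes remaining: 8 (name ID) + 4 (UTF8 bytes)
//   id    <name ID>
//   [u1]  'm' 'a' 'i' 'n'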


// HPROF tags

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};
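
// Note: tag values are reused across the groups above (e.g. 0x01 serves as both
// HPROF_UTF8 and HPROF_GC_ROOT_JNI_GLOBAL); the context (top-level record,
// field type, or heap dump sub-record) determines the meaning.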

// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200
};

// Supports I/O operations for a dump
// Base class for serial and parallel dump writers
class AbstractDumpWriter : public CHeapObj<mtInternal> {
 protected:
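  // dump_segment_header_size = u1 tag + u4 timestamp + u4 length = 9 bytes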
  enum {
    io_buffer_max_size = 1*M,
    dump_segment_header_size = 9
  };

  char* _buffer;    // internal buffer
  size_t _size;
  size_t _pos;

  bool _in_dump_segment; // Are we currently in a dump segment?
  bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
  DEBUG_ONLY(size_t _sub_record_left;) // Number of bytes not yet written for the current sub-record.
  DEBUG_ONLY(bool _sub_record_ended;) // True if end_sub_record() has been called for the current sub-record.

  char* buffer() const                          { return _buffer; }
  size_t buffer_size() const                    { return _size; }
  void set_position(size_t pos)                 { _pos = pos; }

  // Must only be called if there is enough room in the buffer.
  void write_fast(const void* s, size_t len);

  // Returns true if we have enough room in the buffer for 'len' bytes.
  bool can_write_fast(size_t len);

  void write_address(address a);

 public:
  AbstractDumpWriter() :
    _buffer(nullptr),
    _size(io_buffer_max_size),
    _pos(0),
    _in_dump_segment(false) { }

  // Total number of bytes written to the disk
  virtual julong bytes_written() const = 0;
  // Returns non-null if an error occurred
  virtual char const* error() const = 0;

  size_t position() const                       { return _pos; }
  // writer functions
  virtual void write_raw(const void* s, size_t len);
  void write_u1(u1 x);
  void write_u2(u2 x);
  void write_u4(u4 x);
  void write_u8(u8 x);
  void write_objectID(oop o);
  void write_objectID(uintptr_t id);
  void write_rootID(oop* p);
  void write_symbolID(Symbol* s);
  void write_classID(Klass* k);
  void write_id(u4 x);

  // Starts a new sub-record. Starts a new heap dump segment if needed.
  void start_sub_record(u1 tag, u4 len);
  // Ends the current sub-record.
  void end_sub_record();
  // Finishes the current dump segment if not already finished.
  void finish_dump_segment();
  // Flush internal buffer to persistent storage
  virtual void flush() = 0;
};

void AbstractDumpWriter::write_fast(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  assert(buffer_size() - position() >= len, "Must fit");
  DEBUG_ONLY(_sub_record_left -= len);
  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

bool AbstractDumpWriter::can_write_fast(size_t len) {
  return buffer_size() - position() >= len;
}

// write raw bytes
void AbstractDumpWriter::write_raw(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  DEBUG_ONLY(_sub_record_left -= len);

  // flush buffer to make room.
  while (len > buffer_size() - position()) {
    assert(!_in_dump_segment || _is_huge_sub_record,
           "Cannot overflow in non-huge sub-record.");
    size_t to_write = buffer_size() - position();
    memcpy(buffer() + position(), s, to_write);
    s = (void*) ((char*) s + to_write);
    len -= to_write;
    set_position(position() + to_write);
    flush();
  }

  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

// Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
#define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
                                      else write_raw((p), (len)); } while (0)

void AbstractDumpWriter::write_u1(u1 x) {
  WRITE_KNOWN_TYPE(&x, 1);
}

void AbstractDumpWriter::write_u2(u2 x) {
  u2 v;
  Bytes::put_Java_u2((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 2);
}

void AbstractDumpWriter::write_u4(u4 x) {
  u4 v;
  Bytes::put_Java_u4((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 4);
}

void AbstractDumpWriter::write_u8(u8 x) {
  u8 v;
  Bytes::put_Java_u8((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 8);
}

void AbstractDumpWriter::write_address(address a) {
#ifdef _LP64
  write_u8((u8)a);
#else
  write_u4((u4)a);
#endif
}

void AbstractDumpWriter::write_objectID(oop o) {
  write_address(cast_from_oop<address>(o));
}

void AbstractDumpWriter::write_objectID(uintptr_t id) {
  write_address((address)id);
}

void AbstractDumpWriter::write_rootID(oop* p) {
  write_address((address)p);
}

void AbstractDumpWriter::write_symbolID(Symbol* s) {
  write_address((address)((uintptr_t)s));
}

void AbstractDumpWriter::write_id(u4 x) {
#ifdef _LP64
  write_u8((u8) x);
#else
  write_u4(x);
#endif
}

// We use the java mirror as the class ID
void AbstractDumpWriter::write_classID(Klass* k) {
  write_objectID(k->java_mirror());
}

void AbstractDumpWriter::finish_dump_segment() {
  if (_in_dump_segment) {
    assert(_sub_record_left == 0, "Last sub-record not written completely");
    assert(_sub_record_ended, "sub-record must have ended");

    // Fix up the dump segment length unless the last sub-record was huge
    // (in which case the segment length was already set to the correct value initially).
    if (!_is_huge_sub_record) {
      assert(position() > dump_segment_header_size, "Dump segment should have some content");
      Bytes::put_Java_u4((address) (buffer() + 5),
                         (u4) (position() - dump_segment_header_size));
    } else {
      // We just finished a huge sub-record.
      // Set _is_huge_sub_record to false so the parallel dump writer can flush data to file.
      _is_huge_sub_record = false;
    }

    _in_dump_segment = false;
    flush();
  }
}

void AbstractDumpWriter::start_sub_record(u1 tag, u4 len) {
  if (!_in_dump_segment) {
    if (position() > 0) {
      flush();
    }

    assert(position() == 0 && buffer_size() > dump_segment_header_size, "Must be at the start");

    write_u1(HPROF_HEAP_DUMP_SEGMENT);
    write_u4(0); // timestamp
    // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
    // this is already the correct length, since we don't add more sub-records.
    write_u4(len);
    assert(Bytes::get_Java_u4((address)(buffer() + 5)) == len, "Inconsistent size!");
    _in_dump_segment = true;
    _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
  } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
    // This sub-record will not fit completely, or the last sub-record was huge.
    // Finish the current segment and try again.
    finish_dump_segment();
    start_sub_record(tag, len);

    return;
  }

  DEBUG_ONLY(_sub_record_left = len);
  DEBUG_ONLY(_sub_record_ended = false);

  write_u1(tag);
}

void AbstractDumpWriter::end_sub_record() {
  assert(_in_dump_segment, "must be in dump segment");
  assert(_sub_record_left == 0, "sub-record not written completely");
  assert(!_sub_record_ended, "Must not have ended yet");
  DEBUG_ONLY(_sub_record_ended = true);
}

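// Sub-record protocol (illustrative sketch): each sub-record is bracketed by
// start_sub_record()/end_sub_record(); finish_dump_segment() closes the
// enclosing heap dump segment once no more sub-records follow.
//
//   writer->start_sub_record(tag, len);  // 'len' includes the tag byte itself
//   // ... write_* calls for the remaining len - 1 bytes (checked in debug builds)
//   writer->end_sub_record();
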
// Supports I/O operations for a dump to a file, with optional compression

class DumpWriter : public AbstractDumpWriter {
private:
  FileWriter* _writer;
  AbstractCompressor* _compressor;
  size_t _bytes_written;
  char* _error;
  // Compression support
  char* _out_buffer;
  size_t _out_size;
  size_t _out_pos;
  char* _tmp_buffer;
  size_t _tmp_size;

private:
  void do_compress();

public:
  DumpWriter(const char* path, bool overwrite, AbstractCompressor* compressor);
  ~DumpWriter();
  julong bytes_written() const override        { return (julong) _bytes_written; }
  char const* error() const override           { return _error; }
  void set_error(const char* error)            { _error = (char*)error; }
  bool has_error() const                       { return _error != nullptr; }
  const char* get_file_path() const            { return _writer->get_file_path(); }
  AbstractCompressor* compressor()             { return _compressor; }
  bool is_overwrite() const                    { return _writer->is_overwrite(); }

  void flush() override;

private:
  // internals for DumpMerger
  friend class DumpMerger;
  void set_bytes_written(julong bytes_written) { _bytes_written = bytes_written; }
  int get_fd() const                           { return _writer->get_fd(); }
  void set_compressor(AbstractCompressor* p)   { _compressor = p; }
};

DumpWriter::DumpWriter(const char* path, bool overwrite, AbstractCompressor* compressor) :
  AbstractDumpWriter(),
  _writer(new (std::nothrow) FileWriter(path, overwrite)),
  _compressor(compressor),
  _bytes_written(0),
  _error(nullptr),
  _out_buffer(nullptr),
  _out_size(0),
  _out_pos(0),
  _tmp_buffer(nullptr),
  _tmp_size(0) {
  _error = (char*)_writer->open_writer();
  if (_error == nullptr) {
    _buffer = (char*)os::malloc(io_buffer_max_size, mtInternal);
    if (compressor != nullptr) {
      _error = (char*)_compressor->init(io_buffer_max_size, &_out_size, &_tmp_size);
      if (_error == nullptr) {
        if (_out_size > 0) {
          _out_buffer = (char*)os::malloc(_out_size, mtInternal);
        }
        if (_tmp_size > 0) {
          _tmp_buffer = (char*)os::malloc(_tmp_size, mtInternal);
        }
      }
    }
  }
  // initialize internal buffer
  _pos = 0;
  _size = io_buffer_max_size;
}

DumpWriter::~DumpWriter() {
  if (_buffer != nullptr) {
    os::free(_buffer);
  }
  if (_out_buffer != nullptr) {
    os::free(_out_buffer);
  }
  if (_tmp_buffer != nullptr) {
    os::free(_tmp_buffer);
  }
  if (_writer != nullptr) {
    delete _writer;
  }
  _bytes_written = -1;
}

// flush any buffered bytes to the file
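// If an error was already recorded, the buffered bytes are discarded.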
void DumpWriter::flush() {
  if (_pos <= 0) {
    return;
  }
  if (has_error()) {
    _pos = 0;
    return;
  }
  char* result = nullptr;
  if (_compressor == nullptr) {
    result = (char*)_writer->write_buf(_buffer, _pos);
    _bytes_written += _pos;
  } else {
    do_compress();
    if (!has_error()) {
      result = (char*)_writer->write_buf(_out_buffer, _out_pos);
      _bytes_written += _out_pos;
    }
  }
  _pos = 0; // reset pos to make internal buffer available

  if (result != nullptr) {
    set_error(result);
  }
}

void DumpWriter::do_compress() {
  const char* msg = _compressor->compress(_buffer, _pos, _out_buffer, _out_size,
                                          _tmp_buffer, _tmp_size, &_out_pos);

  if (msg != nullptr) {
    set_error(msg);
  }
}

class DumperClassCacheTable;
class DumperClassCacheTableEntry;
class DumperFlatObject;
class DumperFlatObjectList;

// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // returns the size of the dumped instance fields of the given class
  static u4 instance_size(InstanceKlass* ik);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dumps the raw values of the instance fields of the given object; fills flat_fields
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
                                   DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields);
  // gets the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object, fills flat_fields
  static void dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
                            DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array; fills flat_elements if the array is a flat array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, aot, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name(),
                  p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name());
      }
    }
  }

  // Direct instances of ObjArrayKlass represent the Java types that Java code can see.
  // RefArrayKlass/FlatArrayKlass describe different implementations of the arrays;
  // filter them out to avoid duplicates.
  static bool filter_out_klass(Klass* k) {
    if (k->is_objArray_klass() && k->kind() != Klass::KlassKind::ObjArrayKlassKind) {
      return true;
    }
    return false;
  }
};

// Hash table mapping klasses to klass metadata. This should greatly improve the
// heap dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
public:
  class FieldDescriptor {
  private:
    char _sigs_start;
    int _offset;
    InlineKlass* _inline_klass; // nullptr for heap object
    LayoutKind _layout_kind;
  public:
    FieldDescriptor(): _sigs_start(0), _offset(0), _inline_klass(nullptr), _layout_kind(LayoutKind::UNKNOWN) {}

    template<typename FieldStreamType>
    FieldDescriptor(const FieldStreamType& field)
      : _sigs_start(field.signature()->char_at(0)), _offset(field.offset())
    {
      if (field.is_flat()) {
        const fieldDescriptor& fd = field.field_descriptor();
        InstanceKlass* holder_klass = fd.field_holder();
        InlineLayoutInfo* layout_info = holder_klass->inline_layout_info_adr(fd.index());
        _inline_klass = layout_info->klass();
        _layout_kind = layout_info->kind();
      } else {
        _inline_klass = nullptr;
        _layout_kind = LayoutKind::REFERENCE;
      }
    }

    char sig_start() const            { return _sigs_start; }
    int offset() const                { return _offset; }
    bool is_flat() const              { return _inline_klass != nullptr; }
    InlineKlass* inline_klass() const { return _inline_klass; }
    LayoutKind layout_kind() const    { return _layout_kind; }
    bool is_flat_nullable() const     { return _layout_kind == LayoutKind::NULLABLE_ATOMIC_FLAT; }
  };

private:
  GrowableArray<FieldDescriptor> _fields;
  u4 _instance_size;

public:
  DumperClassCacheTableEntry(): _instance_size(0) {}

  template<typename FieldStreamType>
  void add_field(const FieldStreamType& field) {
    _fields.push(FieldDescriptor(field));
    _instance_size += DumperSupport::sig2size(field.signature());
  }

  const FieldDescriptor& field(int index) const { return _fields.at(index); }
  int field_count() const { return _fields.length(); }
  u4 instance_size() const { return _instance_size; }
};

class DumperClassCacheTable {
private:
  // The HashTable size is specified at compile time, so we
  // use 1031, which is the first prime after 1024.
  static constexpr size_t TABLE_SIZE = 1031;

  // Maintain the cache for N classes. This limits the memory footprint
  // impact, regardless of how many classes we have in the dump.
  // This also improves lookup performance by keeping the statically
  // sized table from becoming overloaded.
  static constexpr int CACHE_TOP = 256;

  typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
                    TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
  PtrTable* _ptrs;

  // Single-slot cache to handle the common case of objects of the same
  // class back-to-back, e.g. from T[].
  InstanceKlass* _last_ik;
  DumperClassCacheTableEntry* _last_entry;

  void unlink_all(PtrTable* table) {
    class CleanupEntry: StackObj {
    public:
      bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
        delete entry;
        return true;
      }
    } cleanup;
    table->unlink(&cleanup);
  }

public:
  DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
    if (_last_ik == ik) {
      return _last_entry;
    }

    DumperClassCacheTableEntry* entry;
    DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
    if (from_cache == nullptr) {
      entry = new DumperClassCacheTableEntry();
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          entry->add_field(fld);
        }
      }

      if (_ptrs->number_of_entries() >= CACHE_TOP) {
        // We do not track the individual hit rates for table entries.
        // Purge the entire table, and let the cache catch up with the new
        // distribution.
        unlink_all(_ptrs);
      }

      _ptrs->put(ik, entry);
    } else {
      entry = *from_cache;
    }

    // Remember for single-slot cache.
    _last_ik = ik;
    _last_entry = entry;

    return entry;
  }

  DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}

  ~DumperClassCacheTable() {
    unlink_all(_ptrs);
    delete _ptrs;
  }
};
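
// Usage sketch: entries returned by lookup_or_create() are owned by the table
// and may be purged by a later lookup (see CACHE_TOP), so callers use them
// immediately, e.g.:
//   DumperClassCacheTableEntry* entry = cache->lookup_or_create(ik);
//   u4 field_data_size = entry->instance_size();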

// Describes a flat object (a flattened field or an element of a flat array) within its holder oop
class DumperFlatObject: public CHeapObj<mtServiceability> {
  friend class DumperFlatObjectList;
private:
  DumperFlatObject* _next;

  const uintptr_t _id; // object id

  const int _offset;
  InlineKlass* const _inline_klass;

public:
  DumperFlatObject(uintptr_t id, int offset, InlineKlass* inline_klass)
    : _next(nullptr), _id(id), _offset(offset), _inline_klass(inline_klass) {
  }

  uintptr_t object_id()       const { return _id; }
  int offset()                const { return _offset; }
  InlineKlass* inline_klass() const { return _inline_klass; }
};

class FlatObjectIdProvider {
public:
  virtual uintptr_t get_id() = 0;
};

// Simple FIFO.
class DumperFlatObjectList {
private:
  FlatObjectIdProvider* _id_provider;
  DumperFlatObject* _head;
  DumperFlatObject* _tail;

  void push(DumperFlatObject* obj) {
    if (_head == nullptr) {
      _head = _tail = obj;
    } else {
      assert(_tail != nullptr, "must be");
      _tail->_next = obj;
      _tail = obj;
    }
  }

public:
  DumperFlatObjectList(FlatObjectIdProvider* id_provider): _id_provider(id_provider), _head(nullptr), _tail(nullptr) {}

  bool is_empty() const { return _head == nullptr; }

  uintptr_t push(int offset, InlineKlass* inline_klass) {
    uintptr_t id = _id_provider->get_id();
    DumperFlatObject* obj = new DumperFlatObject(id, offset, inline_klass);
    push(obj);
    return id;
  }

  DumperFlatObject* pop() {
    assert(!is_empty(), "sanity");
    DumperFlatObject* element = _head;
    _head = element->_next;
    element->_next = nullptr;
    return element;
  }
};
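
// While dumping a holder object, the dumper pushes its flattened fields or
// elements here with freshly assigned object IDs (via FlatObjectIdProvider);
// they are popped later and written as separate HPROF_GC_INSTANCE_DUMP records
// (see DumperSupport::dump_instance).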

// write a header of the given type
void DumperSupport::write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
  writer->write_u1(tag);
  writer->write_u4(0);                  // current ticks
  writer->write_u4(len);
}

// returns hprof tag for the given type signature
hprofTag DumperSupport::sig2tag(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_ARRAY    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_BYTE     : return HPROF_BYTE;
    case JVM_SIGNATURE_CHAR     : return HPROF_CHAR;
    case JVM_SIGNATURE_FLOAT    : return HPROF_FLOAT;
    case JVM_SIGNATURE_DOUBLE   : return HPROF_DOUBLE;
    case JVM_SIGNATURE_INT      : return HPROF_INT;
    case JVM_SIGNATURE_LONG     : return HPROF_LONG;
    case JVM_SIGNATURE_SHORT    : return HPROF_SHORT;
    case JVM_SIGNATURE_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

hprofTag DumperSupport::type2tag(BasicType type) {
  switch (type) {
    case T_BYTE     : return HPROF_BYTE;
    case T_CHAR     : return HPROF_CHAR;
    case T_FLOAT    : return HPROF_FLOAT;
    case T_DOUBLE   : return HPROF_DOUBLE;
    case T_INT      : return HPROF_INT;
    case T_LONG     : return HPROF_LONG;
    case T_SHORT    : return HPROF_SHORT;
    case T_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

u4 DumperSupport::sig2size(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS:
    case JVM_SIGNATURE_ARRAY: return sizeof(address);
    case JVM_SIGNATURE_BOOLEAN:
    case JVM_SIGNATURE_BYTE: return 1;
    case JVM_SIGNATURE_SHORT:
    case JVM_SIGNATURE_CHAR: return 2;
    case JVM_SIGNATURE_INT:
    case JVM_SIGNATURE_FLOAT: return 4;
    case JVM_SIGNATURE_LONG:
    case JVM_SIGNATURE_DOUBLE: return 8;
    default: ShouldNotReachHere(); /* to shut up compiler */ return 0;
  }
}
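
// For example (64-bit, illustrative): an instance with fields 'int a',
// 'Object b' and 'long c' contributes 4 + 8 + 8 = 20 bytes of hprof field
// data (per sig2size), independent of the object's in-heap layout.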

template<typename T, typename F> T bit_cast(F from) { // replace with std::bit_cast when we can use C++20
  T to;
  static_assert(sizeof(to) == sizeof(from), "must be of the same size");
  memcpy(&to, &from, sizeof(to));
  return to;
}

// dump a jfloat
void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
  if (g_isnan(f)) {
    writer->write_u4(0x7fc00000); // collapsing NaNs
  } else {
    writer->write_u4(bit_cast<u4>(f));
  }
}

// dump a jdouble
void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
  if (g_isnan(d)) {
    writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
  } else {
    writer->write_u8(bit_cast<u8>(d));
  }
}

// dumps the raw value of the given field
void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
  switch (type) {
    case JVM_SIGNATURE_CLASS :
    case JVM_SIGNATURE_ARRAY : {
      oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
      o = mask_dormant_archived_object(o, obj);
      assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
      writer->write_objectID(o);
      break;
    }
    case JVM_SIGNATURE_BYTE : {
      jbyte b = obj->byte_field(offset);
      writer->write_u1(b);
      break;
    }
    case JVM_SIGNATURE_CHAR : {
      jchar c = obj->char_field(offset);
      writer->write_u2(c);
      break;
    }
    case JVM_SIGNATURE_SHORT : {
      jshort s = obj->short_field(offset);
      writer->write_u2(s);
      break;
    }
    case JVM_SIGNATURE_FLOAT : {
      jfloat f = obj->float_field(offset);
      dump_float(writer, f);
      break;
    }
    case JVM_SIGNATURE_DOUBLE : {
      jdouble d = obj->double_field(offset);
      dump_double(writer, d);
      break;
    }
    case JVM_SIGNATURE_INT : {
      jint i = obj->int_field(offset);
      writer->write_u4(i);
      break;
    }
    case JVM_SIGNATURE_LONG : {
      jlong l = obj->long_field(offset);
      writer->write_u8(l);
      break;
    }
    case JVM_SIGNATURE_BOOLEAN : {
      jboolean b = obj->bool_field(offset);
      writer->write_u1(b);
      break;
    }
    default : {
      ShouldNotReachHere();
      break;
    }
  }
}

// returns the size of the dumped instance fields of the given class
u4 DumperSupport::instance_size(InstanceKlass* ik) {
  u4 size = 0;
  for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      size += sig2size(fld.signature());
    }
  }
  return size;
}

u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
  field_count = 0;
  u4 size = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (fldc.access_flags().is_static()) {
      assert(!fldc.is_flat(), "static fields cannot be flat");

      field_count++;
      size += sig2size(fldc.signature());
    }
  }

  // Add in resolved_references which is referenced by the cpCache
  // The resolved_references is an array per InstanceKlass holding the
  // strings and other oops resolved from the constant pool.
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    field_count++;
    size += sizeof(address);

    // Add in the resolved_references of the used previous versions of the class
    // in the case of RedefineClasses
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      field_count++;
      size += sizeof(address);
      prev = prev->previous_versions();
    }
  }

  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
  // arrays.
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    field_count++;
    size += sizeof(address);
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
}

// dumps static fields of the given class
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!fld.is_flat(), "static fields cannot be flat");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}

// dumps the raw values of the instance fields of the given object; fills flat_fields
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
                                         DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields) {
  assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
  for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
    const DumperClassCacheTableEntry::FieldDescriptor& field = class_cache_entry->field(idx);
    int field_offset = offset + field.offset();
    if (field.is_flat()) {
      // check for possible nulls
      if (field.is_flat_nullable()) {
        address payload = cast_from_oop<address>(o) + field_offset;
        if (field.inline_klass()->is_payload_marked_as_null(payload)) {
          writer->write_objectID(nullptr);
          continue;
        }
      }
      uintptr_t object_id = flat_fields->push(field_offset, field.inline_klass());
      writer->write_objectID(object_id);
    } else {
      dump_field_value(writer, field.sig_start(), o, field_offset);
    }
  }
}

// gets the count of the instance fields for a given class
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
  u2 field_count = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (!fldc.access_flags().is_static()) {
      field_count++;
    }
  }

  return field_count;
}

// dumps the definition of the instance fields for a given class
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik) {
  // dump the field descriptors
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type
    }
  }
}

// creates HPROF_GC_INSTANCE_DUMP record for the given object, fills flat_fields
void DumperSupport::dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
                                  DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields) {
  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  u4 is = cache_entry->instance_size();
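  // tag (u1) + object ID + stack trace SN (u4) + class ID + field-data length (u4) + field data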
  u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;

  writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
  writer->write_objectID(id);
  writer->write_u4(STACK_TRACE_ID);

  // class ID
  writer->write_classID(ik);

  // number of bytes that follow
  writer->write_u4(is);

  // field values
  if (offset != 0) {
    // the object itself is flattened, so all fields are stored without headers
    InlineKlass* inline_klass = InlineKlass::cast(ik);
    offset -= inline_klass->payload_offset();
  }

  dump_instance_fields(writer, o, offset, cache_entry, flat_fields);

  writer->end_sub_record();
}

// creates HPROF_GC_CLASS_DUMP record for the given instance class
void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik) {
  // We can safepoint and do a heap dump at a point where we have a Klass,
  // but no java mirror class has been set up for it. So we need to check
  // that the class is at least loaded, to avoid a crash from a null mirror.
  if (!ik->is_loaded()) {
    return;
  }

  u2 static_fields_count = 0;
  u4 static_size = get_static_fields_size(ik, static_fields_count);
  u2 instance_fields_count = get_instance_fields_count(ik);
  u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
  u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);

  writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);

  // class ID
  writer->write_classID(ik);
  writer->write_u4(STACK_TRACE_ID);

  // super class ID
  InstanceKlass* super = ik->super();
  if (super == nullptr) {
    writer->write_objectID(oop(nullptr));
  } else {
    writer->write_classID(super);
  }

  writer->write_objectID(ik->class_loader());
  writer->write_objectID(ik->signers());
  writer->write_objectID(ik->protection_domain());

  // reserved
  writer->write_objectID(oop(nullptr));
  writer->write_objectID(oop(nullptr));

  // instance size
  writer->write_u4(DumperSupport::instance_size(ik));

  // size of constant pool - ignored by HAT 1.1
  writer->write_u2(0);

  // static fields
  writer->write_u2(static_fields_count);
  dump_static_fields(writer, ik);

  // description of instance fields
  writer->write_u2(instance_fields_count);
  dump_instance_field_descriptors(writer, ik);

  writer->end_sub_record();
}
1398 
1399 // creates HPROF_GC_CLASS_DUMP record for the given array class
1400 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1401   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1402   if (k->is_objArray_klass()) {
1403     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
1404     assert(bk != nullptr, "checking");
1405     if (bk->is_instance_klass()) {
1406       ik = InstanceKlass::cast(bk);
1407     }
1408   }
1409 
1410   u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + 2;
1411   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1412   writer->write_classID(k);
1413   writer->write_u4(STACK_TRACE_ID);
1414 
1415   // super class of array classes is java.lang.Object
1416   InstanceKlass* java_super = k->java_super();
1417   assert(java_super != nullptr, "checking");
1418   writer->write_classID(java_super);
1419 
1420   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1421   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1422   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1423 
1424   writer->write_objectID(oop(nullptr));    // reserved
1425   writer->write_objectID(oop(nullptr));
1426   writer->write_u4(0);             // instance size
1427   writer->write_u2(0);             // constant pool
1428   writer->write_u2(0);             // static fields
1429   writer->write_u2(0);             // instance fields
1430 
  writer->end_sub_record();
}
1434 
// HPROF uses a u4 as the record length field,
// which means we need to truncate arrays that are too long.
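// For example, a long[] of the maximum Java array length (2^31 - 1 elements)
// would need about 16 GB of payload, far over the ~4 GB (max_juint) record
// limit, so its dumped length is truncated to (max_juint - header_size) / 8 elements.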
1437 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1438   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1439   assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type");
1440 
1441   int length = array->length();
1442 
1443   int type_size;
1444   if (type == T_OBJECT || type == T_FLAT_ELEMENT) {
1445     type_size = sizeof(address);
1446   } else {
1447     type_size = type2aelembytes(type);
1448   }
1449 
1450   size_t length_in_bytes = (size_t)length * type_size;
1451   uint max_bytes = max_juint - header_size;
1452 
1453   if (length_in_bytes > max_bytes) {
1454     length = max_bytes / type_size;
1455     length_in_bytes = (size_t)length * type_size;
1456 
1457     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1458             type2name_tab[type], array->length(), length);
1459   }
1460   return length;
1461 }
1462 
1463 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1464 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements) {
1465   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1466   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1467   int length = calculate_array_max_length(writer, array, header_size);
1468   u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1469 
1470   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1471   writer->write_objectID(array);
1472   writer->write_u4(STACK_TRACE_ID);
1473   writer->write_u4(length);
1474 
1475   // array class ID
1476   writer->write_classID(array->klass());
1477 
1478   // [id]* elements
1479   if (array->is_flatArray()) {
1480     flatArrayOop farray = flatArrayOop(array);
1481     FlatArrayKlass* faklass = FlatArrayKlass::cast(farray->klass());
1482 
1483     InlineKlass* vk = faklass->element_klass();
1484     bool need_null_check = faklass->layout_kind() == LayoutKind::NULLABLE_ATOMIC_FLAT;
1485 
1486     for (int index = 0; index < length; index++) {
1487       address addr = (address)farray->value_at_addr(index, faklass->layout_helper());
1488       // check for null
1489       if (need_null_check) {
1490         if (vk->is_payload_marked_as_null(addr)) {
1491           writer->write_objectID(nullptr);
1492           continue;
1493         }
1494       }
1495       // offset in the array oop
1496       int offset = (int)(addr - cast_from_oop<address>(farray));
1497       uintptr_t object_id = flat_elements->push(offset, vk);
1498       writer->write_objectID(object_id);
1499     }
1500   } else {
1501     for (int index = 0; index < length; index++) {
1502       oop o = array->obj_at(index);
1503       o = mask_dormant_archived_object(o, array);
1504       writer->write_objectID(o);
1505     }
1506   }
1507 
1508   writer->end_sub_record();
1509 }
1510 
1511 #define WRITE_ARRAY(Array, Type, Size, Length) \
1512   for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
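
// For example, WRITE_ARRAY(array, int, u4, length) expands to
//   for (int i = 0; i < length; i++) { writer->write_u4((u4)array->int_at(i)); }
// The element-wise write_u2/u4/u8 calls emit the big-endian byte order HPROF
// requires, so this path is taken when the host byte order differs from it.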
1513 
1514 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1515 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1516   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1517   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1518   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1519 
1520   int length = calculate_array_max_length(writer, array, header_size);
1521   int type_size = type2aelembytes(type);
1522   u4 length_in_bytes = (u4)length * type_size;
1523   u4 size = header_size + length_in_bytes;
1524 
1525   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1526   writer->write_objectID(array);
1527   writer->write_u4(STACK_TRACE_ID);
1528   writer->write_u4(length);
1529   writer->write_u1(type2tag(type));
1530 
1531   // nothing to copy
1532   if (length == 0) {
1533     writer->end_sub_record();
1534     return;
1535   }
1536 
1537   // If the byte ordering is big endian then we can copy most types directly
1538 
1539   switch (type) {
1540     case T_INT : {
1541       if (Endian::is_Java_byte_ordering_different()) {
1542         WRITE_ARRAY(array, int, u4, length);
1543       } else {
1544         writer->write_raw(array->int_at_addr(0), length_in_bytes);
1545       }
1546       break;
1547     }
1548     case T_BYTE : {
1549       writer->write_raw(array->byte_at_addr(0), length_in_bytes);
1550       break;
1551     }
1552     case T_CHAR : {
1553       if (Endian::is_Java_byte_ordering_different()) {
1554         WRITE_ARRAY(array, char, u2, length);
1555       } else {
1556         writer->write_raw(array->char_at_addr(0), length_in_bytes);
1557       }
1558       break;
1559     }
1560     case T_SHORT : {
1561       if (Endian::is_Java_byte_ordering_different()) {
1562         WRITE_ARRAY(array, short, u2, length);
1563       } else {
1564         writer->write_raw(array->short_at_addr(0), length_in_bytes);
1565       }
1566       break;
1567     }
1568     case T_BOOLEAN : {
1569       if (Endian::is_Java_byte_ordering_different()) {
1570         WRITE_ARRAY(array, bool, u1, length);
1571       } else {
1572         writer->write_raw(array->bool_at_addr(0), length_in_bytes);
1573       }
1574       break;
1575     }
1576     case T_LONG : {
1577       if (Endian::is_Java_byte_ordering_different()) {
1578         WRITE_ARRAY(array, long, u8, length);
1579       } else {
1580         writer->write_raw(array->long_at_addr(0), length_in_bytes);
1581       }
1582       break;
1583     }
1584 
    // handle floats/doubles specially to ensure that NaNs are
    // written correctly. TODO: check if we can avoid this on processors that
    // use IEEE 754.
1588 
1589     case T_FLOAT : {
1590       for (int i = 0; i < length; i++) {
1591         dump_float(writer, array->float_at(i));
1592       }
1593       break;
1594     }
1595     case T_DOUBLE : {
1596       for (int i = 0; i < length; i++) {
1597         dump_double(writer, array->double_at(i));
1598       }
1599       break;
1600     }
1601     default : ShouldNotReachHere();
1602   }
1603 
1604   writer->end_sub_record();
1605 }
1606 
1607 // create a HPROF_FRAME record of the given Method* and bci
1608 void DumperSupport::dump_stack_frame(AbstractDumpWriter* writer,
1609                                      int frame_serial_num,
1610                                      int class_serial_num,
1611                                      Method* m,
1612                                      int bci) {
1613   int line_number;
1614   if (m->is_native()) {
1615     line_number = -3;  // native frame
1616   } else {
1617     line_number = m->line_number_from_bci(bci);
1618   }
1619 
1620   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1621   writer->write_id(frame_serial_num);               // frame serial number
1622   writer->write_symbolID(m->name());                // method's name
1623   writer->write_symbolID(m->signature());           // method's signature
1624 
1625   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1626   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1627   writer->write_u4(class_serial_num);               // class serial number
1628   writer->write_u4((u4) line_number);               // line number
1629 }
1630 
1631 
1632 // Support class used to generate HPROF_UTF8 records from the entries in the
1633 // SymbolTable.
1634 
1635 class SymbolTableDumper : public SymbolClosure {
1636  private:
1637   AbstractDumpWriter* _writer;
1638   AbstractDumpWriter* writer() const                { return _writer; }
1639  public:
1640   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
1641   void do_symbol(Symbol** p);
1642 };
1643 
1644 void SymbolTableDumper::do_symbol(Symbol** p) {
1645   ResourceMark rm;
1646   Symbol* sym = *p;
1647   int len = sym->utf8_length();
1648   if (len > 0) {
1649     char* s = sym->as_utf8();
    DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len); // record: symbol ID + UTF8 bytes (no terminator)
1651     writer()->write_symbolID(sym);
1652     writer()->write_raw(s, len);
1653   }
1654 }
1655 
1656 // Support class used to generate HPROF_GC_CLASS_DUMP records
1657 
1658 class ClassDumper : public KlassClosure {
1659  private:
1660   AbstractDumpWriter* _writer;
1661   AbstractDumpWriter* writer() const { return _writer; }
1662 
1663  public:
1664   ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1665 
1666   void do_klass(Klass* k) {
1667     if (DumperSupport::filter_out_klass(k)) {
1668       return;
1669     }
1670     if (k->is_instance_klass()) {
1671       DumperSupport::dump_instance_class(writer(), InstanceKlass::cast(k));
1672     } else {
1673       DumperSupport::dump_array_class(writer(), k);
1674     }
1675   }
1676 };
1677 
1678 // Support class used to generate HPROF_LOAD_CLASS records
1679 
1680 class LoadedClassDumper : public LockedClassesDo {
1681  private:
1682   AbstractDumpWriter* _writer;
1683   GrowableArray<Klass*>* _klass_map;
1684   u4 _class_serial_num;
1685   AbstractDumpWriter* writer() const { return _writer; }
1686   void add_class_serial_number(Klass* k, int serial_num) {
1687     _klass_map->at_put_grow(serial_num, k);
1688   }
1689  public:
1690   LoadedClassDumper(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map)
1691     : _writer(writer), _klass_map(klass_map), _class_serial_num(0) {}
1692 
1693   void do_klass(Klass* k) {
1694     if (DumperSupport::filter_out_klass(k)) {
1695       return;
1696     }
    // length of the HPROF_LOAD_CLASS record body: class serial number (u4),
    // class ID, stack trace serial number (u4), class name symbol ID
1698     u4 remaining = 2 * oopSize + 2 * sizeof(u4);
1699     DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
1700     // class serial number is just a number
1701     writer()->write_u4(++_class_serial_num);
1702     // class ID
1703     writer()->write_classID(k);
1704     // add the Klass* and class serial number pair
1705     add_class_serial_number(k, _class_serial_num);
1706     writer()->write_u4(STACK_TRACE_ID);
1707     // class name ID
1708     Symbol* name = k->name();
1709     writer()->write_symbolID(name);
1710   }
1711 };
1712 
1713 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
1714 
1715 class JNILocalsDumper : public OopClosure {
1716  private:
1717   AbstractDumpWriter* _writer;
1718   u4 _thread_serial_num;
1719   int _frame_num;
1720   AbstractDumpWriter* writer() const                { return _writer; }
1721  public:
1722   JNILocalsDumper(AbstractDumpWriter* writer, u4 thread_serial_num) {
1723     _writer = writer;
1724     _thread_serial_num = thread_serial_num;
1725     _frame_num = -1;  // default - empty stack
1726   }
1727   void set_frame_number(int n) { _frame_num = n; }
1728   void do_oop(oop* obj_p);
1729   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
1730 };
1731 
1732 void JNILocalsDumper::do_oop(oop* obj_p) {
1733   // ignore null handles
1734   oop o = *obj_p;
1735   if (o != nullptr) {
    u4 size = 1 + sizeof(address) + 4 + 4; // tag + objectID + thread serial (u4) + frame number (u4)
1737     writer()->start_sub_record(HPROF_GC_ROOT_JNI_LOCAL, size);
1738     writer()->write_objectID(o);
1739     writer()->write_u4(_thread_serial_num);
1740     writer()->write_u4((u4)_frame_num);
1741     writer()->end_sub_record();
1742   }
1743 }
1744 
1745 
1746 // Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records
1747 
1748 class JNIGlobalsDumper : public OopClosure {
1749  private:
1750   AbstractDumpWriter* _writer;
1751   AbstractDumpWriter* writer() const                { return _writer; }
1752 
1753  public:
1754   JNIGlobalsDumper(AbstractDumpWriter* writer) {
1755     _writer = writer;
1756   }
1757   void do_oop(oop* obj_p);
1758   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
1759 };
1760 
1761 void JNIGlobalsDumper::do_oop(oop* obj_p) {
1762   oop o = NativeAccess<AS_NO_KEEPALIVE>::oop_load(obj_p);
1763 
1764   // ignore these
1765   if (o == nullptr) return;
  // we ignore global refs to symbols and other internal objects
1767   if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
    u4 size = 1 + 2 * sizeof(address); // tag + objectID + global ref ID
1769     writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size);
1770     writer()->write_objectID(o);
1771     writer()->write_rootID(obj_p);      // global ref ID
1772     writer()->end_sub_record();
1773   }
1774 };
1775 
1776 // Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
1777 
1778 class StickyClassDumper : public KlassClosure {
1779  private:
1780   AbstractDumpWriter* _writer;
1781   AbstractDumpWriter* writer() const                { return _writer; }
1782  public:
1783   StickyClassDumper(AbstractDumpWriter* writer) {
1784     _writer = writer;
1785   }
1786   void do_klass(Klass* k) {
1787     if (k->is_instance_klass()) {
1788       InstanceKlass* ik = InstanceKlass::cast(k);
      u4 size = 1 + sizeof(address); // tag + class ID
1790       writer()->start_sub_record(HPROF_GC_ROOT_STICKY_CLASS, size);
1791       writer()->write_classID(ik);
1792       writer()->end_sub_record();
1793     }
1794   }
1795 };
1796 
1797 // Support class used to generate HPROF_GC_ROOT_JAVA_FRAME records.
1798 
1799 class JavaStackRefDumper : public StackObj {
1800 private:
1801   AbstractDumpWriter* _writer;
1802   u4 _thread_serial_num;
1803   int _frame_num;
1804   AbstractDumpWriter* writer() const { return _writer; }
1805 public:
1806   JavaStackRefDumper(AbstractDumpWriter* writer, u4 thread_serial_num)
1807       : _writer(writer), _thread_serial_num(thread_serial_num), _frame_num(-1) // default - empty stack
1808   {
1809   }
1810 
1811   void set_frame_number(int n) { _frame_num = n; }
1812 
1813   void dump_java_stack_refs(StackValueCollection* values);
1814 };
1815 
1816 void JavaStackRefDumper::dump_java_stack_refs(StackValueCollection* values) {
1817   for (int index = 0; index < values->size(); index++) {
1818     if (values->at(index)->type() == T_OBJECT) {
1819       oop o = values->obj_at(index)();
1820       if (o != nullptr) {
        u4 size = 1 + sizeof(address) + 4 + 4; // tag + objectID + thread serial (u4) + frame number (u4)
1822         writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
1823         writer()->write_objectID(o);
1824         writer()->write_u4(_thread_serial_num);
1825         writer()->write_u4((u4)_frame_num);
1826         writer()->end_sub_record();
1827       }
1828     }
1829   }
1830 }
1831 
1832 // Class to collect, store and dump thread-related data:
1833 // - HPROF_TRACE and HPROF_FRAME records;
1834 // - HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecords.
1835 class ThreadDumper : public CHeapObj<mtInternal> {
1836 public:
1837   enum class ThreadType { Platform, MountedVirtual, UnmountedVirtual };
1838 
1839 private:
1840   ThreadType _thread_type;
1841   JavaThread* _java_thread;
1842   oop _thread_oop;
1843 
1844   GrowableArray<StackFrameInfo*>* _frames;
  // non-null if the thread is the OOM thread
1846   Method* _oome_constructor;
1847   int _thread_serial_num;
1848   int _start_frame_serial_num;
1849 
1850   vframe* get_top_frame() const;
1851 
1852 public:
1853   static bool should_dump_pthread(JavaThread* thread) {
1854     return thread->threadObj() != nullptr && !thread->is_exiting() && !thread->is_hidden_from_external_view();
1855   }
1856 
1857   static bool should_dump_vthread(oop vt) {
1858     return java_lang_VirtualThread::state(vt) != java_lang_VirtualThread::NEW
1859         && java_lang_VirtualThread::state(vt) != java_lang_VirtualThread::TERMINATED;
1860   }
1861 
1862   static bool is_vthread_mounted(oop vt) {
    // The code should be consistent with the "mounted virtual thread" case
    // (VM_HeapDumper::dump_stack_traces(), ThreadDumper::get_top_frame()),
    // i.e. a virtual thread is mounted if its carrierThread is not null
    // and is_vthread_mounted() returns true for the carrier thread.
1867     oop carrier_thread = java_lang_VirtualThread::carrier_thread(vt);
1868     if (carrier_thread == nullptr) {
1869       return false;
1870     }
1871     JavaThread* java_thread = java_lang_Thread::thread(carrier_thread);
1872     return java_thread->is_vthread_mounted();
1873   }
1874 
1875   ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop);
1876   ~ThreadDumper() {
1877     for (int index = 0; index < _frames->length(); index++) {
1878       delete _frames->at(index);
1879     }
1880     delete _frames;
1881   }
1882 
1883   // affects frame_count
1884   void add_oom_frame(Method* oome_constructor) {
1885     assert(_start_frame_serial_num == 0, "add_oom_frame cannot be called after init_serial_nums");
1886     _oome_constructor = oome_constructor;
1887   }
1888 
1889   void init_serial_nums(volatile int* thread_counter, volatile int* frame_counter) {
1890     assert(_start_frame_serial_num == 0, "already initialized");
1891     _thread_serial_num = AtomicAccess::fetch_then_add(thread_counter, 1);
1892     _start_frame_serial_num = AtomicAccess::fetch_then_add(frame_counter, frame_count());
1893   }
1894 
1895   bool oom_thread() const {
1896     return _oome_constructor != nullptr;
1897   }
1898 
1899   int frame_count() const {
1900     return _frames->length() + (oom_thread() ? 1 : 0);
1901   }
1902 
1903   u4 thread_serial_num() const {
1904     return (u4)_thread_serial_num;
1905   }
1906 
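  // Stack trace serial numbers are offset by STACK_TRACE_ID so they cannot
  // collide with the frameless dummy trace (serial number STACK_TRACE_ID)
  // written by VM_HeapDumper::dump_stack_traces().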
1907   u4 stack_trace_serial_num() const {
1908     return (u4)(_thread_serial_num + STACK_TRACE_ID);
1909   }
1910 
1911   // writes HPROF_TRACE and HPROF_FRAME records
1912   // returns number of dumped frames
1913   void dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map);
1914 
1915   // writes HPROF_GC_ROOT_THREAD_OBJ subrecord
1916   void dump_thread_obj(AbstractDumpWriter* writer);
1917 
1918   // Walk the stack of the thread.
1919   // Dumps a HPROF_GC_ROOT_JAVA_FRAME subrecord for each local
1920   // Dumps a HPROF_GC_ROOT_JNI_LOCAL subrecord for each JNI local
1921   void dump_stack_refs(AbstractDumpWriter* writer);
1922 
1923 };
1924 
1925 ThreadDumper::ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop)
1926     : _thread_type(thread_type), _java_thread(java_thread), _thread_oop(thread_oop),
1927       _oome_constructor(nullptr),
1928       _thread_serial_num(0), _start_frame_serial_num(0)
1929 {
1930   // sanity checks
1931   if (_thread_type == ThreadType::UnmountedVirtual) {
1932     assert(_java_thread == nullptr, "sanity");
1933     assert(_thread_oop != nullptr, "sanity");
1934   } else {
1935     assert(_java_thread != nullptr, "sanity");
1936     assert(_thread_oop != nullptr, "sanity");
1937   }
1938 
1939   _frames = new (mtServiceability) GrowableArray<StackFrameInfo*>(10, mtServiceability);
1940   bool stop_at_vthread_entry = _thread_type == ThreadType::MountedVirtual;
1941 
1942   // vframes are resource allocated
1943   Thread* current_thread = Thread::current();
1944   ResourceMark rm(current_thread);
1945   HandleMark hm(current_thread);
1946 
1947   for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) {
1948     if (stop_at_vthread_entry && vf->is_vthread_entry()) {
1949       break;
1950     }
1951     if (vf->is_java_frame()) {
1952       javaVFrame* jvf = javaVFrame::cast(vf);
1953       _frames->append(new StackFrameInfo(jvf, false));
1954     } else {
1955       // ignore non-Java frames
1956     }
1957   }
1958 }
1959 
1960 void ThreadDumper::dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map) {
1961   assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_nums are not initialized");
1962 
1963   // write HPROF_FRAME records for this thread's stack trace
1964   int depth = _frames->length();
1965   int frame_serial_num = _start_frame_serial_num;
1966 
1967   if (oom_thread()) {
1968     // OOM thread
1969     // write fake frame that makes it look like the thread, which caused OOME,
1970     // is in the OutOfMemoryError zero-parameter constructor
1971     int oome_serial_num = klass_map->find(_oome_constructor->method_holder());
1972     // the class serial number starts from 1
1973     assert(oome_serial_num > 0, "OutOfMemoryError class not found");
1974     DumperSupport::dump_stack_frame(writer, ++frame_serial_num, oome_serial_num, _oome_constructor, 0);
1975     depth++;
1976   }
1977 
1978   for (int j = 0; j < _frames->length(); j++) {
1979     StackFrameInfo* frame = _frames->at(j);
1980     Method* m = frame->method();
1981     int class_serial_num = klass_map->find(m->method_holder());
1982     // the class serial number starts from 1
1983     assert(class_serial_num > 0, "class not found");
1984     DumperSupport::dump_stack_frame(writer, ++frame_serial_num, class_serial_num, m, frame->bci());
1985   }
1986 
1987   // write HPROF_TRACE record for the thread
1988   DumperSupport::write_header(writer, HPROF_TRACE, checked_cast<u4>(3 * sizeof(u4) + depth * oopSize));
1989   writer->write_u4(stack_trace_serial_num());   // stack trace serial number
1990   writer->write_u4(thread_serial_num());        // thread serial number
1991   writer->write_u4((u4)depth);                  // frame count (including oom frame)
1992   for (int j = 1; j <= depth; j++) {
1993     writer->write_id(_start_frame_serial_num + j);
1994   }
1995 }
1996 
1997 void ThreadDumper::dump_thread_obj(AbstractDumpWriter * writer) {
1998   assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");
1999 
  u4 size = 1 + sizeof(address) + 4 + 4; // tag + objectID + thread serial (u4) + stack trace serial (u4)
2001   writer->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
2002   writer->write_objectID(_thread_oop);
2003   writer->write_u4(thread_serial_num());      // thread serial number
2004   writer->write_u4(stack_trace_serial_num()); // stack trace serial number
2005   writer->end_sub_record();
2006 }
2007 
2008 void ThreadDumper::dump_stack_refs(AbstractDumpWriter * writer) {
2009   assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");
2010 
2011   JNILocalsDumper blk(writer, thread_serial_num());
2012   if (_thread_type == ThreadType::Platform) {
2013     if (!_java_thread->has_last_Java_frame()) {
2014       // no last java frame but there may be JNI locals
2015       _java_thread->active_handles()->oops_do(&blk);
2016       return;
2017     }
2018   }
2019 
2020   JavaStackRefDumper java_ref_dumper(writer, thread_serial_num());
2021 
2022   // vframes are resource allocated
2023   Thread* current_thread = Thread::current();
2024   ResourceMark rm(current_thread);
2025   HandleMark hm(current_thread);
2026 
2027   bool stopAtVthreadEntry = _thread_type == ThreadType::MountedVirtual;
2028   frame* last_entry_frame = nullptr;
2029   bool is_top_frame = true;
2030   int depth = 0;
2031   if (oom_thread()) {
2032     depth++;
2033   }
2034 
2035   for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) {
2036     if (stopAtVthreadEntry && vf->is_vthread_entry()) {
2037       break;
2038     }
2039 
2040     if (vf->is_java_frame()) {
2041       javaVFrame* jvf = javaVFrame::cast(vf);
2042       if (!(jvf->method()->is_native())) {
2043         java_ref_dumper.set_frame_number(depth);
2044         java_ref_dumper.dump_java_stack_refs(jvf->locals());
2045         java_ref_dumper.dump_java_stack_refs(jvf->expressions());
2046       } else {
2047         // native frame
2048         blk.set_frame_number(depth);
2049         if (is_top_frame) {
2050           // JNI locals for the top frame if mounted
2051           assert(_java_thread != nullptr || jvf->method()->is_synchronized()
2052                  || jvf->method()->is_object_wait0(), "impossible for unmounted vthread");
2053           if (_java_thread != nullptr) {
2054             _java_thread->active_handles()->oops_do(&blk);
2055           }
2056         } else {
2057           if (last_entry_frame != nullptr) {
2058             // JNI locals for the entry frame
2059             assert(last_entry_frame->is_entry_frame(), "checking");
2060             last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
2061           }
2062         }
2063       }
2064       last_entry_frame = nullptr;
2065       // increment only for Java frames
2066       depth++;
2067     } else {
      // externalVFrame - if this is an entry frame we report its JNI locals
      // when we find the corresponding javaVFrame
2070       frame* fr = vf->frame_pointer();
2071       assert(fr != nullptr, "sanity check");
2072       if (fr->is_entry_frame()) {
2073         last_entry_frame = fr;
2074       }
2075     }
    is_top_frame = false;
2077   }
2078   assert(depth == frame_count(), "total number of Java frames not matched");
2079 }
2080 
2081 vframe* ThreadDumper::get_top_frame() const {
2082   if (_thread_type == ThreadType::UnmountedVirtual) {
2083     ContinuationWrapper cont(java_lang_VirtualThread::continuation(_thread_oop));
2084     if (cont.is_empty()) {
2085       return nullptr;
2086     }
2087     assert(!cont.is_mounted(), "sanity check");
2088     stackChunkOop chunk = cont.last_nonempty_chunk();
2089     if (chunk == nullptr || chunk->is_empty()) {
2090       return nullptr;
2091     }
2092 
2093     RegisterMap reg_map(cont.continuation(), RegisterMap::UpdateMap::include);
2094     frame fr = chunk->top_frame(&reg_map);
2095     vframe* vf = vframe::new_vframe(&fr, &reg_map, nullptr); // don't need JavaThread
2096     return vf;
2097   }
2098 
2099   RegisterMap reg_map(_java_thread,
2100       RegisterMap::UpdateMap::include,
2101       RegisterMap::ProcessFrames::include,
2102       RegisterMap::WalkContinuation::skip);
2103   switch (_thread_type) {
2104   case ThreadType::Platform:
2105     if (!_java_thread->has_last_Java_frame()) {
2106       return nullptr;
2107     }
2108     return _java_thread->is_vthread_mounted()
2109         ? _java_thread->carrier_last_java_vframe(&reg_map)
2110         : _java_thread->platform_thread_last_java_vframe(&reg_map);
2111 
2112   case ThreadType::MountedVirtual:
2113     return _java_thread->last_java_vframe(&reg_map);
2114 
  default: // make compilers happy
    break;
2117   }
2118   ShouldNotReachHere();
2119   return nullptr;
2120 }
2121 
2122 class FlatObjectDumper: public FlatObjectIdProvider {
2123 private:
2124   volatile uintptr_t _id_counter;
2125 public:
2126   FlatObjectDumper(): _id_counter(0) {
2127   }
2128 
2129   void dump_flat_objects(AbstractDumpWriter* writer, oop holder,
2130                          DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects);
2131 
2132   // FlatObjectIdProvider implementation
2133   virtual uintptr_t get_id() override {
    // need to protect against overflow, so use a cmpxchg loop instead of fetch_then_add
2135     const uintptr_t max_value = (uintptr_t)-1;
2136     uintptr_t old_value = AtomicAccess::load(&_id_counter);
2137     while (old_value != max_value) {
2138       uintptr_t new_value = old_value + 1;
      // to avoid conflicts with oop addresses, skip values that are aligned
      // like real oops (e.g. with 8-byte object alignment ids run 1..7, 9..15, ...)
2140       if ((new_value & MinObjAlignmentInBytesMask) == 0) {
2141         new_value++;
2142       }
2143       uintptr_t value = AtomicAccess::cmpxchg(&_id_counter, old_value, new_value);
2144       if (value == old_value) {
2145         // success
2146         return new_value;
2147       }
2148       old_value = value;
2149     }
    // if we get here, the maximum id value has been reached
2151     return max_value;
2152   }
2153 
2154 };
2155 
2156 void FlatObjectDumper::dump_flat_objects(AbstractDumpWriter* writer, oop holder,
2157                                          DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects) {
2158   // DumperSupport::dump_instance can add entries to flat_objects
2159   while (!flat_objects->is_empty()) {
2160     DumperFlatObject* obj = flat_objects->pop();
2161     DumperSupport::dump_instance(writer, obj->object_id(), holder, obj->offset(), obj->inline_klass(), class_cache, flat_objects);
2162     delete obj;
2163   }
2164 }
2165 
2166 // Callback to dump thread-related data for unmounted virtual threads;
2167 // implemented by VM_HeapDumper.
2168 class UnmountedVThreadDumper {
2169 public:
2170   virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
2171 };
2172 
2173 
2174 // Support class used when iterating over the heap.
2175 class HeapObjectDumper : public ObjectClosure {
2176  private:
2177   AbstractDumpWriter* _writer;
2178   AbstractDumpWriter* writer()                  { return _writer; }
2179   UnmountedVThreadDumper* _vthread_dumper;
2180   FlatObjectDumper* _flat_dumper;
2181 
2182   DumperClassCacheTable _class_cache;
2183 
2184  public:
2185   HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper, FlatObjectDumper* flat_dumper)
2186     : _writer(writer), _vthread_dumper(vthread_dumper), _flat_dumper(flat_dumper) {}
2187 
2188   // called for each object in the heap
2189   void do_object(oop o);
2190 };
2191 
2192 void HeapObjectDumper::do_object(oop o) {
  // skip classes as these are emitted as HPROF_GC_CLASS_DUMP records
2194   if (o->klass() == vmClasses::Class_klass()) {
2195     if (!java_lang_Class::is_primitive(o)) {
2196       return;
2197     }
2198   }
2199 
2200   if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2201     return;
2202   }
2203 
2204   if (o->is_instance()) {
2205     DumperFlatObjectList flat_fields(_flat_dumper);
2206     // create a HPROF_GC_INSTANCE record for each object
2207     DumperSupport::dump_instance(writer(),
2208                                  cast_from_oop<uintptr_t>(o), // object_id is the address
2209                                  o, 0,                        // for heap instance holder is oop, offset is 0
2210                                  InstanceKlass::cast(o->klass()),
2211                                  &_class_cache, &flat_fields);
2212 
2213     // if there are flattened fields, dump them
2214     if (!flat_fields.is_empty()) {
2215       _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_fields);
2216     }
2217 
2218     // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2219     // (mounted virtual threads are dumped with their carriers).
2220     if (java_lang_VirtualThread::is_instance(o)
2221         && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2222       _vthread_dumper->dump_vthread(o, writer());
2223     }
2224   } else if (o->is_objArray()) {
2225     DumperFlatObjectList flat_elements(_flat_dumper);
2226     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2227     DumperSupport::dump_object_array(writer(), objArrayOop(o), &flat_elements);
    // if this is a flat array, dump its elements
2229     if (!flat_elements.is_empty()) {
2230       _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_elements);
2231     }
2232   } else if (o->is_typeArray()) {
2233     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2234     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2235   }
2236 }
2237 
// The dumper controller for parallel heap dump: the VM dumper locks the global
// writer and signals start; the other dumpers wait for the signal, write their
// heap segments, and report completion, which the VM dumper then waits for.
2239 class DumperController : public CHeapObj<mtInternal> {
2240  private:
2241    Monitor* _lock;
2242    Mutex* _global_writer_lock;
2243 
2244    const uint   _dumper_number;
2245    uint   _complete_number;
2246 
2247    bool   _started; // VM dumper started and acquired global writer lock
2248 
2249  public:
2250    DumperController(uint number) :
     // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
     // so we lock with _no_safepoint_check_flag.
     // signal_start() acquires _lock while the global writer is locked,
     // so its rank must be less than the _global_writer_lock rank.
2255      _lock(new (std::nothrow) PaddedMonitor(Mutex::nosafepoint - 1, "DumperController_lock")),
2256      _global_writer_lock(new (std::nothrow) Mutex(Mutex::nosafepoint, "DumpWriter_lock")),
2257      _dumper_number(number),
2258      _complete_number(0),
2259      _started(false)
2260    {}
2261 
2262    ~DumperController() {
2263      delete _lock;
2264      delete _global_writer_lock;
2265    }
2266 
   // parallel (non-VM) dumpers must wait until the VM dumper acquires the global writer lock
2268    void wait_for_start_signal() {
2269      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2270      while (_started == false) {
2271        ml.wait();
2272      }
2273    }
2274 
2275    void signal_start() {
2276      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2277      _started = true;
2278      ml.notify_all();
2279    }
2280 
2281    void lock_global_writer() {
2282      _global_writer_lock->lock_without_safepoint_check();
2283    }
2284 
2285    void unlock_global_writer() {
2286      _global_writer_lock->unlock();
2287    }
2288 
2289    void dumper_complete(DumpWriter* local_writer, DumpWriter* global_writer) {
2290      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2291      _complete_number++;
2292      // propagate local error to global if any
2293      if (local_writer->has_error()) {
2294        global_writer->set_error(local_writer->error());
2295      }
2296      ml.notify();
2297    }
2298 
2299    void wait_all_dumpers_complete() {
2300      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2301      while (_complete_number != _dumper_number) {
2302         ml.wait();
2303      }
2304    }
2305 };
2306 
2307 // DumpMerger merges separate dump files into a complete one
2308 class DumpMerger : public StackObj {
2309 private:
2310   DumpWriter* _writer;
2311   const char* _path;
2312   bool _has_error;
2313   int _dump_seq;
2314 
2315 private:
2316   void merge_file(const char* path);
2317   void merge_done();
2318   void set_error(const char* msg);
2319 
2320 public:
2321   DumpMerger(const char* path, DumpWriter* writer, int dump_seq) :
2322     _writer(writer),
2323     _path(path),
2324     _has_error(_writer->has_error()),
2325     _dump_seq(dump_seq) {}
2326 
2327   void do_merge();
2328 
2329   // returns path for the parallel DumpWriter (resource allocated)
2330   static char* get_writer_path(const char* base_path, int seq);
2331 
2332 };
2333 
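// For example, base_path "heap.hprof" with seq 2 yields "heap.hprof.p2".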
2334 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2335   // approximate required buffer size
2336   size_t buf_size = strlen(base_path)
2337                     + 2                 // ".p"
2338                     + 10                // number (that's enough for 2^32 parallel dumpers)
2339                     + 1;                // '\0'
2340 
2341   char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2342   memset(path, 0, buf_size);
2343 
2344   os::snprintf_checked(path, buf_size, "%s.p%d", base_path, seq);
2345 
2346   return path;
2347 }
2348 
2349 
2350 void DumpMerger::merge_done() {
2351   // Writes the HPROF_HEAP_DUMP_END record.
2352   if (!_has_error) {
2353     DumperSupport::end_of_dump(_writer);
2354     _writer->flush();
2355   }
  _dump_seq = 0; // reset
2357 }
2358 
2359 void DumpMerger::set_error(const char* msg) {
2360   assert(msg != nullptr, "sanity check");
2361   log_error(heapdump)("%s (file: %s)", msg, _path);
2362   _writer->set_error(msg);
2363   _has_error = true;
2364 }
2365 
2366 #ifdef LINUX
// Merge segmented heap files via sendfile; it's more efficient than a
// read+write combination, which would require transferring the data to and
// from user space.
2370 void DumpMerger::merge_file(const char* path) {
2371   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2372 
2373   int segment_fd = os::open(path, O_RDONLY, 0);
2374   if (segment_fd == -1) {
2375     set_error("Can not open segmented heap file during merging");
2376     return;
2377   }
2378 
2379   struct stat st;
2380   if (os::stat(path, &st) != 0) {
2381     ::close(segment_fd);
2382     set_error("Can not get segmented heap file size during merging");
2383     return;
2384   }
2385 
2386   // A successful call to sendfile may write fewer bytes than requested; the
2387   // caller should be prepared to retry the call if there were unsent bytes.
2388   jlong offset = 0;
2389   while (offset < st.st_size) {
2390     int ret = os::Linux::sendfile(_writer->get_fd(), segment_fd, &offset, st.st_size);
2391     if (ret == -1) {
2392       ::close(segment_fd);
2393       set_error("Failed to merge segmented heap file");
2394       return;
2395     }
2396   }
2397 
  // Since the sendfile variant does not go through the global writer's write
  // method, its bytes_written count is not updated either; we need to
  // accumulate bytes_written for the global writer explicitly in this case.
2401   julong accum = _writer->bytes_written() + st.st_size;
2402   _writer->set_bytes_written(accum);
2403   ::close(segment_fd);
2404 }
2405 #else
2406 // Generic implementation using read+write
2407 void DumpMerger::merge_file(const char* path) {
2408   TraceTime timer("Merge segmented heap file", TRACETIME_LOG(Info, heapdump));
2409 
2410   fileStream segment_fs(path, "rb");
2411   if (!segment_fs.is_open()) {
2412     set_error("Can not open segmented heap file during merging");
2413     return;
2414   }
2415 
2416   jlong total = 0;
2417   size_t cnt = 0;
2418 
  // Use the _writer buffer for reading; set_position() + flush() then writes
  // the bytes just read out through the writer.
2420   while ((cnt = segment_fs.read(_writer->buffer(), 1, _writer->buffer_size())) != 0) {
2421     _writer->set_position(cnt);
2422     _writer->flush();
2423     total += cnt;
2424   }
2425 
2426   if (segment_fs.fileSize() != total) {
2427     set_error("Merged heap dump is incomplete");
2428   }
2429 }
2430 #endif
2431 
2432 void DumpMerger::do_merge() {
2433   TraceTime timer("Merge heap files complete", TRACETIME_LOG(Info, heapdump));
2434 
  // Since the contents of the segmented heap files were already compressed, we
  // don't need to compress them again during merging.
2437   AbstractCompressor* saved_compressor = _writer->compressor();
2438   _writer->set_compressor(nullptr);
2439 
  // Merge the content of the segmented files into the base file. The segmented
  // files are deleted regardless of whether the merge succeeds.
2442   for (int i = 0; i < _dump_seq; i++) {
2443     ResourceMark rm;
2444     const char* path = get_writer_path(_path, i);
2445     if (!_has_error) {
2446       merge_file(path);
2447     }
    // Delete the segmented heap file regardless
2449     if (remove(path) != 0) {
2450       log_info(heapdump)("Removal of segment file (%d) failed (%d)", i, errno);
2451     }
2452   }
2453 
2454   // restore compressor for further use
2455   _writer->set_compressor(saved_compressor);
2456   merge_done();
2457 }
2458 
2459 // The VM operation that performs the heap dump
2460 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2461  private:
2462   DumpWriter*             _writer;
2463   JavaThread*             _oome_thread;
2464   Method*                 _oome_constructor;
2465   bool                    _gc_before_heap_dump;
2466   GrowableArray<Klass*>*  _klass_map;
2467 
2468   ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
2469   int                     _thread_dumpers_count;
2470   volatile int            _thread_serial_num;
2471   volatile int            _frame_serial_num;
2472 
2473   volatile int            _dump_seq;
2474   // parallel heap dump support
2475   uint                    _num_dumper_threads;
2476   DumperController*       _dumper_controller;
2477   ParallelObjectIterator* _poi;
2478 
2479   // flat value object support
2480   FlatObjectDumper        _flat_dumper;
2481 
2482   // Dumper id of VMDumper thread.
2483   static const int VMDumperId = 0;
2484   // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2485   static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
  // the first dumper to call get_next_dumper_id becomes the VM dumper
2487   int get_next_dumper_id() {
2488     return AtomicAccess::fetch_then_add(&_dump_seq, 1);
2489   }
2490 
2491   DumpWriter* writer() const { return _writer; }
2492 
2493   bool skip_operation() const;
2494 
2495   // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2496   void dump_threads(AbstractDumpWriter* writer);
2497 
2498   bool is_oom_thread(JavaThread* thread) const {
2499     return thread == _oome_thread && _oome_constructor != nullptr;
2500   }
2501 
2502   // HPROF_TRACE and HPROF_FRAME records for platform and mounted virtual threads
2503   void dump_stack_traces(AbstractDumpWriter* writer);
2504 
2505  public:
2506   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome, uint num_dump_threads) :
2507     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
2508                     GCCause::_heap_dump /* GC Cause */,
2509                     0 /* total full collections, dummy, ignored */,
2510                     gc_before_heap_dump),
2511     WorkerTask("dump heap") {
2512     _writer = writer;
2513     _gc_before_heap_dump = gc_before_heap_dump;
2514     _klass_map = new (mtServiceability) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, mtServiceability);
2515 
2516     _thread_dumpers = nullptr;
2517     _thread_dumpers_count = 0;
2518     _thread_serial_num = 1;
2519     _frame_serial_num = 1;
2520 
2521     _dump_seq = VMDumperId;
2522     _num_dumper_threads = num_dump_threads;
2523     _dumper_controller = nullptr;
2524     _poi = nullptr;
2525     if (oome) {
2526       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
2527       // get OutOfMemoryError zero-parameter constructor
2528       InstanceKlass* oome_ik = vmClasses::OutOfMemoryError_klass();
2529       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
2530                                                           vmSymbols::void_method_signature());
2531       // get thread throwing OOME when generating the heap dump at OOME
2532       _oome_thread = JavaThread::current();
2533     } else {
2534       _oome_thread = nullptr;
2535       _oome_constructor = nullptr;
2536     }
2537   }
2538 
2539   ~VM_HeapDumper() {
2540     if (_thread_dumpers != nullptr) {
2541       for (int i = 0; i < _thread_dumpers_count; i++) {
2542         delete _thread_dumpers[i];
2543       }
2544       FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2545     }
2546 
2547     if (_dumper_controller != nullptr) {
2548       delete _dumper_controller;
2549       _dumper_controller = nullptr;
2550     }
2551     delete _klass_map;
2552   }
2553   int dump_seq()           { return _dump_seq; }
2554   bool is_parallel_dump()  { return _num_dumper_threads > 1; }
2555   void prepare_parallel_dump(WorkerThreads* workers);
2556 
2557   VMOp_Type type() const { return VMOp_HeapDumper; }
2558   virtual bool doit_prologue();
2559   void doit();
2560   void work(uint worker_id);
2561 
2562   // UnmountedVThreadDumper implementation
2563   void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2564 };
2565 
2566 bool VM_HeapDumper::skip_operation() const {
2567   return false;
2568 }
2569 
2570 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2571 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2572   writer->finish_dump_segment();
2573 
  writer->write_u1(HPROF_HEAP_DUMP_END);
  writer->write_u4(0); // time offset
  writer->write_u4(0); // record length
2577 }
2578 
2579 // Write a HPROF_GC_ROOT_THREAD_OBJ record for platform/carrier and mounted virtual threads.
2580 // Then walk the stack so that locals and JNI locals are dumped.
2581 void VM_HeapDumper::dump_threads(AbstractDumpWriter* writer) {
2582   for (int i = 0; i < _thread_dumpers_count; i++) {
2583     _thread_dumpers[i]->dump_thread_obj(writer);
2584     _thread_dumpers[i]->dump_stack_refs(writer);
2585   }
2586 }
2587 
2588 bool VM_HeapDumper::doit_prologue() {
2589   if (_gc_before_heap_dump && (UseZGC || UseShenandoahGC)) {
2590     // ZGC and Shenandoah cannot perform a synchronous GC cycle from within the VM thread.
2591     // So collect_as_vm_thread() is a noop. To respect the _gc_before_heap_dump flag a
2592     // synchronous GC cycle is performed from the caller thread in the prologue.
2593     Universe::heap()->collect(GCCause::_heap_dump);
2594   }
2595   return VM_GC_Operation::doit_prologue();
2596 }
2597 
2598 void VM_HeapDumper::prepare_parallel_dump(WorkerThreads* workers) {
2599   uint num_active_workers = workers != nullptr ? workers->active_workers() : 0;
2600   uint num_requested_dump_threads = _num_dumper_threads;
2601   // check if we can dump in parallel based on requested and active threads
2602   if (num_active_workers <= 1 || num_requested_dump_threads <= 1) {
2603     _num_dumper_threads = 1;
2604   } else {
2605     _num_dumper_threads = clamp(num_requested_dump_threads, 2U, num_active_workers);
2606   }
2607   _dumper_controller = new (std::nothrow) DumperController(_num_dumper_threads);
2608   bool can_parallel = _num_dumper_threads > 1;
2609   log_info(heapdump)("Requested dump threads %u, active dump threads %u, "
2610                      "actual dump threads %u, parallelism %s",
2611                      num_requested_dump_threads, num_active_workers,
2612                      _num_dumper_threads, can_parallel ? "true" : "false");
2613 }
2614 
2615 // The VM operation that dumps the heap. The dump consists of the following
2616 // records:
2617 //
2618 //  HPROF_HEADER
2619 //  [HPROF_UTF8]*
2620 //  [HPROF_LOAD_CLASS]*
2621 //  [[HPROF_FRAME]*|HPROF_TRACE]*
2622 //  [HPROF_GC_CLASS_DUMP]*
2623 //  [HPROF_HEAP_DUMP_SEGMENT]*
2624 //  HPROF_HEAP_DUMP_END
2625 //
// The HPROF_TRACE records represent the stack traces at the point the heap
// dump is generated, plus a "dummy trace" record which does not include
// any frames. The dummy trace record is referenced as the allocation site
// for objects whose allocation site is unknown.
2630 //
2631 // Each HPROF_HEAP_DUMP_SEGMENT record has a length followed by sub-records.
// To allow the heap dump to be generated in a single pass we remember the position
2633 // of the dump length and fix it up after all sub-records have been written.
2634 // To generate the sub-records we iterate over the heap, writing
2635 // HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
2636 // records as we go. Once that is done we write records for some of the GC
2637 // roots.
2638 
2639 void VM_HeapDumper::doit() {
2640 
2641   CollectedHeap* ch = Universe::heap();
2642 
2643   ch->ensure_parsability(false); // must happen, even if collection does
2644                                  // not happen (e.g. due to GCLocker)
2645 
2646   if (_gc_before_heap_dump) {
2647     if (GCLocker::is_active()) {
2648       warning("GC locker is held; pre-heapdump GC was skipped");
2649     } else {
2650       ch->collect_as_vm_thread(GCCause::_heap_dump);
2651     }
2652   }
2653 
2654   WorkerThreads* workers = ch->safepoint_workers();
2655   prepare_parallel_dump(workers);
2656 
2657   if (!is_parallel_dump()) {
2658     work(VMDumperId);
2659   } else {
2660     ParallelObjectIterator poi(_num_dumper_threads);
2661     _poi = &poi;
2662     workers->run_task(this, _num_dumper_threads);
2663     _poi = nullptr;
2664   }
2665 }
2666 
2667 void VM_HeapDumper::work(uint worker_id) {
2668   // VM Dumper works on all non-heap data dumping and part of heap iteration.
2669   int dumper_id = get_next_dumper_id();
2670 
2671   if (is_vm_dumper(dumper_id)) {
    // lock the global writer; it will be unlocked after the VM dumper finishes with non-heap data
2673     _dumper_controller->lock_global_writer();
2674     _dumper_controller->signal_start();
2675   } else {
2676     _dumper_controller->wait_for_start_signal();
2677   }
2678 
2679   if (is_vm_dumper(dumper_id)) {
2680     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2681     // Write the file header - we always use 1.0.2
2682     const char* header = "JAVA PROFILE 1.0.2";
2683 
    // header is a few bytes long - no chance to overflow an int
2685     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2686     writer()->write_u4(oopSize);
2687     // timestamp is current time in ms
2688     writer()->write_u8(os::javaTimeMillis());
2689     // HPROF_UTF8 records
2690     SymbolTableDumper sym_dumper(writer());
2691     SymbolTable::symbols_do(&sym_dumper);
2692 
2693     // write HPROF_LOAD_CLASS records
2694     {
2695       LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
2696       ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
2697     }
2698 
2699     // write HPROF_FRAME and HPROF_TRACE records
    // this must be called after _klass_map has been built by the class iteration above.
2701     dump_stack_traces(writer());
2702 
2703     // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2704     _dumper_controller->unlock_global_writer();
2705   }
2706 
2707   // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2708 
2709   ResourceMark rm;
  // share the global compressor; the local DumpWriter is not responsible for its life cycle
2711   DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
2712                             writer()->is_overwrite(), writer()->compressor());
2713   if (!segment_writer.has_error()) {
2714     if (is_vm_dumper(dumper_id)) {
2715       // dump some non-heap subrecords to heap dump segment
2716       TraceTime timer("Dump non-objects (part 2)", TRACETIME_LOG(Info, heapdump));
2717       // Writes HPROF_GC_CLASS_DUMP records
2718       ClassDumper class_dumper(&segment_writer);
2719       ClassLoaderDataGraph::classes_do(&class_dumper);
2720 
2721       // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
2722       dump_threads(&segment_writer);
2723 
2724       // HPROF_GC_ROOT_JNI_GLOBAL
2725       JNIGlobalsDumper jni_dumper(&segment_writer);
2726       JNIHandles::oops_do(&jni_dumper);
2727       // technically not jni roots, but global roots
2728       // for things like preallocated throwable backtraces
2729       Universe::vm_global()->oops_do(&jni_dumper);
2730       // HPROF_GC_ROOT_STICKY_CLASS
2731       // These should be classes in the null class loader data, and not all classes
2732       // if !ClassUnloading
      StickyClassDumper sticky_class_dumper(&segment_writer);
      ClassLoaderData::the_null_class_loader_data()->classes_do(&sticky_class_dumper);
2735     }
2736 
2737     // Heap iteration.
2738     // writes HPROF_GC_INSTANCE_DUMP records.
2739     // After each sub-record is written check_segment_length will be invoked
2740     // to check if the current segment exceeds a threshold. If so, a new
2741     // segment is started.
2742     // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2743     // of the heap dump.
2744 
2745     TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
2746     HeapObjectDumper obj_dumper(&segment_writer, this, &_flat_dumper);
2747     if (!is_parallel_dump()) {
2748       Universe::heap()->object_iterate(&obj_dumper);
2749     } else {
2750       // == Parallel dump
2751       _poi->object_iterate(&obj_dumper, worker_id);
2752     }
2753 
2754     segment_writer.finish_dump_segment();
2755     segment_writer.flush();
2756   }
2757 
2758   _dumper_controller->dumper_complete(&segment_writer, writer());
2759 
2760   if (is_vm_dumper(dumper_id)) {
2761     _dumper_controller->wait_all_dumpers_complete();
2762 
2763     // flush global writer
2764     writer()->flush();
2765 
2766     // At this point, all fragments of the heapdump have been written to separate files.
2767     // We need to merge them into a complete heapdump and write HPROF_HEAP_DUMP_END at that time.
2768   }
2769 }
2770 
2771 void VM_HeapDumper::dump_stack_traces(AbstractDumpWriter* writer) {
2772   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
2773   DumperSupport::write_header(writer, HPROF_TRACE, 3 * sizeof(u4));
2774   writer->write_u4((u4)STACK_TRACE_ID);
2775   writer->write_u4(0);                    // thread number
2776   writer->write_u4(0);                    // frame count
2777 
  // maximum count: every platform thread may be a carrier with a mounted virtual thread
2779   _thread_dumpers = NEW_C_HEAP_ARRAY(ThreadDumper*, Threads::number_of_threads() * 2, mtInternal);
2780 
2781   for (JavaThreadIteratorWithHandle jtiwh; JavaThread * thread = jtiwh.next(); ) {
2782     if (ThreadDumper::should_dump_pthread(thread)) {
2783       bool add_oom_frame = is_oom_thread(thread);
2784 
2785       oop mounted_vt = thread->is_vthread_mounted() ? thread->vthread() : nullptr;
2786       if (mounted_vt != nullptr && !ThreadDumper::should_dump_vthread(mounted_vt)) {
2787         mounted_vt = nullptr;
2788       }
2789 
2790       // mounted vthread (if any)
2791       if (mounted_vt != nullptr) {
2792         ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::MountedVirtual, thread, mounted_vt);
2793         _thread_dumpers[_thread_dumpers_count++] = thread_dumper;
2794         if (add_oom_frame) {
2795           thread_dumper->add_oom_frame(_oome_constructor);
          // the OOM frame goes on the VT stack; don't also add it to the carrier thread stack
2797           add_oom_frame = false;
2798         }
2799         thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
2800         thread_dumper->dump_stack_traces(writer, _klass_map);
2801       }
2802 
2803       // platform or carrier thread
2804       ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::Platform, thread, thread->threadObj());
2805       _thread_dumpers[_thread_dumpers_count++] = thread_dumper;
2806       if (add_oom_frame) {
2807         thread_dumper->add_oom_frame(_oome_constructor);
2808       }
2809       thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
2810       thread_dumper->dump_stack_traces(writer, _klass_map);
2811     }
2812   }
2813 }
2814 
2815 void VM_HeapDumper::dump_vthread(oop vt, AbstractDumpWriter* segment_writer) {
2816   // unmounted vthread has no JavaThread
2817   ThreadDumper thread_dumper(ThreadDumper::ThreadType::UnmountedVirtual, nullptr, vt);
2818   thread_dumper.init_serial_nums(&_thread_serial_num, &_frame_serial_num);
2819 
2820   // write HPROF_TRACE/HPROF_FRAME records to global writer
2821   _dumper_controller->lock_global_writer();
2822   thread_dumper.dump_stack_traces(writer(), _klass_map);
2823   _dumper_controller->unlock_global_writer();
2824 
2825   // write HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecord
2826   // to segment writer
2827   thread_dumper.dump_thread_obj(segment_writer);
2828   thread_dumper.dump_stack_refs(segment_writer);
2829 }
2830 
// dump the heap to the given path.
2832 int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
2833   assert(path != nullptr && strlen(path) > 0, "path missing");
2834 
2835   // print message in interactive case
2836   if (out != nullptr) {
2837     out->print_cr("Dumping heap to %s ...", path);
2838     timer()->start();
2839   }
2840 
2841   if (_oome && num_dump_threads > 1) {
2842     // Each additional parallel writer requires several MB of internal memory
2843     // (DumpWriter buffer, DumperClassCacheTable, GZipCompressor buffers).
2844     // For the OOM handling we may already be limited in memory.
    // Let's ensure we have at least 20MB per thread.
2846     physical_memory_size_type free_memory = 0;
2847     // Return value ignored - defaulting to 0 on failure.
2848     (void)os::free_memory(free_memory);
2849     julong max_threads = free_memory / (20 * M);
2850     if (num_dump_threads > max_threads) {
2851       num_dump_threads = MAX2<uint>(1, (uint)max_threads);
2852     }
2853   }
2854 
2855   // create JFR event
2856   EventHeapDump event;
2857 
2858   AbstractCompressor* compressor = nullptr;
2859 
2860   if (compression > 0) {
2861     compressor = new (std::nothrow) GZipCompressor(compression);
2862 
2863     if (compressor == nullptr) {
2864       set_error("Could not allocate gzip compressor");
2865       return -1;
2866     }
2867   }
2868 
2869   DumpWriter writer(path, overwrite, compressor);
2870 
2871   if (writer.error() != nullptr) {
2872     set_error(writer.error());
2873     if (out != nullptr) {
2874       out->print_cr("Unable to create %s: %s", path,
2875         (error() != nullptr) ? error() : "reason unknown");
2876     }
2877     return -1;
2878   }
2879 
2880   // generate the segmented heap dump into separate files
2881   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
2882   VMThread::execute(&dumper);
2883 
2884   // record any error that the writer may have encountered
2885   set_error(writer.error());
2886 
  // The heap dump process is done in two phases:
  //
  // Phase 1: Concurrent threads directly write heap data to multiple heap files.
  //          This is done by VM_HeapDumper, which runs within a safepoint.
  //
  // Phase 2: Merge the multiple heap files into one complete heap dump file.
  //          This is done by DumpMerger, which runs outside a safepoint.
2894 
2895   DumpMerger merger(path, &writer, dumper.dump_seq());
  // Perform the heap dump file merge operation in the current thread rather
  // than in the VM thread; occupying the VM thread would delay GC and
  // other VM operations.
2899   merger.do_merge();
2900   if (writer.error() != nullptr) {
2901     set_error(writer.error());
2902   }
2903 
2904   // emit JFR event
2905   if (error() == nullptr) {
2906     event.set_destination(path);
2907     event.set_gcBeforeDump(_gc_before_heap_dump);
2908     event.set_size(writer.bytes_written());
2909     event.set_onOutOfMemoryError(_oome);
2910     event.set_overwrite(overwrite);
2911     event.set_compression(compression);
2912     event.commit();
2913   } else {
2914     log_debug(aot, heap)("Error %s while dumping heap", error());
2915   }
2916 
2917   // print message in interactive case
2918   if (out != nullptr) {
2919     timer()->stop();
2920     if (error() == nullptr) {
2921       out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
2922                     writer.bytes_written(), timer()->seconds());
2923     } else {
2924       out->print_cr("Dump file is incomplete: %s", writer.error());
2925     }
2926   }
2927 
2928   if (compressor != nullptr) {
2929     delete compressor;
2930   }
2931   return (writer.error() == nullptr) ? 0 : -1;
2932 }
2933 
// stop the timer (if still active) and free any error string we might be holding
2935 HeapDumper::~HeapDumper() {
2936   if (timer()->is_active()) {
2937     timer()->stop();
2938   }
2939   set_error(nullptr);
2940 }
2941 
2942 
2943 // returns the error string (resource allocated), or null
2944 char* HeapDumper::error_as_C_string() const {
2945   if (error() != nullptr) {
2946     char* str = ResourceArea::strdup(error());
2947     return str;
2948   } else {
2949     return nullptr;
2950   }
2951 }
2952 
2953 // set the error string
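// (any previously held error string is freed first; a copy is taken via os::strdup)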
2954 void HeapDumper::set_error(char const* error) {
2955   if (_error != nullptr) {
2956     os::free(_error);
2957   }
2958   if (error == nullptr) {
2959     _error = nullptr;
2960   } else {
2961     _error = os::strdup(error);
2962     assert(_error != nullptr, "allocation failure");
2963   }
2964 }
2965 
// Called during out-of-memory error reporting by a single Java thread
// outside of a JVM safepoint
2968 void HeapDumper::dump_heap_from_oome() {
2969   HeapDumper::dump_heap(true);
2970 }
2971 
// Called during error reporting by a single Java thread outside of a JVM safepoint,
// or for heap dumping by the VM thread during a (GC) safepoint. These callers are
// thus strictly serialized and guaranteed not to interfere with each other. For
// more general use, however, this method would need modification to prevent
// interference when updating the static variables base_path and dump_file_seq below.
2977 void HeapDumper::dump_heap() {
2978   HeapDumper::dump_heap(false);
2979 }
2980 
2981 void HeapDumper::dump_heap(bool oome) {
2982   static char base_path[JVM_MAXPATHLEN] = {'\0'};
2983   static uint dump_file_seq = 0;
2984   char my_path[JVM_MAXPATHLEN];
2985   const int max_digit_chars = 20;
2986   const char* dump_file_name = HeapDumpGzipLevel > 0 ? "java_pid%p.hprof.gz" : "java_pid%p.hprof";
2987 
  // The dump file defaults to java_pid<pid>.hprof in the current working
  // directory. HeapDumpPath=<file> can be used to specify an alternative
  // dump file name, or a directory in which the dump file is created.
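  // For example, with the defaults the first dump goes to ./java_pid<pid>.hprof
  // (java_pid<pid>.hprof.gz when HeapDumpGzipLevel > 0); subsequent dumps get a
  // sequence number appended: <base_path>.1, <base_path>.2, and so on.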
2991   if (dump_file_seq == 0) { // first time in, we initialize base_path
2992     // Set base path (name or directory, default or custom, without seq no), doing %p substitution.
2993     const char *path_src = (HeapDumpPath != nullptr && HeapDumpPath[0] != '\0') ? HeapDumpPath : dump_file_name;
2994     if (!Arguments::copy_expand_pid(path_src, strlen(path_src), base_path, JVM_MAXPATHLEN - max_digit_chars)) {
2995       warning("Cannot create heap dump file.  HeapDumpPath is too long.");
2996       return;
2997     }
2998     // Check if the path is an existing directory
2999     DIR* dir = os::opendir(base_path);
3000     if (dir != nullptr) {
3001       os::closedir(dir);
3002       // Path is a directory.  Append a file separator (if needed).
3003       size_t fs_len = strlen(os::file_separator());
3004       if (strlen(base_path) >= fs_len) {
3005         char* end = base_path;
3006         end += (strlen(base_path) - fs_len);
3007         if (strcmp(end, os::file_separator()) != 0) {
3008           strcat(base_path, os::file_separator());
3009         }
3010       }
3011       // Then add the default name, with %p substitution.  Use my_path temporarily.
3012       if (!Arguments::copy_expand_pid(dump_file_name, strlen(dump_file_name), my_path, JVM_MAXPATHLEN - max_digit_chars)) {
3013         warning("Cannot create heap dump file.  HeapDumpPath is too long.");
3014         return;
3015       }
3016       const size_t dlen = strlen(base_path);
3017       jio_snprintf(&base_path[dlen], sizeof(base_path) - dlen, "%s", my_path);
3018     }
3019     strncpy(my_path, base_path, JVM_MAXPATHLEN);
3020   } else {
    // Append a sequence number for dumps following the first
3022     const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
3023     jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
3024   }
3025   dump_file_seq++;   // increment seq number for next time we dump
3026 
3027   HeapDumper dumper(false /* no GC before heap dump */,
3028                     oome  /* pass along out-of-memory-error flag */);
3029   dumper.dump(my_path, tty, HeapDumpGzipLevel);
3030 }