/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2023, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "services/heapDumperCompression.hpp"
#include "services/threadService.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#ifdef LINUX
#include "os_linux.hpp"
#endif
/*
 * HPROF binary format - description copied from:
 *   src/share/demo/jvmti/hprof/hprof_io.c
 *
 *
 *  header    "JAVA PROFILE 1.0.2" (0-terminated)
 *
 *  u4        size of identifiers. Identifiers are used to represent
 *            UTF8 strings, objects, stack traces, etc. They usually
 *            have the same size as host pointers.
 * u4         high word
 * u4         low word    number of milliseconds since 0:00 GMT, 1/1/70
 * [record]*  a sequence of records.
 *
 *
 * Record format:
 *
 * u1         a TAG denoting the type of the record
 * u4         number of *microseconds* since the time stamp in the
 *            header. (wraps around in a little more than an hour)
 * u4         number of bytes *remaining* in the record. Note that
 *            this number excludes the tag and the length field itself.
 * [u1]*      BODY of the record (a sequence of bytes)
 *
 *
 * The following TAGs are supported:
 *
 * TAG           BODY       notes
 *----------------------------------------------------------
 * HPROF_UTF8               a UTF8-encoded name
 *
 *               id         name ID
 *               [u1]*      UTF8 characters (no trailing zero)
 *
 * HPROF_LOAD_CLASS         a newly loaded class
 *
 *                u4        class serial number (> 0)
 *                id        class object ID
 *                u4        stack trace serial number
 *                id        class name ID
 *
 * HPROF_UNLOAD_CLASS       an unloading class
 *
 *                u4        class serial_number
 *
 * HPROF_FRAME              a Java stack frame
 *
 *                id        stack frame ID
 *                id        method name ID
 *                id        method signature ID
 *                id        source file name ID
 *                u4        class serial number
 *                i4        line number. >0: normal
 *                                       -1: unknown
 *                                       -2: compiled method
 *                                       -3: native method
 *
 * HPROF_TRACE              a Java stack trace
 *
 *               u4         stack trace serial number
 *               u4         thread serial number
 *               u4         number of frames
 *               [id]*      stack frame IDs
 *
 *
 * HPROF_ALLOC_SITES        a set of heap allocation sites, obtained after GC
 *
 *               u2         flags 0x0001: incremental vs. complete
 *                                0x0002: sorted by allocation vs. live
 *                                0x0004: whether to force a GC
 *               u4         cutoff ratio
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *               u4         number of sites that follow
 *               [u1        is_array: 0:  normal object
 *                                    2:  object array
 *                                    4:  boolean array
 *                                    5:  char array
 *                                    6:  float array
 *                                    7:  double array
 *                                    8:  byte array
 *                                    9:  short array
 *                                    10: int array
 *                                    11: long array
 *                u4        class serial number (may be zero during startup)
 *                u4        stack trace serial number
 *                u4        number of bytes alive
 *                u4        number of instances alive
 *                u4        number of bytes allocated
 *                u4]*      number of instances allocated
 *
 * HPROF_START_THREAD       a newly started thread.
 *
 *               u4         thread serial number (> 0)
 *               id         thread object ID
 *               u4         stack trace serial number
 *               id         thread name ID
 *               id         thread group name ID
 *               id         thread group parent name ID
 *
 * HPROF_END_THREAD         a terminating thread.
 *
 *               u4         thread serial number
 *
 * HPROF_HEAP_SUMMARY       heap summary
 *
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *
 * HPROF_HEAP_DUMP          denotes a heap dump
 *
 *               [heap dump sub-records]*
 *
 *                          A heap dump consists of a sequence of
 *                          sub-records of the following kinds:
 *
 *               u1         sub-record type
 *
 *               HPROF_GC_ROOT_UNKNOWN         unknown root
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_OBJ      thread object
 *
 *                          id         thread object ID  (may be 0 for a
 *                                     thread newly attached through JNI)
 *                          u4         thread sequence number
 *                          u4         stack trace sequence number
 *
 *               HPROF_GC_ROOT_JNI_GLOBAL      JNI global ref root
 *
 *                          id         object ID
 *                          id         JNI global ref ID
 *
 *               HPROF_GC_ROOT_JNI_LOCAL       JNI local ref
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_JAVA_FRAME      Java stack frame
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_NATIVE_STACK    Native stack
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_STICKY_CLASS    System class
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_BLOCK    Reference from thread block
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_MONITOR_USED    Busy monitor
 *
 *                          id         object ID
 *
 *               HPROF_GC_CLASS_DUMP           dump of a class object
 *
 *                          id         class object ID
 *                          u4         stack trace serial number
 *                          id         super class object ID
 *                          id         class loader object ID
 *                          id         signers object ID
 *                          id         protection domain object ID
 *                          id         reserved
 *                          id         reserved
 *
 *                          u4         instance size (in bytes)
 *
 *                          u2         size of constant pool
 *                          [u2,       constant pool index,
 *                           ty,       type
 *                                     2:  object
 *                                     4:  boolean
 *                                     5:  char
 *                                     6:  float
 *                                     7:  double
 *                                     8:  byte
 *                                     9:  short
 *                                     10: int
 *                                     11: long
 *                           vl]*      and value
 *
 *                          u2         number of static fields
 *                          [id,       static field name,
 *                           ty,       type,
 *                           vl]*      and value
 *
 *                          u2         number of inst. fields (not inc. super)
 *                          [id,       instance field name,
 *                           ty]*      type
 *
 *               HPROF_GC_INSTANCE_DUMP        dump of a normal object
 *
 *                          id         object ID
 *                          u4         stack trace serial number
 *                          id         class object ID
 *                          u4         number of bytes that follow
 *                          [vl]*      instance field values (class, followed
 *                                     by super, super's super ...)
 *
 *               HPROF_GC_OBJ_ARRAY_DUMP       dump of an object array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          id         array class ID
 *                          [id]*      elements
 *
 *               HPROF_GC_PRIM_ARRAY_DUMP      dump of a primitive array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          u1         element type
 *                                     4:  boolean array
 *                                     5:  char array
 *                                     6:  float array
 *                                     7:  double array
 *                                     8:  byte array
 *                                     9:  short array
 *                                     10: int array
 *                                     11: long array
 *                          [u1]*      elements
 *
 * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 *
 *                u4        total number of samples
 *                u4        # of traces
 *               [u4        # of samples
 *                u4]*      stack trace serial number
 *
 * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 *
 *                u4        0x00000001: alloc traces on/off
 *                          0x00000002: cpu sampling on/off
 *                u2        stack trace depth
 *
 * HPROF_FLAT_ARRAYS        list of flat arrays
 *
 *               [flat array sub-records]*
 *
 *               HPROF_FLAT_ARRAY      flat array
 *
 *                          id         array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
 *                          id         element class ID (dumped by HPROF_GC_CLASS_DUMP)
 *
 * HPROF_INLINED_FIELDS     describes inlined fields
 *
 *               [class with inlined fields sub-records]*
 *
 *               HPROF_CLASS_WITH_INLINED_FIELDS
 *
 *                          id         class ID (dumped as HPROF_GC_CLASS_DUMP)
 *
 *                          u2         number of instance inlined fields (not including super)
 *                          [u2,       inlined field index,
 *                           u2,       synthetic field count,
 *                           id,       original field name,
 *                           id]*      inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
 *
 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 * be generated as a sequence of heap dump segments. This sequence is
 * terminated by an end record. The additional tags allowed by format
 * "JAVA PROFILE 1.0.2" are:
 *
 * HPROF_HEAP_DUMP_SEGMENT  denotes a heap dump segment
 *
 *               [heap dump sub-records]*
 *               The same sub-record types allowed by HPROF_HEAP_DUMP
 *
 * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 *
 */
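
/*
 * Worked example (illustrative only, assuming 8-byte identifiers): a
 * HPROF_UTF8 record for the three-character name "Foo" is laid out as
 *
 *   u1   0x01          tag (HPROF_UTF8)
 *   u4   <delta>       microseconds since the header time stamp
 *   u4   11            bytes remaining: 8 (name ID) + 3 (characters)
 *   id   <name ID>
 *   u1   'F' 'o' 'o'   UTF8 characters, no trailing zero
 */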


// HPROF tags

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // inlined object support
  HPROF_FLAT_ARRAYS             = 0x12,
  HPROF_INLINED_FIELDS          = 0x13,
  // inlined object sub-records (tag values are scoped to their parent record,
  // so they may collide with top-level tags)
  HPROF_FLAT_ARRAY                  = 0x01,
  HPROF_CLASS_WITH_INLINED_FIELDS   = 0x01,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};

// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200
};

 422 
 423 class AbstractDumpWriter;
 424 
 425 class InlinedObjects {
 426 
 427   struct ClassInlinedFields {
 428     const Klass *klass;
 429     uintx base_index;   // base index of the inlined field names (1st field has index base_index+1).
 430     ClassInlinedFields(const Klass *klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}
 431 
 432     // For GrowableArray::find_sorted().
 433     static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
 434       return a.klass - b.klass;
 435     }
 436     // For GrowableArray::sort().
 437     static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
 438       return compare(*a, *b);
 439     }
 440   };
 441 
 442   uintx _min_string_id;
 443   uintx _max_string_id;
 444 
 445   GrowableArray<ClassInlinedFields> *_inlined_field_map;
 446 
 447   // counters for classes with inlined fields and for the fields
 448   int _classes_count;
 449   int _inlined_fields_count;
 450 
 451   static InlinedObjects *_instance;
 452 
 453   static void inlined_field_names_callback(InlinedObjects* _this, const Klass *klass, uintx base_index, int count);
 454 
 455   GrowableArray<oop> *_flat_arrays;
 456 
 457 public:
 458   InlinedObjects()
 459     : _min_string_id(0), _max_string_id(0),
 460     _inlined_field_map(nullptr),
 461     _classes_count(0), _inlined_fields_count(0),
 462     _flat_arrays(nullptr) {
 463   }
 464 
 465   static InlinedObjects* get_instance() {
 466     return _instance;
 467   }
 468 
 469   void init();
 470   void release();
 471 
 472   void dump_inlined_field_names(AbstractDumpWriter *writer);
 473 
 474   uintx get_base_index_for(Klass* k);
 475   uintx get_next_string_id(uintx id);
 476 
 477   void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);
 478 
 479   void add_flat_array(oop array);
 480   void dump_flat_arrays(AbstractDumpWriter* writer);
 481 
 482 };
 483 
 484 InlinedObjects *InlinedObjects::_instance = nullptr;
 485 

// Supports I/O operations for a dump
// Base class for dump and parallel dump
class AbstractDumpWriter : public CHeapObj<mtInternal> {
 protected:
  enum {
    io_buffer_max_size = 1*M,
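    // 9 bytes: u1 tag + u4 timestamp + u4 length (see start_sub_record())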
    dump_segment_header_size = 9
  };

  char* _buffer;    // internal buffer
  size_t _size;
  size_t _pos;

  bool _in_dump_segment; // Are we currently in a dump segment?
  bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
  DEBUG_ONLY(size_t _sub_record_left;) // Bytes not yet written for the current sub-record.
  DEBUG_ONLY(bool _sub_record_ended;) // True if end_sub_record() has been called.

  char* buffer() const                          { return _buffer; }
  size_t buffer_size() const                    { return _size; }
  void set_position(size_t pos)                 { _pos = pos; }

  // May only be called if we have enough room in the buffer.
  void write_fast(const void* s, size_t len);

  // Returns true if we have enough room in the buffer for 'len' bytes.
  bool can_write_fast(size_t len);

  void write_address(address a);

 public:
  AbstractDumpWriter() :
    _buffer(nullptr),
    _size(io_buffer_max_size),
    _pos(0),
    _in_dump_segment(false),
    _is_huge_sub_record(false) { }

  // Total number of bytes written to the disk
  virtual julong bytes_written() const = 0;
  // Returns non-null if an error occurred
  virtual char const* error() const = 0;

  size_t position() const                       { return _pos; }
  // writer functions
  virtual void write_raw(const void* s, size_t len);
  void write_u1(u1 x);
  void write_u2(u2 x);
  void write_u4(u4 x);
  void write_u8(u8 x);
  void write_objectID(oop o);
  void write_rootID(oop* p);
  void write_symbolID(Symbol* o);
  void write_classID(Klass* k);
  void write_id(u4 x);

  // Starts a new sub-record. Starts a new heap dump segment if needed.
  void start_sub_record(u1 tag, u4 len);
  // Ends the current sub-record.
  void end_sub_record();
  // Finishes the current dump segment if not already finished.
  void finish_dump_segment();
  // Flushes the internal buffer to persistent storage.
  virtual void flush() = 0;
};

void AbstractDumpWriter::write_fast(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  assert(buffer_size() - position() >= len, "Must fit");
  debug_only(_sub_record_left -= len);
  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

bool AbstractDumpWriter::can_write_fast(size_t len) {
  return buffer_size() - position() >= len;
}

// write raw bytes
void AbstractDumpWriter::write_raw(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  debug_only(_sub_record_left -= len);

  // flush buffer to make room.
  while (len > buffer_size() - position()) {
    assert(!_in_dump_segment || _is_huge_sub_record,
           "Cannot overflow in non-huge sub-record.");
    size_t to_write = buffer_size() - position();
    memcpy(buffer() + position(), s, to_write);
    s = (void*) ((char*) s + to_write);
    len -= to_write;
    set_position(position() + to_write);
    flush();
  }

  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

// Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
#define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
                                      else write_raw((p), (len)); } while (0)

void AbstractDumpWriter::write_u1(u1 x) {
  WRITE_KNOWN_TYPE(&x, 1);
}

void AbstractDumpWriter::write_u2(u2 x) {
  u2 v;
  Bytes::put_Java_u2((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 2);
}

void AbstractDumpWriter::write_u4(u4 x) {
  u4 v;
  Bytes::put_Java_u4((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 4);
}

void AbstractDumpWriter::write_u8(u8 x) {
  u8 v;
  Bytes::put_Java_u8((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 8);
}

void AbstractDumpWriter::write_address(address a) {
#ifdef _LP64
  write_u8((u8)a);
#else
  write_u4((u4)a);
#endif
}

void AbstractDumpWriter::write_objectID(oop o) {
  write_address(cast_from_oop<address>(o));
}

void AbstractDumpWriter::write_rootID(oop* p) {
  write_address((address)p);
}

void AbstractDumpWriter::write_symbolID(Symbol* s) {
  write_address((address)((uintptr_t)s));
}

void AbstractDumpWriter::write_id(u4 x) {
#ifdef _LP64
  write_u8((u8) x);
#else
  write_u4(x);
#endif
}

// We use the java mirror as the class ID.
void AbstractDumpWriter::write_classID(Klass* k) {
  write_objectID(k->java_mirror());
}

void AbstractDumpWriter::finish_dump_segment() {
  if (_in_dump_segment) {
    assert(_sub_record_left == 0, "Last sub-record not written completely");
    assert(_sub_record_ended, "sub-record must have ended");

    // Fix up the dump segment length unless the last sub-record was huge (in
    // which case the segment length was already set to the correct value when
    // the segment was started).
    if (!_is_huge_sub_record) {
      assert(position() > dump_segment_header_size, "Dump segment should have some content");
      Bytes::put_Java_u4((address) (buffer() + 5),
                         (u4) (position() - dump_segment_header_size));
    } else {
      // Finished processing a huge sub-record. Reset _is_huge_sub_record so
      // the parallel dump writer can flush data to the file.
      _is_huge_sub_record = false;
    }

    _in_dump_segment = false;
    flush();
  }
}

void AbstractDumpWriter::start_sub_record(u1 tag, u4 len) {
  if (!_in_dump_segment) {
    if (position() > 0) {
      flush();
    }

    assert(position() == 0 && buffer_size() > dump_segment_header_size, "Must be at the start");

    write_u1(HPROF_HEAP_DUMP_SEGMENT);
    write_u4(0); // timestamp
    // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
    // this is already the correct length, since we don't add more sub-records.
    write_u4(len);
    assert(Bytes::get_Java_u4((address)(buffer() + 5)) == len, "Inconsistent size!");
    _in_dump_segment = true;
    _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
  } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
    // This sub-record will not fit completely into the buffer, or the last
    // sub-record was huge. Finish the current segment and try again.
    finish_dump_segment();
    start_sub_record(tag, len);

    return;
  }

  debug_only(_sub_record_left = len);
  debug_only(_sub_record_ended = false);

  write_u1(tag);
}

void AbstractDumpWriter::end_sub_record() {
  assert(_in_dump_segment, "must be in dump segment");
  assert(_sub_record_left == 0, "sub-record not written completely");
  assert(!_sub_record_ended, "Must not have ended yet");
  debug_only(_sub_record_ended = true);
}
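
// Typical use of the sub-record protocol (an illustrative sketch; see
// DumperSupport::dump_instance() below for a real caller):
//
//   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, len);
//   ... write exactly 'len' bytes via the write_* functions ...
//   writer->end_sub_record();
//
// start_sub_record() opens a new HPROF_HEAP_DUMP_SEGMENT when necessary, and
// finish_dump_segment() patches the segment length once the segment is complete.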

// Supports I/O operations for a dump

class DumpWriter : public AbstractDumpWriter {
private:
  FileWriter* _writer;
  AbstractCompressor* _compressor;
  size_t _bytes_written;
  char* _error;
  // Compression support
  char* _out_buffer;
  size_t _out_size;
  size_t _out_pos;
  char* _tmp_buffer;
  size_t _tmp_size;

private:
  void do_compress();

public:
  DumpWriter(const char* path, bool overwrite, AbstractCompressor* compressor);
  ~DumpWriter();
  julong bytes_written() const override        { return (julong) _bytes_written; }
  char const* error() const override           { return _error; }
  void set_error(const char* error)            { _error = (char*)error; }
  bool has_error() const                       { return _error != nullptr; }
  const char* get_file_path() const            { return _writer->get_file_path(); }
  AbstractCompressor* compressor()             { return _compressor; }
  bool is_overwrite() const                    { return _writer->is_overwrite(); }

  void flush() override;

private:
  // internals for DumpMerger
  friend class DumpMerger;
  void set_bytes_written(julong bytes_written) { _bytes_written = bytes_written; }
  int get_fd() const                           { return _writer->get_fd(); }
  void set_compressor(AbstractCompressor* p)   { _compressor = p; }
};
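
// Illustrative use (hypothetical file name; error handling elided):
//
//   DumpWriter writer("heap.hprof", /* overwrite */ false, /* compressor */ nullptr);
//   if (writer.has_error()) {
//     // report writer.error() to the caller
//   }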

DumpWriter::DumpWriter(const char* path, bool overwrite, AbstractCompressor* compressor) :
  AbstractDumpWriter(),
  _writer(new (std::nothrow) FileWriter(path, overwrite)),
  _compressor(compressor),
  _bytes_written(0),
  _error(nullptr),
  _out_buffer(nullptr),
  _out_size(0),
  _out_pos(0),
  _tmp_buffer(nullptr),
  _tmp_size(0) {
  // new (std::nothrow) may have failed; check before dereferencing.
  if (_writer == nullptr) {
    _error = (char*)"Could not allocate file writer";
  } else {
    _error = (char*)_writer->open_writer();
  }
  if (_error == nullptr) {
    _buffer = (char*)os::malloc(io_buffer_max_size, mtInternal);
    if (compressor != nullptr) {
      _error = (char*)_compressor->init(io_buffer_max_size, &_out_size, &_tmp_size);
      if (_error == nullptr) {
        if (_out_size > 0) {
          _out_buffer = (char*)os::malloc(_out_size, mtInternal);
        }
        if (_tmp_size > 0) {
          _tmp_buffer = (char*)os::malloc(_tmp_size, mtInternal);
        }
      }
    }
  }
  // initialize internal buffer
  _pos = 0;
  _size = io_buffer_max_size;
}

DumpWriter::~DumpWriter() {
  if (_buffer != nullptr) {
    os::free(_buffer);
  }
  if (_out_buffer != nullptr) {
    os::free(_out_buffer);
  }
  if (_tmp_buffer != nullptr) {
    os::free(_tmp_buffer);
  }
  if (_writer != nullptr) {
    delete _writer;
  }
  _bytes_written = -1;
}

// flush any buffered bytes to the file
void DumpWriter::flush() {
  if (_pos == 0) {
    return;
  }
  if (has_error()) {
    _pos = 0;
    return;
  }
  char* result = nullptr;
  if (_compressor == nullptr) {
    result = (char*)_writer->write_buf(_buffer, _pos);
    _bytes_written += _pos;
  } else {
    do_compress();
    if (!has_error()) {
      result = (char*)_writer->write_buf(_out_buffer, _out_pos);
      _bytes_written += _out_pos;
    }
  }
  _pos = 0; // reset pos to make internal buffer available

  if (result != nullptr) {
    set_error(result);
  }
}

void DumpWriter::do_compress() {
  const char* msg = _compressor->compress(_buffer, _pos, _out_buffer, _out_size,
                                          _tmp_buffer, _tmp_size, &_out_pos);

  if (msg != nullptr) {
    set_error(msg);
  }
}

class DumperClassCacheTable;
class DumperClassCacheTableEntry;

// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // calculates the total size of all fields of the given class
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dumps the raw values of the instance fields of the given identity or inlined object;
  // for identity objects offset is 0 and 'klass' is o->klass(),
  // for inlined objects offset is the offset in the holder object and 'klass' is the inlined object's class
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);
  // dumps the raw values of the instance fields of the given inlined object;
  // dump_instance_fields wrapper for inlined objects
  static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);

  // gets the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k, uintx *inlined_fields_index = nullptr);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
  static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // creates HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // checks if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
  // extended version to dump flat arrays as primitive arrays;
  // type_size specifies the size of the inlined objects.
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  // helper methods for inlined fields.
  static bool is_inlined_field(const fieldDescriptor& fld) {
    return fld.is_flat();
  }
  static InlineKlass* get_inlined_field_klass(const fieldDescriptor& fld) {
    assert(is_inlined_field(fld), "must be inlined field");
    InstanceKlass* holder_klass = fld.field_holder();
    return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
  }

  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, cds, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name(),
                  p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name());
      }
    }
  }
};

// Hash table mapping klasses to their klass metadata. This should greatly
// improve heap dumping performance. This hash table is supposed to be used by
// a single thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  GrowableArray<char> _sigs_start;
  GrowableArray<int> _offsets;
  GrowableArray<InlineKlass*> _inline_klasses;
  u4 _instance_size;
  int _entries;

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {}

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  void push_sig_start_inlined() { _sigs_start.push('Q'); }
  bool is_inlined(int field_idx) { return _sigs_start.at(field_idx) == 'Q'; }
  InlineKlass* inline_klass(int field_idx) { assert(is_inlined(field_idx), "Not inlined"); return _inline_klasses.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};
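
// For illustration (hypothetical class): an InstanceKlass for
//
//   class Point { int x; int y; }
//
// would be cached as an entry with _sigs_start = {'I', 'I'}, the two field
// offsets, _instance_size = 8 and _entries = 2. An inlined (flat) field is
// recorded with the marker 'Q' and its InlineKlass instead of a signature
// character (see DumperClassCacheTable::lookup_or_create() below).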

class DumperClassCacheTable {
private:
  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  static constexpr size_t TABLE_SIZE = 1031;

  // Maintain the cache for N classes. This limits the memory footprint
  // impact, regardless of how many classes we have in the dump.
  // This also improves lookup performance by keeping the statically
  // sized table from becoming overloaded.
  static constexpr int CACHE_TOP = 256;

  typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
                            TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
  PtrTable* _ptrs;

  // Single-slot cache to handle the major case of objects of the same
  // class back-to-back, e.g. from T[].
  InstanceKlass* _last_ik;
  DumperClassCacheTableEntry* _last_entry;

  void unlink_all(PtrTable* table) {
    class CleanupEntry: StackObj {
    public:
      bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
        delete entry;
        return true;
      }
    } cleanup;
    table->unlink(&cleanup);
  }

public:
  DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
    if (_last_ik == ik) {
      return _last_entry;
    }

    DumperClassCacheTableEntry* entry;
    DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
    if (from_cache == nullptr) {
      entry = new DumperClassCacheTableEntry();
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          InlineKlass* inlineKlass = nullptr;
          if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
            inlineKlass = DumperSupport::get_inlined_field_klass(fld.field_descriptor());
            entry->push_sig_start_inlined();
            entry->_instance_size += DumperSupport::instance_size(inlineKlass);
          } else {
            Symbol* sig = fld.signature();
            entry->_sigs_start.push(sig->char_at(0));
            entry->_instance_size += DumperSupport::sig2size(sig);
          }
          entry->_inline_klasses.push(inlineKlass);
          entry->_offsets.push(fld.offset());
          entry->_entries++;
        }
      }

      if (_ptrs->number_of_entries() >= CACHE_TOP) {
        // We do not track the individual hit rates for table entries.
        // Purge the entire table, and let the cache catch up with the new
        // distribution.
        unlink_all(_ptrs);
      }

      _ptrs->put(ik, entry);
    } else {
      entry = *from_cache;
    }

    // Remember for single-slot cache.
    _last_ik = ik;
    _last_entry = entry;

    return entry;
  }

  DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}

  ~DumperClassCacheTable() {
    unlink_all(_ptrs);
    delete _ptrs;
  }
};

// write a header of the given type
void DumperSupport::write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
  writer->write_u1(tag);
  writer->write_u4(0);                  // current ticks
  writer->write_u4(len);
}

// returns hprof tag for the given type signature
hprofTag DumperSupport::sig2tag(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_ARRAY    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_BYTE     : return HPROF_BYTE;
    case JVM_SIGNATURE_CHAR     : return HPROF_CHAR;
    case JVM_SIGNATURE_FLOAT    : return HPROF_FLOAT;
    case JVM_SIGNATURE_DOUBLE   : return HPROF_DOUBLE;
    case JVM_SIGNATURE_INT      : return HPROF_INT;
    case JVM_SIGNATURE_LONG     : return HPROF_LONG;
    case JVM_SIGNATURE_SHORT    : return HPROF_SHORT;
    case JVM_SIGNATURE_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

hprofTag DumperSupport::type2tag(BasicType type) {
  switch (type) {
    case T_BYTE     : return HPROF_BYTE;
    case T_CHAR     : return HPROF_CHAR;
    case T_FLOAT    : return HPROF_FLOAT;
    case T_DOUBLE   : return HPROF_DOUBLE;
    case T_INT      : return HPROF_INT;
    case T_LONG     : return HPROF_LONG;
    case T_SHORT    : return HPROF_SHORT;
    case T_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

u4 DumperSupport::sig2size(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS:
    case JVM_SIGNATURE_ARRAY: return sizeof(address);
    case JVM_SIGNATURE_BOOLEAN:
    case JVM_SIGNATURE_BYTE: return 1;
    case JVM_SIGNATURE_SHORT:
    case JVM_SIGNATURE_CHAR: return 2;
    case JVM_SIGNATURE_INT:
    case JVM_SIGNATURE_FLOAT: return 4;
    case JVM_SIGNATURE_LONG:
    case JVM_SIGNATURE_DOUBLE: return 8;
    default: ShouldNotReachHere(); /* to shut up compiler */ return 0;
  }
}

template<typename T, typename F> T bit_cast(F from) { // replace with the real thing when we can use c++20
  T to;
  static_assert(sizeof(to) == sizeof(from), "must be of the same size");
  memcpy(&to, &from, sizeof(to));
  return to;
}

// dump a jfloat
void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
  if (g_isnan(f)) {
    writer->write_u4(0x7fc00000); // collapsing NaNs
  } else {
    writer->write_u4(bit_cast<u4>(f));
  }
}

// dump a jdouble
void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
  if (g_isnan(d)) {
    writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
  } else {
    writer->write_u8(bit_cast<u8>(d));
  }
}


// dumps the raw value of the given field
void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
  switch (type) {
    case JVM_SIGNATURE_CLASS :
    case JVM_SIGNATURE_ARRAY : {
      oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
      o = mask_dormant_archived_object(o, obj);
      assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
      writer->write_objectID(o);
      break;
    }
    case JVM_SIGNATURE_BYTE : {
      jbyte b = obj->byte_field(offset);
      writer->write_u1(b);
      break;
    }
    case JVM_SIGNATURE_CHAR : {
      jchar c = obj->char_field(offset);
      writer->write_u2(c);
      break;
    }
    case JVM_SIGNATURE_SHORT : {
      jshort s = obj->short_field(offset);
      writer->write_u2(s);
      break;
    }
    case JVM_SIGNATURE_FLOAT : {
      jfloat f = obj->float_field(offset);
      dump_float(writer, f);
      break;
    }
    case JVM_SIGNATURE_DOUBLE : {
      jdouble d = obj->double_field(offset);
      dump_double(writer, d);
      break;
    }
    case JVM_SIGNATURE_INT : {
      jint i = obj->int_field(offset);
      writer->write_u4(i);
      break;
    }
    case JVM_SIGNATURE_LONG : {
      jlong l = obj->long_field(offset);
      writer->write_u8(l);
      break;
    }
    case JVM_SIGNATURE_BOOLEAN : {
      jboolean b = obj->bool_field(offset);
      writer->write_u1(b);
      break;
    }
    default : {
      ShouldNotReachHere();
      break;
    }
  }
}

// calculates the total size of all fields of the given class
u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
  if (class_cache_entry != nullptr) {
    return class_cache_entry->instance_size();
  } else {
    u4 size = 0;
    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (is_inlined_field(fld.field_descriptor())) {
          size += instance_size(get_inlined_field_klass(fld.field_descriptor()));
        } else {
          size += sig2size(fld.signature());
        }
      }
    }
    return size;
  }
}

u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
  field_count = 0;
  u4 size = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (fldc.access_flags().is_static()) {
      assert(!is_inlined_field(fldc.field_descriptor()), "static fields cannot be inlined");

      field_count++;
      size += sig2size(fldc.signature());
    }
  }

  // Add in resolved_references, which is referenced by the cpCache.
  // The resolved_references is an array per InstanceKlass holding the
  // strings and other oops resolved from the constant pool.
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    field_count++;
    size += sizeof(address);

    // Add in the resolved_references of the used previous versions of the class
    // in the case of RedefineClasses
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      field_count++;
      size += sizeof(address);
      prev = prev->previous_versions();
    }
  }

  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
  // arrays.
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    field_count++;
    size += sizeof(address);
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
}

// dumps static fields of the given class
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!is_inlined_field(fld.field_descriptor()), "static fields cannot be inlined");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}

// dumps the raw values of the instance fields of the given identity or inlined object;
// for identity objects offset is 0 and 'klass' is o->klass(),
// for inlined objects offset is the offset in the holder object and 'klass' is the inlined object's class.
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
  assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
  for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
    if (class_cache_entry->is_inlined(idx)) {
      InlineKlass* field_klass = class_cache_entry->inline_klass(idx);
      int fields_offset = offset + (class_cache_entry->offset(idx) - field_klass->payload_offset());
      DumperClassCacheTableEntry* inline_class_cache_entry = class_cache->lookup_or_create(field_klass);
      dump_inlined_object_fields(writer, o, fields_offset, class_cache, inline_class_cache_entry);
    } else {
      dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
    }
  }
}

void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
  // the object is inlined, so all its fields are stored without headers.
  dump_instance_fields(writer, o, offset, class_cache, class_cache_entry);
}
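
// For example (hypothetical types), given
//
//   value class Point { int x; int y; }
//   class Holder { Point p; }   // p is stored flat, without a header
//
// p's fields are dumped as if they were Holder's own fields: the holder-relative
// offset is obtained by subtracting Point's payload offset from the cached field
// offset, and dump_instance_fields() recurses with Point's cache entry.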

// gets the count of the instance fields for a given class
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
  u2 field_count = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (!fldc.access_flags().is_static()) {
      if (is_inlined_field(fldc.field_descriptor())) {
        // add "synthetic" fields for inlined fields.
        field_count += get_instance_fields_count(get_inlined_field_klass(fldc.field_descriptor()));
      } else {
        field_count++;
      }
    }
  }

  return field_count;
}

// dumps the definition of the instance fields for a given class
// inlined_fields_id is non-nullptr for inlined fields (to get synthetic field name IDs
// by using InlinedObjects::get_next_string_id()).
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
  // inlined_fields_id != nullptr means ik is the class of an inlined field.
  // Inlined field id pointer for this class; lazily initialized
  // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
  uintx *this_klass_inlined_fields_id = inlined_fields_id;
  uintx inlined_id = 0;

  // dump the field descriptors
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      if (is_inlined_field(fld.field_descriptor())) {
        // dump "synthetic" fields for inlined fields.
        if (this_klass_inlined_fields_id == nullptr) {
          inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
          this_klass_inlined_fields_id = &inlined_id;
        }
        dump_instance_field_descriptors(writer, get_inlined_field_klass(fld.field_descriptor()), this_klass_inlined_fields_id);
      } else {
        Symbol* sig = fld.signature();
        Symbol* name = nullptr;
        // Use the inlined_fields_id provided by the caller.
        if (inlined_fields_id != nullptr) {
          uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);

          // name_id == 0 is returned on error; use the original field signature.
          if (name_id != 0) {
            *inlined_fields_id = name_id;
            name = reinterpret_cast<Symbol*>(name_id);
          }
        }
        if (name == nullptr) {
          name = fld.name();
        }

        writer->write_symbolID(name);         // name
        writer->write_u1(sig2tag(sig));       // type
      }
    }
  }
}

// creates HPROF_GC_INSTANCE_DUMP record for the given object
void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());

  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  u4 is = instance_size(ik, cache_entry);
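  // Record layout: u1 tag + objectID + u4 stack trace serial number + classID
  // (an objectID) + u4 length of the field data that follows ('is' bytes).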
1377   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1378 
1379   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1380   writer->write_objectID(o);
1381   writer->write_u4(STACK_TRACE_ID);
1382 
1383   // class ID
1384   writer->write_classID(ik);
1385 
1386   // number of bytes that follow
1387   writer->write_u4(is);
1388 
1389   // field values
1390   dump_instance_fields(writer, o, 0, class_cache, cache_entry);
1391 
1392   writer->end_sub_record();
1393 }
1394 
1395 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1396 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1397   InstanceKlass* ik = InstanceKlass::cast(k);
1398 
  // We can safepoint and do a heap dump at a point where we have a Klass,
  // but no java mirror class has been set up for it. So we need to check
  // that the class is at least loaded, to avoid a crash from a null mirror.
1402   if (!ik->is_loaded()) {
1403     return;
1404   }
1405 
1406   u2 static_fields_count = 0;
1407   u4 static_size = get_static_fields_size(ik, static_fields_count);
1408   u2 instance_fields_count = get_instance_fields_count(ik);
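  // each field descriptor is an address-sized name ID plus a one-byte type tag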
1409   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1410   u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1411 
1412   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1413 
1414   // class ID
1415   writer->write_classID(ik);
1416   writer->write_u4(STACK_TRACE_ID);
1417 
1418   // super class ID
1419   InstanceKlass* java_super = ik->java_super();
1420   if (java_super == nullptr) {
1421     writer->write_objectID(oop(nullptr));
1422   } else {
1423     writer->write_classID(java_super);
1424   }
1425 
1426   writer->write_objectID(ik->class_loader());
1427   writer->write_objectID(ik->signers());
1428   writer->write_objectID(ik->protection_domain());
1429 
1430   // reserved
1431   writer->write_objectID(oop(nullptr));
1432   writer->write_objectID(oop(nullptr));
1433 
1434   // instance size
1435   writer->write_u4(HeapWordSize * ik->size_helper());
1436 
1437   // size of constant pool - ignored by HAT 1.1
1438   writer->write_u2(0);
1439 
1440   // static fields
1441   writer->write_u2(static_fields_count);
1442   dump_static_fields(writer, ik);
1443 
1444   // description of instance fields
1445   writer->write_u2(instance_fields_count);
1446   dump_instance_field_descriptors(writer, ik);
1447 
1448   writer->end_sub_record();
1449 }
1450 
1451 // creates HPROF_GC_CLASS_DUMP record for the given array class
1452 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1453   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1454   if (k->is_objArray_klass()) {
1455     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
1456     assert(bk != nullptr, "checking");
1457     if (bk->is_instance_klass()) {
1458       ik = InstanceKlass::cast(bk);
1459     }
1460   }
1461 
1462   u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + 2;
1463   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1464   writer->write_classID(k);
1465   writer->write_u4(STACK_TRACE_ID);
1466 
1467   // super class of array classes is java.lang.Object
1468   InstanceKlass* java_super = k->java_super();
1469   assert(java_super != nullptr, "checking");
1470   writer->write_classID(java_super);
1471 
1472   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1473   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1474   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1475 
1476   writer->write_objectID(oop(nullptr));    // reserved
1477   writer->write_objectID(oop(nullptr));
1478   writer->write_u4(0);             // instance size
1479   writer->write_u2(0);             // constant pool
1480   writer->write_u2(0);             // static fields
1481   writer->write_u2(0);             // instance fields
1482 
  writer->end_sub_record();
}
1486 
// HPROF uses a u4 as the record length field,
// which means we need to truncate arrays that are too long.
1489 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {
1490   int length = array->length();
1491 
1492   size_t length_in_bytes = (size_t)length * type_size;
1493   uint max_bytes = max_juint - header_size;
1494 
1495   if (length_in_bytes > max_bytes) {
1496     length = max_bytes / type_size;
1497     length_in_bytes = (size_t)length * type_size;
1498 
1499     BasicType type = ArrayKlass::cast(array->klass())->element_type();
1500     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1501             type2name_tab[type], array->length(), length);
1502   }
1503   return length;
1504 }
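
// Worked example (illustrative numbers): a long[] with 600,000,000 elements
// has a 4.8GB payload, which exceeds what a u4 length field can describe.
// With type_size == 8 the length is truncated to (max_juint - header_size) / 8,
// roughly 536 million elements.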
1505 
1506 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1507   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1508   assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type");
1509   int type_size;
1510   if (type == T_OBJECT) {
1511     type_size = sizeof(address);
1512   } else if (type == T_FLAT_ELEMENT) {
1513       // TODO: FIXME
1514       fatal("Not supported yet"); // FIXME: JDK-8325678
1515   } else {
1516     type_size = type2aelembytes(type);
1517   }
1518 
1519   return calculate_array_max_length(writer, array, type_size, header_size);
1520 }
1521 
1522 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1523 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1524   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1525   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1526   int length = calculate_array_max_length(writer, array, header_size);
1527   u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1528 
1529   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1530   writer->write_objectID(array);
1531   writer->write_u4(STACK_TRACE_ID);
1532   writer->write_u4(length);
1533 
1534   // array class ID
1535   writer->write_classID(array->klass());
1536 
1537   // [id]* elements
1538   for (int index = 0; index < length; index++) {
1539     oop o = array->obj_at(index);
1540     o = mask_dormant_archived_object(o, array);
1541     writer->write_objectID(o);
1542   }
1543 
1544   writer->end_sub_record();
1545 }
1546 
1547 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
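// The flattened element payload is written as if it were a byte array (element
// type T_BYTE below); readers recover the real element layout from the
// HPROF_FLAT_ARRAYS and HPROF_INLINED_FIELDS metadata, for which the array is
// registered via InlinedObjects::add_flat_array() at the end of this method.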
1548 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache) {
1549   FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1550   InlineKlass* element_klass = array_klass->element_klass();
1551   int element_size = instance_size(element_klass);
  /* header:                  u1         sub-record tag
   *                          id         array object ID
   *                          u4         stack trace serial number
   *                          u4         number of elements
   *                          u1         element type
   */
  short header_size = 1 + sizeof(address) + 2 * 4 + 1;
1558 
1559   // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
1560   BasicType type = T_BYTE;
1561   int type_size = type2aelembytes(type);
1562   int length = calculate_array_max_length(writer, array, element_size, header_size);
1563   u4 length_in_bytes = (u4)(length * element_size);
1564   u4 size = header_size + length_in_bytes;
1565 
1566   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1567   writer->write_objectID(array);
1568   writer->write_u4(STACK_TRACE_ID);
1569   // TODO: round up array length for T_SHORT/T_INT/T_LONG
  writer->write_u4(length_in_bytes);
1571   writer->write_u1(type2tag(type));
1572 
1573   for (int index = 0; index < length; index++) {
1574     // need offset in the holder to read inlined object. calculate it from flatArrayOop::value_at_addr()
1575     int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper())
1576                   - cast_from_oop<address>(array));
1577     DumperClassCacheTableEntry* class_cache_entry = class_cache->lookup_or_create(element_klass);
1578     dump_inlined_object_fields(writer, array, offset, class_cache, class_cache_entry);
1579   }
1580 
1581   // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1582 
1583   InlinedObjects::get_instance()->add_flat_array(array);
1584 
1585   writer->end_sub_record();
1586 }
1587 
1588 #define WRITE_ARRAY(Array, Type, Size, Length) \
1589   for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
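
// For instance, WRITE_ARRAY(array, int, u4, length) expands to a loop that
// writes each element via writer->write_u4((u4)array->int_at(i)), letting the
// writer emit each value in the big-endian order HPROF requires.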
1590 
1591 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1592 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1593   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1594   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1595   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1596 
1597   int length = calculate_array_max_length(writer, array, header_size);
1598   int type_size = type2aelembytes(type);
1599   u4 length_in_bytes = (u4)length * type_size;
1600   u4 size = header_size + length_in_bytes;
1601 
1602   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1603   writer->write_objectID(array);
1604   writer->write_u4(STACK_TRACE_ID);
1605   writer->write_u4(length);
1606   writer->write_u1(type2tag(type));
1607 
1608   // nothing to copy
1609   if (length == 0) {
1610     writer->end_sub_record();
1611     return;
1612   }
1613 
1614   // If the byte ordering is big endian then we can copy most types directly
1615 
1616   switch (type) {
1617     case T_INT : {
1618       if (Endian::is_Java_byte_ordering_different()) {
1619         WRITE_ARRAY(array, int, u4, length);
1620       } else {
1621         writer->write_raw(array->int_at_addr(0), length_in_bytes);
1622       }
1623       break;
1624     }
1625     case T_BYTE : {
1626       writer->write_raw(array->byte_at_addr(0), length_in_bytes);
1627       break;
1628     }
1629     case T_CHAR : {
1630       if (Endian::is_Java_byte_ordering_different()) {
1631         WRITE_ARRAY(array, char, u2, length);
1632       } else {
1633         writer->write_raw(array->char_at_addr(0), length_in_bytes);
1634       }
1635       break;
1636     }
1637     case T_SHORT : {
1638       if (Endian::is_Java_byte_ordering_different()) {
1639         WRITE_ARRAY(array, short, u2, length);
1640       } else {
1641         writer->write_raw(array->short_at_addr(0), length_in_bytes);
1642       }
1643       break;
1644     }
1645     case T_BOOLEAN : {
1646       if (Endian::is_Java_byte_ordering_different()) {
1647         WRITE_ARRAY(array, bool, u1, length);
1648       } else {
1649         writer->write_raw(array->bool_at_addr(0), length_in_bytes);
1650       }
1651       break;
1652     }
1653     case T_LONG : {
1654       if (Endian::is_Java_byte_ordering_different()) {
1655         WRITE_ARRAY(array, long, u8, length);
1656       } else {
1657         writer->write_raw(array->long_at_addr(0), length_in_bytes);
1658       }
1659       break;
1660     }
1661 
    // Handle floats/doubles specially to ensure that NaNs are
    // written correctly.
    // TODO: check if we can avoid this on processors that use IEEE 754.
1665 
1666     case T_FLOAT : {
1667       for (int i = 0; i < length; i++) {
1668         dump_float(writer, array->float_at(i));
1669       }
1670       break;
1671     }
1672     case T_DOUBLE : {
1673       for (int i = 0; i < length; i++) {
1674         dump_double(writer, array->double_at(i));
1675       }
1676       break;
1677     }
1678     default : ShouldNotReachHere();
1679   }
1680 
1681   writer->end_sub_record();
1682 }
1683 
1684 // create a HPROF_FRAME record of the given Method* and bci
1685 void DumperSupport::dump_stack_frame(AbstractDumpWriter* writer,
1686                                      int frame_serial_num,
1687                                      int class_serial_num,
1688                                      Method* m,
1689                                      int bci) {
1690   int line_number;
1691   if (m->is_native()) {
1692     line_number = -3;  // native frame
1693   } else {
1694     line_number = m->line_number_from_bci(bci);
1695   }
1696 
1697   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1698   writer->write_id(frame_serial_num);               // frame serial number
1699   writer->write_symbolID(m->name());                // method's name
1700   writer->write_symbolID(m->signature());           // method's signature
1701 
1702   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1703   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1704   writer->write_u4(class_serial_num);               // class serial number
1705   writer->write_u4((u4) line_number);               // line number
1706 }
1707 
1708 
1709 class InlinedFieldNameDumper : public LockedClassesDo {
1710 public:
1711   typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);
1712 
1713 private:
1714   AbstractDumpWriter* _writer;
1715   InlinedObjects *_owner;
1716   Callback       _callback;
1717   uintx _index;
1718 
1719   void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
1720     super_names->push(field_name);
1721     for (HierarchicalFieldStream<JavaFieldStream> fld(klass); !fld.done(); fld.next()) {
1722       if (!fld.access_flags().is_static()) {
1723         if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1724           dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1725         } else {
1726           // get next string ID.
1727           uintx next_index = _owner->get_next_string_id(_index);
1728           if (next_index == 0) {
1729             // something went wrong (overflow?)
1730             // stop generation; the rest of inlined objects will have original field names.
1731             return;
1732           }
1733           _index = next_index;
1734 
1735           // Calculate length.
1736           int len = fld.name()->utf8_length();
1737           for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1738             len += (*it)->utf8_length() + 1;    // +1 for ".".
1739           }
1740 
1741           DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
1742           _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
1743           // Write the string value.
1744           // 1) super_names.
1745           for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1746             _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
1747             _writer->write_u1('.');
1748           }
1749           // 2) field name.
1750           _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
1751         }
1752       }
1753     }
1754     super_names->pop();
1755   }
1756 
1757   void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
1758     GrowableArray<Symbol*> super_names(4, mtServiceability);
1759     dump_inlined_field_names(&super_names, field_name, field_klass);
1760   }
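
  // Example of the synthetic names produced above (hypothetical classes): for
  // a field "start" of an inlined value class Point { int x; int y; }, the
  // HPROF_UTF8 strings written are "start.x" and "start.y"; deeper nesting
  // yields further dot-separated segments such as "outer.inner.x".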
1761 
1762 public:
1763   InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
1764     : _writer(writer), _owner(owner), _callback(callback), _index(0)  {
1765   }
1766 
1767   void do_klass(Klass* k) {
1768     if (!k->is_instance_klass()) {
1769       return;
1770     }
1771     InstanceKlass* ik = InstanceKlass::cast(k);
1772     // if (ik->has_inline_type_fields()) {
1773     //   return;
1774     // }
1775 
1776     uintx base_index = _index;
1777     int count = 0;
1778 
1779     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1780       if (!fld.access_flags().is_static()) {
1781         if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1782           dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1783           count++;
1784         }
1785       }
1786     }
1787 
1788     if (count != 0) {
1789       _callback(_owner, k, base_index, count);
1790     }
1791   }
1792 };
1793 
1794 class InlinedFieldsDumper : public LockedClassesDo {
1795 private:
1796   AbstractDumpWriter* _writer;
1797 
1798 public:
1799   InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1800 
1801   void do_klass(Klass* k) {
1802     if (!k->is_instance_klass()) {
1803       return;
1804     }
1805     InstanceKlass* ik = InstanceKlass::cast(k);
1806     // if (ik->has_inline_type_fields()) {
1807     //   return;
1808     // }
1809 
    // We can be at a point where the java mirror does not exist yet.
    // So we need to check that the class is at least loaded, to avoid a crash from a null mirror.
1812     if (!ik->is_loaded()) {
1813       return;
1814     }
1815 
1816     u2 inlined_count = 0;
1817     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1818       if (!fld.access_flags().is_static()) {
1819         if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1820           inlined_count++;
1821         }
1822       }
1823     }
1824     if (inlined_count != 0) {
1825       _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);
1826 
1827       // class ID
1828       _writer->write_classID(ik);
1829       // number of inlined fields
1830       _writer->write_u2(inlined_count);
1831       u2 index = 0;
1832       for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1833         if (!fld.access_flags().is_static()) {
1834           if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1835             // inlined field index
1836             _writer->write_u2(index);
1837             // synthetic field count
1838             u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1839             _writer->write_u2(field_count);
1840             // original field name
1841             _writer->write_symbolID(fld.name());
1842             // inlined field class ID
1843             _writer->write_classID(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1844 
1845             index += field_count;
1846           } else {
1847             index++;
1848           }
1849         }
1850       }
1851     }
1852   }
1853 };
1854 
1855 
1856 void InlinedObjects::init() {
1857   _instance = this;
1858 
1859   struct Closure : public SymbolClosure {
1860     uintx _min_id = max_uintx;
1861     uintx _max_id = 0;
1862     Closure() : _min_id(max_uintx), _max_id(0) {}
1863 
1864     void do_symbol(Symbol** p) {
1865       uintx val = reinterpret_cast<uintx>(*p);
1866       if (val < _min_id) {
1867         _min_id = val;
1868       }
1869       if (val > _max_id) {
1870         _max_id = val;
1871       }
1872     }
1873   } closure;
1874 
1875   SymbolTable::symbols_do(&closure);
1876 
1877   _min_string_id = closure._min_id;
1878   _max_string_id = closure._max_id;
1879 }
1880 
1881 void InlinedObjects::release() {
1882   _instance = nullptr;
1883 
1884   if (_inlined_field_map != nullptr) {
1885     delete _inlined_field_map;
1886     _inlined_field_map = nullptr;
1887   }
1888   if (_flat_arrays != nullptr) {
1889     delete _flat_arrays;
1890     _flat_arrays = nullptr;
1891   }
1892 }
1893 
1894 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1895   if (_this->_inlined_field_map == nullptr) {
1896     _this->_inlined_field_map = new (mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1897   }
1898   _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1899 
1900   // counters for dumping classes with inlined fields
1901   _this->_classes_count++;
1902   _this->_inlined_fields_count += count;
1903 }
1904 
1905 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1906   InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1907   ClassLoaderDataGraph::classes_do(&nameDumper);
1908 
1909   if (_inlined_field_map != nullptr) {
    // prepare the map for get_base_index_for().
1911     _inlined_field_map->sort(ClassInlinedFields::compare);
1912   }
1913 }
1914 
1915 uintx InlinedObjects::get_base_index_for(Klass* k) {
1916   if (_inlined_field_map != nullptr) {
1917     bool found = false;
1918     int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
1919     if (found) {
      return _inlined_field_map->at(idx).base_index;
1921     }
1922   }
1923 
1924   // return max_uintx, so get_next_string_id returns 0.
1925   return max_uintx;
1926 }
1927 
1928 uintx InlinedObjects::get_next_string_id(uintx id) {
1929   if (++id == _min_string_id) {
1930     return _max_string_id + 1;
1931   }
1932   return id;
1933 }
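
// Illustration: synthetic string IDs are simple counters that must not collide
// with real Symbol* addresses, which occupy [_min_string_id, _max_string_id].
// The increment above therefore jumps from just below that range to just above
// it, and a wrapped-around id of 0 serves as the error value callers check for.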
1934 
1935 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1936   if (_classes_count != 0) {
1937     // Record for each class contains tag(u1), class ID and count(u2)
1938     // for each inlined field index(u2), synthetic fields count(u2), original field name and class ID
1939     int size = _classes_count * (1 + sizeof(address) + 2)
1940              + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1941     DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1942 
1943     InlinedFieldsDumper dumper(writer);
1944     ClassLoaderDataGraph::classes_do(&dumper);
1945   }
1946 }
1947 
1948 void InlinedObjects::add_flat_array(oop array) {
1949   if (_flat_arrays == nullptr) {
1950     _flat_arrays = new (mtServiceability) GrowableArray<oop>(100, mtServiceability);
1951   }
1952   _flat_arrays->append(array);
1953 }
1954 
1955 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1956   if (_flat_arrays != nullptr) {
1957     // For each flat array the record contains tag (u1), object ID and class ID.
1958     int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1959 
1960     DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1961     for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1962       flatArrayOop array = flatArrayOop(*it);
1963       FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1964       InlineKlass* element_klass = array_klass->element_klass();
1965       writer->write_u1(HPROF_FLAT_ARRAY);
1966       writer->write_objectID(array);
1967       writer->write_classID(element_klass);
1968     }
1969   }
1970 }
1971 
1972 
1973 // Support class used to generate HPROF_UTF8 records from the entries in the
1974 // SymbolTable.
1975 
1976 class SymbolTableDumper : public SymbolClosure {
1977  private:
1978   AbstractDumpWriter* _writer;
1979   AbstractDumpWriter* writer() const                { return _writer; }
1980  public:
1981   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
1982   void do_symbol(Symbol** p);
1983 };
1984 
1985 void SymbolTableDumper::do_symbol(Symbol** p) {
1986   ResourceMark rm;
1987   Symbol* sym = *p;
1988   int len = sym->utf8_length();
1989   if (len > 0) {
1990     char* s = sym->as_utf8();
1991     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1992     writer()->write_symbolID(sym);
1993     writer()->write_raw(s, len);
1994   }
1995 }
1996 
1997 // Support class used to generate HPROF_GC_CLASS_DUMP records
1998 
1999 class ClassDumper : public KlassClosure {
2000  private:
2001   AbstractDumpWriter* _writer;
2002   AbstractDumpWriter* writer() const { return _writer; }
2003 
2004  public:
2005   ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}
2006 
2007   void do_klass(Klass* k) {
2008     if (k->is_instance_klass()) {
2009       DumperSupport::dump_instance_class(writer(), k);
2010     } else {
2011       DumperSupport::dump_array_class(writer(), k);
2012     }
2013   }
2014 };
2015 
2016 // Support class used to generate HPROF_LOAD_CLASS records
2017 
2018 class LoadedClassDumper : public LockedClassesDo {
2019  private:
2020   AbstractDumpWriter* _writer;
2021   GrowableArray<Klass*>* _klass_map;
2022   u4 _class_serial_num;
2023   AbstractDumpWriter* writer() const { return _writer; }
2024   void add_class_serial_number(Klass* k, int serial_num) {
2025     _klass_map->at_put_grow(serial_num, k);
2026   }
2027  public:
2028   LoadedClassDumper(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map)
2029     : _writer(writer), _klass_map(klass_map), _class_serial_num(0) {}
2030 
2031   void do_klass(Klass* k) {
2032     // len of HPROF_LOAD_CLASS record
2033     u4 remaining = 2 * oopSize + 2 * sizeof(u4);
2034     DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
2035     // class serial number is just a number
2036     writer()->write_u4(++_class_serial_num);
2037     // class ID
2038     writer()->write_classID(k);
2039     // add the Klass* and class serial number pair
2040     add_class_serial_number(k, _class_serial_num);
2041     writer()->write_u4(STACK_TRACE_ID);
2042     // class name ID
2043     Symbol* name = k->name();
2044     writer()->write_symbolID(name);
2045   }
2046 };
2047 
2048 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
2049 
2050 class JNILocalsDumper : public OopClosure {
2051  private:
2052   AbstractDumpWriter* _writer;
2053   u4 _thread_serial_num;
2054   int _frame_num;
2055   AbstractDumpWriter* writer() const                { return _writer; }
2056  public:
2057   JNILocalsDumper(AbstractDumpWriter* writer, u4 thread_serial_num) {
2058     _writer = writer;
2059     _thread_serial_num = thread_serial_num;
2060     _frame_num = -1;  // default - empty stack
2061   }
2062   void set_frame_number(int n) { _frame_num = n; }
2063   void do_oop(oop* obj_p);
2064   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
2065 };
2066 
2067 void JNILocalsDumper::do_oop(oop* obj_p) {
2068   // ignore null handles
2069   oop o = *obj_p;
2070   if (o != nullptr) {
2071     u4 size = 1 + sizeof(address) + 4 + 4;
2072     writer()->start_sub_record(HPROF_GC_ROOT_JNI_LOCAL, size);
2073     writer()->write_objectID(o);
2074     writer()->write_u4(_thread_serial_num);
2075     writer()->write_u4((u4)_frame_num);
2076     writer()->end_sub_record();
2077   }
2078 }
2079 
2080 
2081 // Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records
2082 
2083 class JNIGlobalsDumper : public OopClosure {
2084  private:
2085   AbstractDumpWriter* _writer;
2086   AbstractDumpWriter* writer() const                { return _writer; }
2087 
2088  public:
2089   JNIGlobalsDumper(AbstractDumpWriter* writer) {
2090     _writer = writer;
2091   }
2092   void do_oop(oop* obj_p);
2093   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
2094 };
2095 
2096 void JNIGlobalsDumper::do_oop(oop* obj_p) {
2097   oop o = NativeAccess<AS_NO_KEEPALIVE>::oop_load(obj_p);
2098 
2099   // ignore these
2100   if (o == nullptr) return;
2101   // we ignore global ref to symbols and other internal objects
2102   if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
2103     u4 size = 1 + 2 * sizeof(address);
2104     writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size);
2105     writer()->write_objectID(o);
2106     writer()->write_rootID(obj_p);      // global ref ID
2107     writer()->end_sub_record();
2108   }
}
2110 
2111 // Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
2112 
2113 class StickyClassDumper : public KlassClosure {
2114  private:
2115   AbstractDumpWriter* _writer;
2116   AbstractDumpWriter* writer() const                { return _writer; }
2117  public:
2118   StickyClassDumper(AbstractDumpWriter* writer) {
2119     _writer = writer;
2120   }
2121   void do_klass(Klass* k) {
2122     if (k->is_instance_klass()) {
2123       InstanceKlass* ik = InstanceKlass::cast(k);
2124       u4 size = 1 + sizeof(address);
2125       writer()->start_sub_record(HPROF_GC_ROOT_STICKY_CLASS, size);
2126       writer()->write_classID(ik);
2127       writer()->end_sub_record();
2128     }
2129   }
2130 };
2131 
2132 // Support class used to generate HPROF_GC_ROOT_JAVA_FRAME records.
2133 
2134 class JavaStackRefDumper : public StackObj {
2135 private:
2136   AbstractDumpWriter* _writer;
2137   u4 _thread_serial_num;
2138   int _frame_num;
2139   AbstractDumpWriter* writer() const { return _writer; }
2140 public:
2141   JavaStackRefDumper(AbstractDumpWriter* writer, u4 thread_serial_num)
2142       : _writer(writer), _thread_serial_num(thread_serial_num), _frame_num(-1) // default - empty stack
2143   {
2144   }
2145 
2146   void set_frame_number(int n) { _frame_num = n; }
2147 
2148   void dump_java_stack_refs(StackValueCollection* values);
2149 };
2150 
2151 void JavaStackRefDumper::dump_java_stack_refs(StackValueCollection* values) {
2152   for (int index = 0; index < values->size(); index++) {
2153     if (values->at(index)->type() == T_OBJECT) {
2154       oop o = values->obj_at(index)();
2155       if (o != nullptr) {
2156         u4 size = 1 + sizeof(address) + 4 + 4;
2157         writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
2158         writer()->write_objectID(o);
2159         writer()->write_u4(_thread_serial_num);
2160         writer()->write_u4((u4)_frame_num);
2161         writer()->end_sub_record();
2162       }
2163     }
2164   }
2165 }
2166 
2167 // Class to collect, store and dump thread-related data:
2168 // - HPROF_TRACE and HPROF_FRAME records;
2169 // - HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecords.
2170 class ThreadDumper : public CHeapObj<mtInternal> {
2171 public:
2172   enum class ThreadType { Platform, MountedVirtual, UnmountedVirtual };
2173 
2174 private:
2175   ThreadType _thread_type;
2176   JavaThread* _java_thread;
2177   oop _thread_oop;
2178 
2179   GrowableArray<StackFrameInfo*>* _frames;
  // non-null if this is the thread that threw the OOME
2181   Method* _oome_constructor;
2182   int _thread_serial_num;
2183   int _start_frame_serial_num;
2184 
2185   vframe* get_top_frame() const;
2186 
2187 public:
2188   static bool should_dump_pthread(JavaThread* thread) {
2189     return thread->threadObj() != nullptr && !thread->is_exiting() && !thread->is_hidden_from_external_view();
2190   }
2191 
2192   static bool should_dump_vthread(oop vt) {
2193     return java_lang_VirtualThread::state(vt) != java_lang_VirtualThread::NEW
2194         && java_lang_VirtualThread::state(vt) != java_lang_VirtualThread::TERMINATED;
2195   }
2196 
2197   static bool is_vthread_mounted(oop vt) {
2198     // The code should be consistent with the "mounted virtual thread" case
2199     // (VM_HeapDumper::dump_stack_traces(), ThreadDumper::get_top_frame()).
2200     // I.e. virtual thread is mounted if its carrierThread is not null
2201     // and is_vthread_mounted() for the carrier thread returns true.
2202     oop carrier_thread = java_lang_VirtualThread::carrier_thread(vt);
2203     if (carrier_thread == nullptr) {
2204       return false;
2205     }
2206     JavaThread* java_thread = java_lang_Thread::thread(carrier_thread);
2207     return java_thread->is_vthread_mounted();
2208   }
2209 
2210   ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop);
2211   ~ThreadDumper() {
2212     for (int index = 0; index < _frames->length(); index++) {
2213       delete _frames->at(index);
2214     }
2215     delete _frames;
2216   }
2217 
2218   // affects frame_count
2219   void add_oom_frame(Method* oome_constructor) {
2220     assert(_start_frame_serial_num == 0, "add_oom_frame cannot be called after init_serial_nums");
2221     _oome_constructor = oome_constructor;
2222   }
2223 
2224   void init_serial_nums(volatile int* thread_counter, volatile int* frame_counter) {
2225     assert(_start_frame_serial_num == 0, "already initialized");
2226     _thread_serial_num = Atomic::fetch_then_add(thread_counter, 1);
2227     _start_frame_serial_num = Atomic::fetch_then_add(frame_counter, frame_count());
2228   }
2229 
2230   bool oom_thread() const {
2231     return _oome_constructor != nullptr;
2232   }
2233 
2234   int frame_count() const {
2235     return _frames->length() + (oom_thread() ? 1 : 0);
2236   }
2237 
2238   u4 thread_serial_num() const {
2239     return (u4)_thread_serial_num;
2240   }
2241 
2242   u4 stack_trace_serial_num() const {
2243     return (u4)(_thread_serial_num + STACK_TRACE_ID);
2244   }
2245 
2246   // writes HPROF_TRACE and HPROF_FRAME records
2247   // returns number of dumped frames
2248   void dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map);
2249 
2250   // writes HPROF_GC_ROOT_THREAD_OBJ subrecord
2251   void dump_thread_obj(AbstractDumpWriter* writer);
2252 
2253   // Walk the stack of the thread.
2254   // Dumps a HPROF_GC_ROOT_JAVA_FRAME subrecord for each local
2255   // Dumps a HPROF_GC_ROOT_JNI_LOCAL subrecord for each JNI local
2256   void dump_stack_refs(AbstractDumpWriter* writer);
2257 
2258 };
2259 
2260 ThreadDumper::ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop)
2261     : _thread_type(thread_type), _java_thread(java_thread), _thread_oop(thread_oop),
2262       _oome_constructor(nullptr),
2263       _thread_serial_num(0), _start_frame_serial_num(0)
2264 {
2265   // sanity checks
2266   if (_thread_type == ThreadType::UnmountedVirtual) {
2267     assert(_java_thread == nullptr, "sanity");
2268     assert(_thread_oop != nullptr, "sanity");
2269   } else {
2270     assert(_java_thread != nullptr, "sanity");
2271     assert(_thread_oop != nullptr, "sanity");
2272   }
2273 
2274   _frames = new (mtServiceability) GrowableArray<StackFrameInfo*>(10, mtServiceability);
2275   bool stop_at_vthread_entry = _thread_type == ThreadType::MountedVirtual;
2276 
2277   // vframes are resource allocated
2278   Thread* current_thread = Thread::current();
2279   ResourceMark rm(current_thread);
2280   HandleMark hm(current_thread);
2281 
2282   for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) {
2283     if (stop_at_vthread_entry && vf->is_vthread_entry()) {
2284       break;
2285     }
2286     if (vf->is_java_frame()) {
2287       javaVFrame* jvf = javaVFrame::cast(vf);
2288       _frames->append(new StackFrameInfo(jvf, false));
2289     } else {
2290       // ignore non-Java frames
2291     }
2292   }
2293 }
2294 
2295 void ThreadDumper::dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map) {
2296   assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_nums are not initialized");
2297 
2298   // write HPROF_FRAME records for this thread's stack trace
2299   int depth = _frames->length();
2300   int frame_serial_num = _start_frame_serial_num;
2301 
2302   if (oom_thread()) {
2303     // OOM thread
    // write a fake frame that makes it look like the thread that caused the
    // OOME is in the OutOfMemoryError zero-parameter constructor
2306     int oome_serial_num = klass_map->find(_oome_constructor->method_holder());
2307     // the class serial number starts from 1
2308     assert(oome_serial_num > 0, "OutOfMemoryError class not found");
2309     DumperSupport::dump_stack_frame(writer, ++frame_serial_num, oome_serial_num, _oome_constructor, 0);
2310     depth++;
2311   }
2312 
2313   for (int j = 0; j < _frames->length(); j++) {
2314     StackFrameInfo* frame = _frames->at(j);
2315     Method* m = frame->method();
2316     int class_serial_num = klass_map->find(m->method_holder());
2317     // the class serial number starts from 1
2318     assert(class_serial_num > 0, "class not found");
2319     DumperSupport::dump_stack_frame(writer, ++frame_serial_num, class_serial_num, m, frame->bci());
2320   }
2321 
2322   // write HPROF_TRACE record for the thread
2323   DumperSupport::write_header(writer, HPROF_TRACE, checked_cast<u4>(3 * sizeof(u4) + depth * oopSize));
2324   writer->write_u4(stack_trace_serial_num());   // stack trace serial number
2325   writer->write_u4(thread_serial_num());        // thread serial number
2326   writer->write_u4((u4)depth);                  // frame count (including oom frame)
2327   for (int j = 1; j <= depth; j++) {
2328     writer->write_id(_start_frame_serial_num + j);
2329   }
2330 }
2331 
2332 void ThreadDumper::dump_thread_obj(AbstractDumpWriter * writer) {
2333   assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");
2334 
2335   u4 size = 1 + sizeof(address) + 4 + 4;
2336   writer->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
2337   writer->write_objectID(_thread_oop);
2338   writer->write_u4(thread_serial_num());      // thread serial number
2339   writer->write_u4(stack_trace_serial_num()); // stack trace serial number
2340   writer->end_sub_record();
2341 }
2342 
2343 void ThreadDumper::dump_stack_refs(AbstractDumpWriter * writer) {
2344   assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");
2345 
2346   JNILocalsDumper blk(writer, thread_serial_num());
2347   if (_thread_type == ThreadType::Platform) {
2348     if (!_java_thread->has_last_Java_frame()) {
2349       // no last java frame but there may be JNI locals
2350       _java_thread->active_handles()->oops_do(&blk);
2351       return;
2352     }
2353   }
2354 
2355   JavaStackRefDumper java_ref_dumper(writer, thread_serial_num());
2356 
2357   // vframes are resource allocated
2358   Thread* current_thread = Thread::current();
2359   ResourceMark rm(current_thread);
2360   HandleMark hm(current_thread);
2361 
2362   bool stopAtVthreadEntry = _thread_type == ThreadType::MountedVirtual;
2363   frame* last_entry_frame = nullptr;
2364   bool is_top_frame = true;
2365   int depth = 0;
2366   if (oom_thread()) {
2367     depth++;
2368   }
2369 
2370   for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) {
2371     if (stopAtVthreadEntry && vf->is_vthread_entry()) {
2372       break;
2373     }
2374 
2375     if (vf->is_java_frame()) {
2376       javaVFrame* jvf = javaVFrame::cast(vf);
2377       if (!(jvf->method()->is_native())) {
2378         java_ref_dumper.set_frame_number(depth);
2379         java_ref_dumper.dump_java_stack_refs(jvf->locals());
2380         java_ref_dumper.dump_java_stack_refs(jvf->expressions());
2381       } else {
2382         // native frame
2383         blk.set_frame_number(depth);
2384         if (is_top_frame) {
2385           // JNI locals for the top frame if mounted
2386           assert(_java_thread != nullptr || jvf->method()->is_synchronized()
2387                  || jvf->method()->is_object_wait0(), "impossible for unmounted vthread");
2388           if (_java_thread != nullptr) {
2389             _java_thread->active_handles()->oops_do(&blk);
2390           }
2391         } else {
2392           if (last_entry_frame != nullptr) {
2393             // JNI locals for the entry frame
2394             assert(last_entry_frame->is_entry_frame(), "checking");
2395             last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
2396           }
2397         }
2398       }
2399       last_entry_frame = nullptr;
2400       // increment only for Java frames
2401       depth++;
2402     } else {
      // externalVFrame - if this is an entry frame, we report its JNI locals
      // when we find the corresponding javaVFrame
2405       frame* fr = vf->frame_pointer();
2406       assert(fr != nullptr, "sanity check");
2407       if (fr->is_entry_frame()) {
2408         last_entry_frame = fr;
2409       }
2410     }
    is_top_frame = false;
2412   }
2413   assert(depth == frame_count(), "total number of Java frames not matched");
2414 }
2415 
2416 vframe* ThreadDumper::get_top_frame() const {
2417   if (_thread_type == ThreadType::UnmountedVirtual) {
2418     ContinuationWrapper cont(java_lang_VirtualThread::continuation(_thread_oop));
2419     if (cont.is_empty()) {
2420       return nullptr;
2421     }
2422     assert(!cont.is_mounted(), "sanity check");
2423     stackChunkOop chunk = cont.last_nonempty_chunk();
2424     if (chunk == nullptr || chunk->is_empty()) {
2425       return nullptr;
2426     }
2427 
2428     RegisterMap reg_map(cont.continuation(), RegisterMap::UpdateMap::include);
2429     frame fr = chunk->top_frame(&reg_map);
2430     vframe* vf = vframe::new_vframe(&fr, &reg_map, nullptr); // don't need JavaThread
2431     return vf;
2432   }
2433 
2434   RegisterMap reg_map(_java_thread,
2435       RegisterMap::UpdateMap::include,
2436       RegisterMap::ProcessFrames::include,
2437       RegisterMap::WalkContinuation::skip);
2438   switch (_thread_type) {
2439   case ThreadType::Platform:
2440     if (!_java_thread->has_last_Java_frame()) {
2441       return nullptr;
2442     }
2443     return _java_thread->is_vthread_mounted()
2444         ? _java_thread->carrier_last_java_vframe(&reg_map)
2445         : _java_thread->platform_thread_last_java_vframe(&reg_map);
2446 
2447   case ThreadType::MountedVirtual:
2448     return _java_thread->last_java_vframe(&reg_map);
2449 
2450   default: // make compilers happy
2451       break;
2452   }
2453   ShouldNotReachHere();
2454   return nullptr;
2455 }
2456 
2457 // Callback to dump thread-related data for unmounted virtual threads;
2458 // implemented by VM_HeapDumper.
2459 class UnmountedVThreadDumper {
2460  public:
2461   virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
2462 };
2463 
2464 // Support class used when iterating over the heap.
2465 class HeapObjectDumper : public ObjectClosure {
2466  private:
2467   AbstractDumpWriter* _writer;
2468   AbstractDumpWriter* writer()                  { return _writer; }
2469   UnmountedVThreadDumper* _vthread_dumper;
2470 
2471   DumperClassCacheTable _class_cache;
2472 
2473  public:
2474   HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper)
2475     : _writer(writer), _vthread_dumper(vthread_dumper) {}
2476 
2477   // called for each object in the heap
2478   void do_object(oop o);
2479 };
2480 
2481 void HeapObjectDumper::do_object(oop o) {
  // skip classes as these are emitted as HPROF_GC_CLASS_DUMP records
2483   if (o->klass() == vmClasses::Class_klass()) {
2484     if (!java_lang_Class::is_primitive(o)) {
2485       return;
2486     }
2487   }
2488 
2489   if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2490     return;
2491   }
2492 
2493   if (o->is_instance()) {
2494     // create a HPROF_GC_INSTANCE record for each object
2495     DumperSupport::dump_instance(writer(), o, &_class_cache);
2496     // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2497     // (mounted virtual threads are dumped with their carriers).
2498     if (java_lang_VirtualThread::is_instance(o)
2499         && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2500       _vthread_dumper->dump_vthread(o, writer());
2501     }
2502   } else if (o->is_objArray()) {
2503     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2504     DumperSupport::dump_object_array(writer(), objArrayOop(o));
2505   } else if (o->is_flatArray()) {
2506     DumperSupport::dump_flat_array(writer(), flatArrayOop(o), &_class_cache);
2507   } else if (o->is_typeArray()) {
2508     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2509     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2510   }
2511 }
2512 
2513 // The dumper controller for parallel heap dump
2514 class DumperController : public CHeapObj<mtInternal> {
2515  private:
2516    Monitor* _lock;
2517    Mutex* _global_writer_lock;
2518 
2519    const uint   _dumper_number;
2520    uint   _complete_number;
2521 
2522    bool   _started; // VM dumper started and acquired global writer lock
2523 
2524  public:
2525    DumperController(uint number) :
     // _lock and _global_writer_lock are used for synchronization between GC worker threads inside a safepoint,
     // so we lock with _no_safepoint_check_flag.
     // signal_start() acquires _lock while the global writer is locked,
     // so its rank must be less than the rank of _global_writer_lock.
2530      _lock(new (std::nothrow) PaddedMonitor(Mutex::nosafepoint - 1, "DumperController_lock")),
2531      _global_writer_lock(new (std::nothrow) Mutex(Mutex::nosafepoint, "DumpWriter_lock")),
2532      _dumper_number(number),
2533      _complete_number(0),
2534      _started(false)
2535    {}
2536 
2537    ~DumperController() {
2538      delete _lock;
2539      delete _global_writer_lock;
2540    }
2541 
   // parallel (non-VM) dumpers must wait until the VM dumper acquires the global writer lock
2543    void wait_for_start_signal() {
2544      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2545      while (_started == false) {
2546        ml.wait();
2547      }
2548    }
2549 
2550    void signal_start() {
2551      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2552      _started = true;
2553      ml.notify_all();
2554    }
2555 
2556    void lock_global_writer() {
2557      _global_writer_lock->lock_without_safepoint_check();
2558    }
2559 
2560    void unlock_global_writer() {
2561      _global_writer_lock->unlock();
2562    }
2563 
2564    void dumper_complete(DumpWriter* local_writer, DumpWriter* global_writer) {
2565      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2566      _complete_number++;
2567      // propagate local error to global if any
2568      if (local_writer->has_error()) {
2569        global_writer->set_error(local_writer->error());
2570      }
2571      ml.notify();
2572    }
2573 
2574    void wait_all_dumpers_complete() {
2575      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2576      while (_complete_number != _dumper_number) {
2577         ml.wait();
2578      }
2579    }
2580 };
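
// A sketch of the intended protocol, derived from the methods above: the VM
// dumper calls lock_global_writer() and then signal_start(); the other dumpers
// block in wait_for_start_signal() until then. Each worker reports via
// dumper_complete() when its segment is written, and the VM dumper blocks in
// wait_all_dumpers_complete() until every worker has done so.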
2581 
2582 // DumpMerger merges separate dump files into a complete one
2583 class DumpMerger : public StackObj {
2584 private:
2585   DumpWriter* _writer;
2586   InlinedObjects*  _inlined_objects;
2587   const char* _path;
2588   bool _has_error;
2589   int _dump_seq;
2590 
2591 private:
2592   void merge_file(const char* path);
2593   void merge_done();
2594   void set_error(const char* msg);
2595 
2596 public:
2597   DumpMerger(const char* path, DumpWriter* writer, InlinedObjects* inlined_objects, int dump_seq) :
2598     _writer(writer),
2599     _inlined_objects(inlined_objects),
2600     _path(path),
2601     _has_error(_writer->has_error()),
2602     _dump_seq(dump_seq) {}
2603 
2604   void do_merge();
2605 
2606   // returns path for the parallel DumpWriter (resource allocated)
2607   static char* get_writer_path(const char* base_path, int seq);
2608 
2609 };
2610 
2611 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2612   // approximate required buffer size
2613   size_t buf_size = strlen(base_path)
2614                     + 2                 // ".p"
2615                     + 10                // number (that's enough for 2^32 parallel dumpers)
2616                     + 1;                // '\0'
2617 
2618   char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2619   memset(path, 0, buf_size);
2620 
2621   os::snprintf(path, buf_size, "%s.p%d", base_path, seq);
2622 
2623   return path;
2624 }
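
// For example, with a (hypothetical) base_path of "/tmp/heap.hprof" and seq 2,
// the segment path produced above is "/tmp/heap.hprof.p2".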
2625 
2626 
2627 void DumpMerger::merge_done() {
2628   // Writes the HPROF_HEAP_DUMP_END record.
2629   if (!_has_error) {
2630     DumperSupport::end_of_dump(_writer);
2631     _inlined_objects->dump_flat_arrays(_writer);
2632     _writer->flush();
2633     _inlined_objects->release();
2634   }
  _dump_seq = 0; // reset
2636 }
2637 
2638 void DumpMerger::set_error(const char* msg) {
2639   assert(msg != nullptr, "sanity check");
2640   log_error(heapdump)("%s (file: %s)", msg, _path);
2641   _writer->set_error(msg);
2642   _has_error = true;
2643 }
2644 
2645 #ifdef LINUX
// Merge segmented heap files via sendfile; it's more efficient than the
// read+write combination, which would require transferring data to and from
// user space.
2649 void DumpMerger::merge_file(const char* path) {
2650   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2651 
2652   int segment_fd = os::open(path, O_RDONLY, 0);
2653   if (segment_fd == -1) {
2654     set_error("Can not open segmented heap file during merging");
2655     return;
2656   }
2657 
2658   struct stat st;
2659   if (os::stat(path, &st) != 0) {
2660     ::close(segment_fd);
2661     set_error("Can not get segmented heap file size during merging");
2662     return;
2663   }
2664 
2665   // A successful call to sendfile may write fewer bytes than requested; the
2666   // caller should be prepared to retry the call if there were unsent bytes.
2667   jlong offset = 0;
2668   while (offset < st.st_size) {
2669     int ret = os::Linux::sendfile(_writer->get_fd(), segment_fd, &offset, st.st_size);
2670     if (ret == -1) {
2671       ::close(segment_fd);
2672       set_error("Failed to merge segmented heap file");
2673       return;
2674     }
2675   }
2676 
  // Because the sendfile path bypasses the write method of the global writer,
  // its bytes_written counter is not updated automatically, so we accumulate
  // bytes_written for the global writer explicitly here.
2680   julong accum = _writer->bytes_written() + st.st_size;
2681   _writer->set_bytes_written(accum);
2682   ::close(segment_fd);
2683 }
2684 #else
2685 // Generic implementation using read+write
2686 void DumpMerger::merge_file(const char* path) {
2687   TraceTime timer("Merge segmented heap file", TRACETIME_LOG(Info, heapdump));
2688 
2689   fileStream segment_fs(path, "rb");
2690   if (!segment_fs.is_open()) {
2691     set_error("Can not open segmented heap file during merging");
2692     return;
2693   }
2694 
2695   jlong total = 0;
2696   size_t cnt = 0;
2697 
2698   // Use _writer buffer for reading.
2699   while ((cnt = segment_fs.read(_writer->buffer(), 1, _writer->buffer_size())) != 0) {
2700     _writer->set_position(cnt);
2701     _writer->flush();
2702     total += cnt;
2703   }
2704 
2705   if (segment_fs.fileSize() != total) {
2706     set_error("Merged heap dump is incomplete");
2707   }
2708 }
2709 #endif
2710 
2711 void DumpMerger::do_merge() {
2712   TraceTime timer("Merge heap files complete", TRACETIME_LOG(Info, heapdump));
2713 
  // The contents of the segmented heap files are already compressed, so there
  // is no need to compress them again during merging.
2716   AbstractCompressor* saved_compressor = _writer->compressor();
2717   _writer->set_compressor(nullptr);
2718 
  // Merge the contents of the segment files into the base file. The segment
  // files are deleted regardless of whether the merge succeeds.
2721   for (int i = 0; i < _dump_seq; i++) {
2722     ResourceMark rm;
2723     const char* path = get_writer_path(_path, i);
2724     if (!_has_error) {
2725       merge_file(path);
2726     }
    // Delete the segment file even if merging failed
2728     if (remove(path) != 0) {
2729       log_info(heapdump)("Removal of segment file (%d) failed (%d)", i, errno);
2730     }
2731   }
2732 
2733   // restore compressor for further use
2734   _writer->set_compressor(saved_compressor);
2735   merge_done();
2736 }
2737 
2738 // The VM operation that performs the heap dump
2739 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2740  private:
2741   DumpWriter*             _writer;
2742   JavaThread*             _oome_thread;
2743   Method*                 _oome_constructor;
2744   bool                    _gc_before_heap_dump;
2745   GrowableArray<Klass*>*  _klass_map;
2746 
2747   ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
2748   int                     _thread_dumpers_count;
2749   volatile int            _thread_serial_num;
2750   volatile int            _frame_serial_num;
2751 
2752   volatile int            _dump_seq;
2753 
2754   // Inlined object support.
2755   InlinedObjects          _inlined_objects;
2756 
2757   // parallel heap dump support
2758   uint                    _num_dumper_threads;
2759   DumperController*       _dumper_controller;
2760   ParallelObjectIterator* _poi;
2761 
2762   // Dumper id of VMDumper thread.
2763   static const int VMDumperId = 0;
2764   // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2765   static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2766   // the 1st dumper calling get_next_dumper_id becomes VM dumper
2767   int get_next_dumper_id() {
2768     return Atomic::fetch_then_add(&_dump_seq, 1);
2769   }
2770 
2771   DumpWriter* writer() const { return _writer; }
2772 
2773   bool skip_operation() const;
2774 
2775   // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2776   void dump_threads(AbstractDumpWriter* writer);
2777 
2778   bool is_oom_thread(JavaThread* thread) const {
2779     return thread == _oome_thread && _oome_constructor != nullptr;
2780   }
2781 
2782   // HPROF_TRACE and HPROF_FRAME records for platform and mounted virtual threads
2783   void dump_stack_traces(AbstractDumpWriter* writer);
2784 
2785  public:
2786   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome, uint num_dump_threads) :
2787     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
2788                     GCCause::_heap_dump /* GC Cause */,
2789                     0 /* total full collections, dummy, ignored */,
2790                     gc_before_heap_dump),
2791     WorkerTask("dump heap") {
2792     _writer = writer;
2793     _gc_before_heap_dump = gc_before_heap_dump;
2794     _klass_map = new (mtServiceability) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, mtServiceability);
2795 
2796     _thread_dumpers = nullptr;
2797     _thread_dumpers_count = 0;
2798     _thread_serial_num = 1;
2799     _frame_serial_num = 1;
2800 
2801     _dump_seq = VMDumperId;
2802     _num_dumper_threads = num_dump_threads;
2803     _dumper_controller = nullptr;
2804     _poi = nullptr;
2805     if (oome) {
2806       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
2807       // get OutOfMemoryError zero-parameter constructor
2808       InstanceKlass* oome_ik = vmClasses::OutOfMemoryError_klass();
2809       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
2810                                                           vmSymbols::void_method_signature());
2811       // get thread throwing OOME when generating the heap dump at OOME
2812       _oome_thread = JavaThread::current();
2813     } else {
2814       _oome_thread = nullptr;
2815       _oome_constructor = nullptr;
2816     }
2817   }
2818 
2819   ~VM_HeapDumper() {
2820     if (_thread_dumpers != nullptr) {
2821       for (int i = 0; i < _thread_dumpers_count; i++) {
2822         delete _thread_dumpers[i];
2823       }
2824       FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2825     }
2826 
2827     if (_dumper_controller != nullptr) {
2828       delete _dumper_controller;
2829       _dumper_controller = nullptr;
2830     }
2831     delete _klass_map;
2832   }
2833   int dump_seq()           { return _dump_seq; }
2834   bool is_parallel_dump()  { return _num_dumper_threads > 1; }
2835   void prepare_parallel_dump(WorkerThreads* workers);
2836 
2837   InlinedObjects* inlined_objects() { return &_inlined_objects; }
2838 
2839   VMOp_Type type() const { return VMOp_HeapDumper; }
2840   virtual bool doit_prologue();
2841   void doit();
2842   void work(uint worker_id);
2843 
2844   // UnmountedVThreadDumper implementation
2845   void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2846 };
2847 
2848 bool VM_HeapDumper::skip_operation() const {
2849   return false;
2850 }
2851 
2852 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
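// HPROF record framing reminder: each top-level record is a u1 tag followed by
// a u4 time field and a u4 body length; the END record has an empty body,
// hence the two zero words below.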
2853 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2854   writer->finish_dump_segment();
2855 
2856   writer->write_u1(HPROF_HEAP_DUMP_END);
2857   writer->write_u4(0);
2858   writer->write_u4(0);
2859 }
2860 
2861 // Write a HPROF_GC_ROOT_THREAD_OBJ record for platform/carrier and mounted virtual threads.
2862 // Then walk the stack so that locals and JNI locals are dumped.
2863 void VM_HeapDumper::dump_threads(AbstractDumpWriter* writer) {
2864   for (int i = 0; i < _thread_dumpers_count; i++) {
2865     _thread_dumpers[i]->dump_thread_obj(writer);
2866     _thread_dumpers[i]->dump_stack_refs(writer);
2867   }
2868 }
2869 
2870 bool VM_HeapDumper::doit_prologue() {
2871   if (_gc_before_heap_dump && (UseZGC || UseShenandoahGC)) {
2872     // ZGC and Shenandoah cannot perform a synchronous GC cycle from within the VM thread.
    // So collect_as_vm_thread() is a no-op. To respect the _gc_before_heap_dump flag,
    // a synchronous GC cycle is performed from the caller thread in the prologue.
2875     Universe::heap()->collect(GCCause::_heap_dump);
2876   }
2877   return VM_GC_Operation::doit_prologue();
2878 }
2879 
2880 void VM_HeapDumper::prepare_parallel_dump(WorkerThreads* workers) {
2881   uint num_active_workers = workers != nullptr ? workers->active_workers() : 0;
2882   uint num_requested_dump_threads = _num_dumper_threads;
2883   // check if we can dump in parallel based on requested and active threads
2884   if (num_active_workers <= 1 || num_requested_dump_threads <= 1) {
2885     _num_dumper_threads = 1;
2886   } else {
2887     _num_dumper_threads = clamp(num_requested_dump_threads, 2U, num_active_workers);
2888   }
2889   _dumper_controller = new (std::nothrow) DumperController(_num_dumper_threads);
2890   bool can_parallel = _num_dumper_threads > 1;
2891   log_info(heapdump)("Requested dump threads %u, active dump threads %u, "
2892                      "actual dump threads %u, parallelism %s",
2893                      num_requested_dump_threads, num_active_workers,
2894                      _num_dumper_threads, can_parallel ? "true" : "false");
2895 }
2896 
2897 // The VM operation that dumps the heap. The dump consists of the following
2898 // records:
2899 //
2900 //  HPROF_HEADER
2901 //  [HPROF_UTF8]*
2902 //  [HPROF_LOAD_CLASS]*
2903 //  [[HPROF_FRAME]*|HPROF_TRACE]*
2904 //  [HPROF_GC_CLASS_DUMP]*
2905 //  [HPROF_HEAP_DUMP_SEGMENT]*
2906 //  HPROF_HEAP_DUMP_END
2907 //
// The HPROF_TRACE records represent the stack traces at the time the heap
// dump is generated, plus a "dummy trace" record that does not include any
// frames. The dummy trace record is referenced as the allocation site for
// objects whose allocation site is unknown.
2912 //
2913 // Each HPROF_HEAP_DUMP_SEGMENT record has a length followed by sub-records.
// To allow the heap dump to be generated in a single pass we remember the position
2915 // of the dump length and fix it up after all sub-records have been written.
2916 // To generate the sub-records we iterate over the heap, writing
2917 // HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
2918 // records as we go. Once that is done we write records for some of the GC
2919 // roots.
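//
// Record framing, as described at the top of this file: every record starts
// with a u1 tag, a u4 time offset and a u4 body length. A sketch of a
// HPROF_HEAP_DUMP_SEGMENT record as it appears on disk:
//
//   u1   tag            (HPROF_HEAP_DUMP_SEGMENT)
//   u4   microseconds since the header timestamp
//   u4   length of the sub-records that follow (fixed up after writing)
//   [sub-record]*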
2920 
2921 void VM_HeapDumper::doit() {
2922 
2923   CollectedHeap* ch = Universe::heap();
2924 
2925   ch->ensure_parsability(false); // must happen, even if collection does
2926                                  // not happen (e.g. due to GCLocker)
2927 
2928   if (_gc_before_heap_dump) {
2929     if (GCLocker::is_active()) {
2930       warning("GC locker is held; pre-heapdump GC was skipped");
2931     } else {
2932       ch->collect_as_vm_thread(GCCause::_heap_dump);
2933     }
2934   }
2935 
2936   WorkerThreads* workers = ch->safepoint_workers();
2937   prepare_parallel_dump(workers);
2938 
2939   if (!is_parallel_dump()) {
2940     work(VMDumperId);
2941   } else {
2942     ParallelObjectIterator poi(_num_dumper_threads);
2943     _poi = &poi;
2944     workers->run_task(this, _num_dumper_threads);
2945     _poi = nullptr;
2946   }
2947 }
2948 
2949 void VM_HeapDumper::work(uint worker_id) {
  // The VM dumper handles all non-heap data dumping and takes part in the heap iteration.
2951   int dumper_id = get_next_dumper_id();
2952 
2953   if (is_vm_dumper(dumper_id)) {
2954     // lock global writer, it will be unlocked after VM Dumper finishes with non-heap data
2955     _dumper_controller->lock_global_writer();
2956     _dumper_controller->signal_start();
2957   } else {
2958     _dumper_controller->wait_for_start_signal();
2959   }
2960 
2961   if (is_vm_dumper(dumper_id)) {
2962     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2963     // Write the file header - we always use 1.0.2
2964     const char* header = "JAVA PROFILE 1.0.2";
2965 
    // the header is only a few bytes long - no chance to overflow an int
2967     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2968     writer()->write_u4(oopSize);
2969     // timestamp is current time in ms
2970     writer()->write_u8(os::javaTimeMillis());
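    // Note: the HPROF spec models the timestamp as two u4 words (high word,
    // then low word); a single big-endian u8 write yields the same bytes.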
2971     // HPROF_UTF8 records
2972     SymbolTableDumper sym_dumper(writer());
2973     SymbolTable::symbols_do(&sym_dumper);
2974 
2975     // HPROF_UTF8 records for inlined field names.
2976     inlined_objects()->init();
2977     inlined_objects()->dump_inlined_field_names(writer());
2978 
2979     // HPROF_INLINED_FIELDS
2980     inlined_objects()->dump_classed_with_inlined_fields(writer());
2981 
2982     // write HPROF_LOAD_CLASS records
2983     {
2984       LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
2985       ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
2986     }
2987 
2988     // write HPROF_FRAME and HPROF_TRACE records
    // This must be called after _klass_map has been built by the class iteration above.
2990     dump_stack_traces(writer());
2991 
2992     // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2993     _dumper_controller->unlock_global_writer();
2994   }
2995 
2996   // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2997 
2998   ResourceMark rm;
  // share the global compressor; the local DumpWriter is not responsible for its life cycle
3000   DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
3001                             writer()->is_overwrite(), writer()->compressor());
3002   if (!segment_writer.has_error()) {
3003     if (is_vm_dumper(dumper_id)) {
3004       // dump some non-heap subrecords to heap dump segment
3005       TraceTime timer("Dump non-objects (part 2)", TRACETIME_LOG(Info, heapdump));
3006       // Writes HPROF_GC_CLASS_DUMP records
3007       ClassDumper class_dumper(&segment_writer);
3008       ClassLoaderDataGraph::classes_do(&class_dumper);
3009 
3010       // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
3011       dump_threads(&segment_writer);
3012 
3013       // HPROF_GC_ROOT_JNI_GLOBAL
3014       JNIGlobalsDumper jni_dumper(&segment_writer);
3015       JNIHandles::oops_do(&jni_dumper);
      // technically not JNI roots, but global roots
3017       // for things like preallocated throwable backtraces
3018       Universe::vm_global()->oops_do(&jni_dumper);
3019       // HPROF_GC_ROOT_STICKY_CLASS
3020       // These should be classes in the null class loader data, and not all classes
3021       // if !ClassUnloading
      StickyClassDumper sticky_class_dumper(&segment_writer);
      ClassLoaderData::the_null_class_loader_data()->classes_do(&sticky_class_dumper);
3024     }
3025 
3026     // Heap iteration.
3027     // writes HPROF_GC_INSTANCE_DUMP records.
3028     // After each sub-record is written check_segment_length will be invoked
3029     // to check if the current segment exceeds a threshold. If so, a new
3030     // segment is started.
3031     // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
3032     // of the heap dump.
3033 
3034     TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
3035     HeapObjectDumper obj_dumper(&segment_writer, this);
3036     if (!is_parallel_dump()) {
3037       Universe::heap()->object_iterate(&obj_dumper);
3038     } else {
3039       // == Parallel dump
3040       _poi->object_iterate(&obj_dumper, worker_id);
3041     }
3042 
3043     segment_writer.finish_dump_segment();
3044     segment_writer.flush();
3045   }
3046 
3047   _dumper_controller->dumper_complete(&segment_writer, writer());
3048 
3049   if (is_vm_dumper(dumper_id)) {
3050     _dumper_controller->wait_all_dumpers_complete();
3051 
3052     // flush global writer
3053     writer()->flush();
3054 
3055     // At this point, all fragments of the heapdump have been written to separate files.
    // They still need to be merged into a complete heapdump; the
    // HPROF_HEAP_DUMP_END record is written during that merge.
3057   }
3058 }
3059 
3060 void VM_HeapDumper::dump_stack_traces(AbstractDumpWriter* writer) {
3061   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
3062   DumperSupport::write_header(writer, HPROF_TRACE, 3 * sizeof(u4));
3063   writer->write_u4((u4)STACK_TRACE_ID);
3064   writer->write_u4(0);                    // thread number
3065   writer->write_u4(0);                    // frame count
3066 
  // maximum possible count: every platform thread may be a carrier with a mounted virtual thread
3068   _thread_dumpers = NEW_C_HEAP_ARRAY(ThreadDumper*, Threads::number_of_threads() * 2, mtInternal);
3069 
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* thread = jtiwh.next(); ) {
3071     if (ThreadDumper::should_dump_pthread(thread)) {
3072       bool add_oom_frame = is_oom_thread(thread);
3073 
3074       oop mounted_vt = thread->is_vthread_mounted() ? thread->vthread() : nullptr;
3075       if (mounted_vt != nullptr && !ThreadDumper::should_dump_vthread(mounted_vt)) {
3076         mounted_vt = nullptr;
3077       }
3078 
3079       // mounted vthread (if any)
3080       if (mounted_vt != nullptr) {
3081         ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::MountedVirtual, thread, mounted_vt);
3082         _thread_dumpers[_thread_dumpers_count++] = thread_dumper;
3083         if (add_oom_frame) {
3084           thread_dumper->add_oom_frame(_oome_constructor);
          // we add the oom frame to the VT stack; don't add it to the carrier thread stack
3086           add_oom_frame = false;
3087         }
3088         thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
3089         thread_dumper->dump_stack_traces(writer, _klass_map);
3090       }
3091 
3092       // platform or carrier thread
3093       ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::Platform, thread, thread->threadObj());
3094       _thread_dumpers[_thread_dumpers_count++] = thread_dumper;
3095       if (add_oom_frame) {
3096         thread_dumper->add_oom_frame(_oome_constructor);
3097       }
3098       thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
3099       thread_dumper->dump_stack_traces(writer, _klass_map);
3100     }
3101   }
3102 }
3103 
3104 void VM_HeapDumper::dump_vthread(oop vt, AbstractDumpWriter* segment_writer) {
3105   // unmounted vthread has no JavaThread
3106   ThreadDumper thread_dumper(ThreadDumper::ThreadType::UnmountedVirtual, nullptr, vt);
3107   thread_dumper.init_serial_nums(&_thread_serial_num, &_frame_serial_num);
3108 
3109   // write HPROF_TRACE/HPROF_FRAME records to global writer
3110   _dumper_controller->lock_global_writer();
3111   thread_dumper.dump_stack_traces(writer(), _klass_map);
3112   _dumper_controller->unlock_global_writer();
3113 
3114   // write HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecord
3115   // to segment writer
3116   thread_dumper.dump_thread_obj(segment_writer);
3117   thread_dumper.dump_stack_refs(segment_writer);
3118 }
3119 
// dump the heap to the given path.
3121 int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
3122   assert(path != nullptr && strlen(path) > 0, "path missing");
3123 
3124   // print message in interactive case
3125   if (out != nullptr) {
3126     out->print_cr("Dumping heap to %s ...", path);
3127     timer()->start();
3128   }
3129 
3130   if (_oome && num_dump_threads > 1) {
3131     // Each additional parallel writer requires several MB of internal memory
3132     // (DumpWriter buffer, DumperClassCacheTable, GZipCompressor buffers).
3133     // For the OOM handling we may already be limited in memory.
    // Let's ensure we have at least 20MB per thread.
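    // For example, with ~100 MB of free memory at most 5 dump threads are kept.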
3135     julong max_threads = os::free_memory() / (20 * M);
3136     if (num_dump_threads > max_threads) {
3137       num_dump_threads = MAX2<uint>(1, (uint)max_threads);
3138     }
3139   }
3140 
3141   // create JFR event
3142   EventHeapDump event;
3143 
3144   AbstractCompressor* compressor = nullptr;
3145 
3146   if (compression > 0) {
3147     compressor = new (std::nothrow) GZipCompressor(compression);
3148 
3149     if (compressor == nullptr) {
3150       set_error("Could not allocate gzip compressor");
3151       return -1;
3152     }
3153   }
3154 
3155   DumpWriter writer(path, overwrite, compressor);
3156 
3157   if (writer.error() != nullptr) {
3158     set_error(writer.error());
3159     if (out != nullptr) {
3160       out->print_cr("Unable to create %s: %s", path,
3161         (error() != nullptr) ? error() : "reason unknown");
3162     }
3163     return -1;
3164   }
3165 
3166   // generate the segmented heap dump into separate files
3167   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
3168   VMThread::execute(&dumper);
3169 
3170   // record any error that the writer may have encountered
3171   set_error(writer.error());
3172 
3173   // Heap dump process is done in two phases
3174   //
3175   // Phase 1: Concurrent threads directly write heap data to multiple heap files.
3176   //          This is done by VM_HeapDumper, which is performed within safepoint.
3177   //
3178   // Phase 2: Merge multiple heap files into one complete heap dump file.
3179   //          This is done by DumpMerger, which is performed outside safepoint
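  //
  // The per-dumper segment files are named by DumpMerger::get_writer_path()
  // (presumably the base path plus a per-dumper suffix) and are merged, in
  // dumper order, into the final file at 'path'.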
3180 
3181   DumpMerger merger(path, &writer, dumper.inlined_objects(), dumper.dump_seq());
  // Performing the heapdump file merge in the current thread keeps the
  // VM thread free, so GC and other VM operations are not delayed by the merge.
3185   merger.do_merge();
3186   if (writer.error() != nullptr) {
3187     set_error(writer.error());
3188   }
3189 
3190   // emit JFR event
3191   if (error() == nullptr) {
3192     event.set_destination(path);
3193     event.set_gcBeforeDump(_gc_before_heap_dump);
3194     event.set_size(writer.bytes_written());
3195     event.set_onOutOfMemoryError(_oome);
3196     event.set_overwrite(overwrite);
3197     event.set_compression(compression);
3198     event.commit();
3199   } else {
    log_debug(heapdump)("Error %s while dumping heap", error());
3201   }
3202 
3203   // print message in interactive case
3204   if (out != nullptr) {
3205     timer()->stop();
3206     if (error() == nullptr) {
3207       out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
3208                     writer.bytes_written(), timer()->seconds());
3209     } else {
3210       out->print_cr("Dump file is incomplete: %s", writer.error());
3211     }
3212   }
3213 
3214   if (compressor != nullptr) {
3215     delete compressor;
3216   }
3217   return (writer.error() == nullptr) ? 0 : -1;
3218 }
3219 
3220 // stop timer (if still active), and free any error string we might be holding
3221 HeapDumper::~HeapDumper() {
3222   if (timer()->is_active()) {
3223     timer()->stop();
3224   }
3225   set_error(nullptr);
3226 }
3227 
3228 
3229 // returns the error string (resource allocated), or null
3230 char* HeapDumper::error_as_C_string() const {
3231   if (error() != nullptr) {
3232     char* str = NEW_RESOURCE_ARRAY(char, strlen(error())+1);
3233     strcpy(str, error());
3234     return str;
3235   } else {
3236     return nullptr;
3237   }
3238 }
3239 
3240 // set the error string
3241 void HeapDumper::set_error(char const* error) {
3242   if (_error != nullptr) {
3243     os::free(_error);
3244   }
3245   if (error == nullptr) {
3246     _error = nullptr;
3247   } else {
3248     _error = os::strdup(error);
3249     assert(_error != nullptr, "allocation failure");
3250   }
3251 }
3252 
3253 // Called by out-of-memory error reporting by a single Java thread
3254 // outside of a JVM safepoint
3255 void HeapDumper::dump_heap_from_oome() {
3256   HeapDumper::dump_heap(true);
3257 }
3258 
3259 // Called by error reporting by a single Java thread outside of a JVM safepoint,
3260 // or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
3261 // callers are strictly serialized and guaranteed not to interfere below. For more
3262 // general use, however, this method will need modification to prevent
// interference when updating the static variables base_path and dump_file_seq below.
3264 void HeapDumper::dump_heap() {
3265   HeapDumper::dump_heap(false);
3266 }
3267 
3268 void HeapDumper::dump_heap(bool oome) {
3269   static char base_path[JVM_MAXPATHLEN] = {'\0'};
3270   static uint dump_file_seq = 0;
3271   char my_path[JVM_MAXPATHLEN];
3272   const int max_digit_chars = 20;
3273   const char* dump_file_name = HeapDumpGzipLevel > 0 ? "java_pid%p.hprof.gz" : "java_pid%p.hprof";
3274 
3275   // The dump file defaults to java_pid<pid>.hprof in the current working
3276   // directory. HeapDumpPath=<file> can be used to specify an alternative
3277   // dump file name or a directory where dump file is created.
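  // For example, with -XX:HeapDumpPath=/var/dumps and pid 1234, the first dump
  // goes to /var/dumps/java_pid1234.hprof and subsequent dumps to
  // /var/dumps/java_pid1234.hprof.1, .2, and so on (with HeapDumpGzipLevel > 0
  // the base name is java_pid1234.hprof.gz).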
3278   if (dump_file_seq == 0) { // first time in, we initialize base_path
3279     // Set base path (name or directory, default or custom, without seq no), doing %p substitution.
3280     const char *path_src = (HeapDumpPath != nullptr && HeapDumpPath[0] != '\0') ? HeapDumpPath : dump_file_name;
3281     if (!Arguments::copy_expand_pid(path_src, strlen(path_src), base_path, JVM_MAXPATHLEN - max_digit_chars)) {
3282       warning("Cannot create heap dump file.  HeapDumpPath is too long.");
3283       return;
3284     }
3285     // Check if the path is an existing directory
3286     DIR* dir = os::opendir(base_path);
3287     if (dir != nullptr) {
3288       os::closedir(dir);
3289       // Path is a directory.  Append a file separator (if needed).
3290       size_t fs_len = strlen(os::file_separator());
3291       if (strlen(base_path) >= fs_len) {
3292         char* end = base_path;
3293         end += (strlen(base_path) - fs_len);
3294         if (strcmp(end, os::file_separator()) != 0) {
3295           strcat(base_path, os::file_separator());
3296         }
3297       }
3298       // Then add the default name, with %p substitution.  Use my_path temporarily.
3299       if (!Arguments::copy_expand_pid(dump_file_name, strlen(dump_file_name), my_path, JVM_MAXPATHLEN - max_digit_chars)) {
3300         warning("Cannot create heap dump file.  HeapDumpPath is too long.");
3301         return;
3302       }
3303       const size_t dlen = strlen(base_path);
3304       jio_snprintf(&base_path[dlen], sizeof(base_path) - dlen, "%s", my_path);
3305     }
3306     strncpy(my_path, base_path, JVM_MAXPATHLEN);
3307   } else {
3308     // Append a sequence number id for dumps following the first
3309     const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
3310     jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
3311   }
3312   dump_file_seq++;   // increment seq number for next time we dump
3313 
3314   HeapDumper dumper(false /* no GC before heap dump */,
3315                     oome  /* pass along out-of-memory-error flag */);
3316   dumper.dump(my_path, tty, HeapDumpGzipLevel);
3317 }