1 /*
   2  * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.inline.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "gc/shared/gcLocker.hpp"
  33 #include "gc/shared/gcVMOperations.hpp"
  34 #include "gc/shared/workerThread.hpp"
  35 #include "jfr/jfrEvents.hpp"
  36 #include "jvm.h"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/klass.inline.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "oops/objArrayOop.inline.hpp"
  43 #include "oops/flatArrayKlass.hpp"
  44 #include "oops/flatArrayOop.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "oops/typeArrayOop.inline.hpp"
  47 #include "runtime/fieldDescriptor.inline.hpp"
  48 #include "runtime/frame.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/javaThread.inline.hpp"
  52 #include "runtime/jniHandles.hpp"
  53 #include "runtime/os.hpp"
  54 #include "runtime/reflectionUtils.hpp"
  55 #include "runtime/threads.hpp"
  56 #include "runtime/threadSMR.hpp"
  57 #include "runtime/vframe.hpp"
  58 #include "runtime/vmOperations.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "services/heapDumper.hpp"
  61 #include "services/heapDumperCompression.hpp"
  62 #include "services/threadService.hpp"
  63 #include "utilities/macros.hpp"
  64 #include "utilities/ostream.hpp"
  65 
  66 /*
  67  * HPROF binary format - description copied from:
  68  *   src/share/demo/jvmti/hprof/hprof_io.c
  69  *
  70  *
  71  *  header    "JAVA PROFILE 1.0.2" (0-terminated)
  72  *
  73  *  u4        size of identifiers. Identifiers are used to represent
  74  *            UTF8 strings, objects, stack traces, etc. They usually
  75  *            have the same size as host pointers.
  76  * u4         high word
  77  * u4         low word    number of milliseconds since 0:00 GMT, 1/1/70
  78  * [record]*  a sequence of records.
  79  *
  80  *
  81  * Record format:
  82  *
  83  * u1         a TAG denoting the type of the record
  84  * u4         number of *microseconds* since the time stamp in the
  85  *            header. (wraps around in a little more than an hour)
  86  * u4         number of bytes *remaining* in the record. Note that
  87  *            this number excludes the tag and the length field itself.
  88  * [u1]*      BODY of the record (a sequence of bytes)
  89  *
  90  *
  91  * The following TAGs are supported:
  92  *
  93  * TAG           BODY       notes
  94  *----------------------------------------------------------
  95  * HPROF_UTF8               a UTF8-encoded name
  96  *
  97  *               id         name ID
  98  *               [u1]*      UTF8 characters (no trailing zero)
  99  *
 100  * HPROF_LOAD_CLASS         a newly loaded class
 101  *
 102  *                u4        class serial number (> 0)
 103  *                id        class object ID
 104  *                u4        stack trace serial number
 105  *                id        class name ID
 106  *
 107  * HPROF_UNLOAD_CLASS       an unloading class
 108  *
 109  *                u4        class serial_number
 110  *
 111  * HPROF_FRAME              a Java stack frame
 112  *
 113  *                id        stack frame ID
 114  *                id        method name ID
 115  *                id        method signature ID
 116  *                id        source file name ID
 117  *                u4        class serial number
 118  *                i4        line number. >0: normal
 119  *                                       -1: unknown
 120  *                                       -2: compiled method
 121  *                                       -3: native method
 122  *
 123  * HPROF_TRACE              a Java stack trace
 124  *
 125  *               u4         stack trace serial number
 126  *               u4         thread serial number
 127  *               u4         number of frames
 128  *               [id]*      stack frame IDs
 129  *
 130  *
 131  * HPROF_ALLOC_SITES        a set of heap allocation sites, obtained after GC
 132  *
 133  *               u2         flags 0x0001: incremental vs. complete
 134  *                                0x0002: sorted by allocation vs. live
 135  *                                0x0004: whether to force a GC
 136  *               u4         cutoff ratio
 137  *               u4         total live bytes
 138  *               u4         total live instances
 139  *               u8         total bytes allocated
 140  *               u8         total instances allocated
 141  *               u4         number of sites that follow
 142  *               [u1        is_array: 0:  normal object
 143  *                                    2:  object array
 144  *                                    4:  boolean array
 145  *                                    5:  char array
 146  *                                    6:  float array
 147  *                                    7:  double array
 148  *                                    8:  byte array
 149  *                                    9:  short array
 150  *                                    10: int array
 151  *                                    11: long array
 152  *                u4        class serial number (may be zero during startup)
 153  *                u4        stack trace serial number
 154  *                u4        number of bytes alive
 155  *                u4        number of instances alive
 156  *                u4        number of bytes allocated
 157  *                u4]*      number of instance allocated
 158  *
 159  * HPROF_START_THREAD       a newly started thread.
 160  *
 161  *               u4         thread serial number (> 0)
 162  *               id         thread object ID
 163  *               u4         stack trace serial number
 164  *               id         thread name ID
 165  *               id         thread group name ID
 166  *               id         thread group parent name ID
 167  *
 168  * HPROF_END_THREAD         a terminating thread.
 169  *
 170  *               u4         thread serial number
 171  *
 172  * HPROF_HEAP_SUMMARY       heap summary
 173  *
 174  *               u4         total live bytes
 175  *               u4         total live instances
 176  *               u8         total bytes allocated
 177  *               u8         total instances allocated
 178  *
 179  * HPROF_HEAP_DUMP          denote a heap dump
 180  *
 181  *               [heap dump sub-records]*
 182  *
 183  *                          There are four kinds of heap dump sub-records:
 184  *
 185  *               u1         sub-record type
 186  *
 187  *               HPROF_GC_ROOT_UNKNOWN         unknown root
 188  *
 189  *                          id         object ID
 190  *
 191  *               HPROF_GC_ROOT_THREAD_OBJ      thread object
 192  *
 193  *                          id         thread object ID  (may be 0 for a
 194  *                                     thread newly attached through JNI)
 195  *                          u4         thread sequence number
 196  *                          u4         stack trace sequence number
 197  *
 198  *               HPROF_GC_ROOT_JNI_GLOBAL      JNI global ref root
 199  *
 200  *                          id         object ID
 201  *                          id         JNI global ref ID
 202  *
 203  *               HPROF_GC_ROOT_JNI_LOCAL       JNI local ref
 204  *
 205  *                          id         object ID
 206  *                          u4         thread serial number
 207  *                          u4         frame # in stack trace (-1 for empty)
 208  *
 209  *               HPROF_GC_ROOT_JAVA_FRAME      Java stack frame
 210  *
 211  *                          id         object ID
 212  *                          u4         thread serial number
 213  *                          u4         frame # in stack trace (-1 for empty)
 214  *
 215  *               HPROF_GC_ROOT_NATIVE_STACK    Native stack
 216  *
 217  *                          id         object ID
 218  *                          u4         thread serial number
 219  *
 220  *               HPROF_GC_ROOT_STICKY_CLASS    System class
 221  *
 222  *                          id         object ID
 223  *
 224  *               HPROF_GC_ROOT_THREAD_BLOCK    Reference from thread block
 225  *
 226  *                          id         object ID
 227  *                          u4         thread serial number
 228  *
 229  *               HPROF_GC_ROOT_MONITOR_USED    Busy monitor
 230  *
 231  *                          id         object ID
 232  *
 233  *               HPROF_GC_CLASS_DUMP           dump of a class object
 234  *
 235  *                          id         class object ID
 236  *                          u4         stack trace serial number
 237  *                          id         super class object ID
 238  *                          id         class loader object ID
 239  *                          id         signers object ID
 240  *                          id         protection domain object ID
 241  *                          id         reserved
 242  *                          id         reserved
 243  *
 244  *                          u4         instance size (in bytes)
 245  *
 246  *                          u2         size of constant pool
 247  *                          [u2,       constant pool index,
 248  *                           ty,       type
 249  *                                     2:  object
 250  *                                     4:  boolean
 251  *                                     5:  char
 252  *                                     6:  float
 253  *                                     7:  double
 254  *                                     8:  byte
 255  *                                     9:  short
 256  *                                     10: int
 257  *                                     11: long
 258  *                           vl]*      and value
 259  *
 260  *                          u2         number of static fields
 261  *                          [id,       static field name,
 262  *                           ty,       type,
 263  *                           vl]*      and value
 264  *
 265  *                          u2         number of inst. fields (not inc. super)
 266  *                          [id,       instance field name,
 267  *                           ty]*      type
 268  *
 269  *               HPROF_GC_INSTANCE_DUMP        dump of a normal object
 270  *
 271  *                          id         object ID
 272  *                          u4         stack trace serial number
 273  *                          id         class object ID
 274  *                          u4         number of bytes that follow
 275  *                          [vl]*      instance field values (class, followed
 276  *                                     by super, super's super ...)
 277  *
 278  *               HPROF_GC_OBJ_ARRAY_DUMP       dump of an object array
 279  *
 280  *                          id         array object ID
 281  *                          u4         stack trace serial number
 282  *                          u4         number of elements
 283  *                          id         array class ID
 284  *                          [id]*      elements
 285  *
 286  *               HPROF_GC_PRIM_ARRAY_DUMP      dump of a primitive array
 287  *
 288  *                          id         array object ID
 289  *                          u4         stack trace serial number
 290  *                          u4         number of elements
 291  *                          u1         element type
 292  *                                     4:  boolean array
 293  *                                     5:  char array
 294  *                                     6:  float array
 295  *                                     7:  double array
 296  *                                     8:  byte array
 297  *                                     9:  short array
 298  *                                     10: int array
 299  *                                     11: long array
 300  *                          [u1]*      elements
 301  *
 302  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 303  *
 304  *                u4        total number of samples
 305  *                u4        # of traces
 306  *               [u4        # of samples
 307  *                u4]*      stack trace serial number
 308  *
 309  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 310  *
 311  *                u4        0x00000001: alloc traces on/off
 312  *                          0x00000002: cpu sampling on/off
 313  *                u2        stack trace depth
 314  *
 315  * HPROF_FLAT_ARRAYS        list of flat arrays
 316  *
 317  *               [flat array sub-records]*
 318  *
 319  *               HPROF_FLAT_ARRAY      flat array
 320  *
 321  *                          id         array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
 322  *                          id         element class ID (dumped by HPROF_GC_CLASS_DUMP)
 323  *
 324  * HPROF_INLINED_FIELDS     decribes inlined fields
 325  *
 326  *               [class with inlined fields sub-records]*
 327  *
 328  *               HPROF_CLASS_WITH_INLINED_FIELDS
 329  *
 330  *                          id         class ID (dumped as HPROF_GC_CLASS_DUMP)
 331  *
 332  *                          u2         number of instance inlined fields (not including super)
 333  *                          [u2,       inlined field index,
 334  *                           u2,       synthetic field count,
 335  *                           id,       original field name,
 336  *                           id]*      inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
 337  *
 338  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 339  * be generated as a sequence of heap dump segments. This sequence is
 340  * terminated by an end record. The additional tags allowed by format
 341  * "JAVA PROFILE 1.0.2" are:
 342  *
 343  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 344  *
 345  *               [heap dump sub-records]*
 346  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 347  *
 348  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 349  *
 350  */
 351 
 352 
 353 // HPROF tags
 354 
 355 enum hprofTag : u1 {
 356   // top-level records
 357   HPROF_UTF8                    = 0x01,
 358   HPROF_LOAD_CLASS              = 0x02,
 359   HPROF_UNLOAD_CLASS            = 0x03,
 360   HPROF_FRAME                   = 0x04,
 361   HPROF_TRACE                   = 0x05,
 362   HPROF_ALLOC_SITES             = 0x06,
 363   HPROF_HEAP_SUMMARY            = 0x07,
 364   HPROF_START_THREAD            = 0x0A,
 365   HPROF_END_THREAD              = 0x0B,
 366   HPROF_HEAP_DUMP               = 0x0C,
 367   HPROF_CPU_SAMPLES             = 0x0D,
 368   HPROF_CONTROL_SETTINGS        = 0x0E,
 369 
 370   // 1.0.2 record types
 371   HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
 372   HPROF_HEAP_DUMP_END           = 0x2C,
 373 
 374   // inlined object support
 375   HPROF_FLAT_ARRAYS             = 0x12,
 376   HPROF_INLINED_FIELDS          = 0x13,
 377   // inlined object subrecords
 378   HPROF_FLAT_ARRAY                  = 0x01,
 379   HPROF_CLASS_WITH_INLINED_FIELDS   = 0x01,
 380 
 381   // field types
 382   HPROF_ARRAY_OBJECT            = 0x01,
 383   HPROF_NORMAL_OBJECT           = 0x02,
 384   HPROF_BOOLEAN                 = 0x04,
 385   HPROF_CHAR                    = 0x05,
 386   HPROF_FLOAT                   = 0x06,
 387   HPROF_DOUBLE                  = 0x07,
 388   HPROF_BYTE                    = 0x08,
 389   HPROF_SHORT                   = 0x09,
 390   HPROF_INT                     = 0x0A,
 391   HPROF_LONG                    = 0x0B,
 392 
 393   // data-dump sub-records
 394   HPROF_GC_ROOT_UNKNOWN         = 0xFF,
 395   HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
 396   HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
 397   HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
 398   HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
 399   HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
 400   HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
 401   HPROF_GC_ROOT_MONITOR_USED    = 0x07,
 402   HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
 403   HPROF_GC_CLASS_DUMP           = 0x20,
 404   HPROF_GC_INSTANCE_DUMP        = 0x21,
 405   HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
 406   HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
 407 };
 408 
 409 // Default stack trace ID (used for dummy HPROF_TRACE record)
 410 enum {
 411   STACK_TRACE_ID = 1,
 412   INITIAL_CLASS_COUNT = 200
 413 };
 414 
 415 
 416 class AbstractDumpWriter;
 417 
 418 class InlinedObjects {
 419 
 420   struct ClassInlinedFields {
 421     const Klass *klass;
 422     uintx base_index;   // base index of the inlined field names (1st field has index base_index+1).
 423     ClassInlinedFields(const Klass *klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}
 424 
 425     // For GrowableArray::find_sorted().
 426     static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
 427       return a.klass - b.klass;
 428     }
 429     // For GrowableArray::sort().
 430     static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
 431       return compare(*a, *b);
 432     }
 433   };
 434 
 435   uintx _min_string_id;
 436   uintx _max_string_id;
 437 
 438   GrowableArray<ClassInlinedFields> *_inlined_field_map;
 439 
 440   // counters for classes with inlined fields and for the fields
 441   int _classes_count;
 442   int _inlined_fields_count;
 443 
 444   static InlinedObjects *_instance;
 445 
 446   static void inlined_field_names_callback(InlinedObjects* _this, const Klass *klass, uintx base_index, int count);
 447 
 448   GrowableArray<oop> *_flat_arrays;
 449 
 450 public:
 451   InlinedObjects()
 452     : _min_string_id(0), _max_string_id(0),
 453     _inlined_field_map(nullptr),
 454     _classes_count(0), _inlined_fields_count(0),
 455     _flat_arrays(nullptr) {
 456   }
 457 
 458   static InlinedObjects* get_instance() {
 459     return _instance;
 460   }
 461 
 462   void init();
 463   void release();
 464 
 465   void dump_inlined_field_names(AbstractDumpWriter *writer);
 466 
 467   uintx get_base_index_for(Klass* k);
 468   uintx get_next_string_id(uintx id);
 469 
 470   void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);
 471 
 472   void add_flat_array(oop array);
 473   void dump_flat_arrays(AbstractDumpWriter* writer);
 474 
 475 };
 476 
 477 InlinedObjects *InlinedObjects::_instance = nullptr;
 478 
 479 
 480 // Supports I/O operations for a dump
 481 // Base class for dump and parallel dump
 482 class AbstractDumpWriter : public StackObj {
 483  protected:
 484   enum {
 485     io_buffer_max_size = 1*M,
 486     io_buffer_max_waste = 10*K,
 487     dump_segment_header_size = 9
 488   };
 489 
 490   char* _buffer;    // internal buffer
 491   size_t _size;
 492   size_t _pos;
 493 
 494   bool _in_dump_segment; // Are we currently in a dump segment?
 495   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 496   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 497   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 498 
 499   virtual void flush(bool force = false) = 0;
 500 
 501   char* buffer() const                          { return _buffer; }
 502   size_t buffer_size() const                    { return _size; }
 503   void set_position(size_t pos)                 { _pos = pos; }
 504 
 505   // Can be called if we have enough room in the buffer.
 506   void write_fast(const void* s, size_t len);
 507 
 508   // Returns true if we have enough room in the buffer for 'len' bytes.
 509   bool can_write_fast(size_t len);
 510 
 511   void write_address(address a);
 512 
 513  public:
 514   AbstractDumpWriter() :
 515     _buffer(nullptr),
 516     _size(io_buffer_max_size),
 517     _pos(0),
 518     _in_dump_segment(false) { }
 519 
 520   // total number of bytes written to the disk
 521   virtual julong bytes_written() const = 0;
 522   virtual char const* error() const = 0;
 523 
 524   size_t position() const                       { return _pos; }
 525   // writer functions
 526   virtual void write_raw(const void* s, size_t len);
 527   void write_u1(u1 x);
 528   void write_u2(u2 x);
 529   void write_u4(u4 x);
 530   void write_u8(u8 x);
 531   void write_objectID(oop o);
 532   void write_rootID(oop* p);
 533   void write_symbolID(Symbol* o);
 534   void write_classID(Klass* k);
 535   void write_id(u4 x);
 536 
 537   // Start a new sub-record. Starts a new heap dump segment if needed.
 538   void start_sub_record(u1 tag, u4 len);
 539   // Ends the current sub-record.
 540   void end_sub_record();
 541   // Finishes the current dump segment if not already finished.
 542   void finish_dump_segment(bool force_flush = false);
 543   // Refresh to get new buffer
 544   void refresh() {
 545     assert (_in_dump_segment ==false, "Sanity check");
 546     _buffer = nullptr;
 547     _size = io_buffer_max_size;
 548     _pos = 0;
 549     // Force flush to guarantee data from parallel dumper are written.
 550     flush(true);
 551   }
 552   // Called when finished to release the threads.
 553   virtual void deactivate() = 0;
 554 };
 555 
 556 void AbstractDumpWriter::write_fast(const void* s, size_t len) {
 557   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
 558   assert(buffer_size() - position() >= len, "Must fit");
 559   debug_only(_sub_record_left -= len);
 560   memcpy(buffer() + position(), s, len);
 561   set_position(position() + len);
 562 }
 563 
 564 bool AbstractDumpWriter::can_write_fast(size_t len) {
 565   return buffer_size() - position() >= len;
 566 }
 567 
 568 // write raw bytes
 569 void AbstractDumpWriter::write_raw(const void* s, size_t len) {
 570   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
 571   debug_only(_sub_record_left -= len);
 572 
 573   // flush buffer to make room.
 574   while (len > buffer_size() - position()) {
 575     assert(!_in_dump_segment || _is_huge_sub_record,
 576            "Cannot overflow in non-huge sub-record.");
 577     size_t to_write = buffer_size() - position();
 578     memcpy(buffer() + position(), s, to_write);
 579     s = (void*) ((char*) s + to_write);
 580     len -= to_write;
 581     set_position(position() + to_write);
 582     flush();
 583   }
 584 
 585   memcpy(buffer() + position(), s, len);
 586   set_position(position() + len);
 587 }
 588 
 589 // Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
 590 #define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
 591                                       else write_raw((p), (len)); } while (0)
 592 
 593 void AbstractDumpWriter::write_u1(u1 x) {
 594   WRITE_KNOWN_TYPE(&x, 1);
 595 }
 596 
 597 void AbstractDumpWriter::write_u2(u2 x) {
 598   u2 v;
 599   Bytes::put_Java_u2((address)&v, x);
 600   WRITE_KNOWN_TYPE(&v, 2);
 601 }
 602 
 603 void AbstractDumpWriter::write_u4(u4 x) {
 604   u4 v;
 605   Bytes::put_Java_u4((address)&v, x);
 606   WRITE_KNOWN_TYPE(&v, 4);
 607 }
 608 
 609 void AbstractDumpWriter::write_u8(u8 x) {
 610   u8 v;
 611   Bytes::put_Java_u8((address)&v, x);
 612   WRITE_KNOWN_TYPE(&v, 8);
 613 }
 614 
 615 void AbstractDumpWriter::write_address(address a) {
 616 #ifdef _LP64
 617   write_u8((u8)a);
 618 #else
 619   write_u4((u4)a);
 620 #endif
 621 }
 622 
 623 void AbstractDumpWriter::write_objectID(oop o) {
 624   write_address(cast_from_oop<address>(o));
 625 }
 626 
 627 void AbstractDumpWriter::write_rootID(oop* p) {
 628   write_address((address)p);
 629 }
 630 
 631 void AbstractDumpWriter::write_symbolID(Symbol* s) {
 632   write_address((address)((uintptr_t)s));
 633 }
 634 
 635 void AbstractDumpWriter::write_id(u4 x) {
 636 #ifdef _LP64
 637   write_u8((u8) x);
 638 #else
 639   write_u4(x);
 640 #endif
 641 }
 642 
 643 // We use java mirror as the class ID
 644 void AbstractDumpWriter::write_classID(Klass* k) {
 645   write_objectID(k->java_mirror());
 646 }
 647 
 648 void AbstractDumpWriter::finish_dump_segment(bool force_flush) {
 649   if (_in_dump_segment) {
 650     assert(_sub_record_left == 0, "Last sub-record not written completely");
 651     assert(_sub_record_ended, "sub-record must have ended");
 652 
 653     // Fix up the dump segment length if we haven't written a huge sub-record last
 654     // (in which case the segment length was already set to the correct value initially).
 655     if (!_is_huge_sub_record) {
 656       assert(position() > dump_segment_header_size, "Dump segment should have some content");
 657       Bytes::put_Java_u4((address) (buffer() + 5),
 658                          (u4) (position() - dump_segment_header_size));
 659     } else {
 660       // Finish process huge sub record
 661       // Set _is_huge_sub_record to false so the parallel dump writer can flush data to file.
 662       _is_huge_sub_record = false;
 663     }
 664 
 665     _in_dump_segment = false;
 666     flush(force_flush);
 667   }
 668 }
 669 
 670 void AbstractDumpWriter::start_sub_record(u1 tag, u4 len) {
 671   if (!_in_dump_segment) {
 672     if (position() > 0) {
 673       flush();
 674     }
 675 
 676     assert(position() == 0 && buffer_size() > dump_segment_header_size, "Must be at the start");
 677 
 678     write_u1(HPROF_HEAP_DUMP_SEGMENT);
 679     write_u4(0); // timestamp
 680     // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
 681     // this is already the correct length, since we don't add more sub-records.
 682     write_u4(len);
 683     assert(Bytes::get_Java_u4((address)(buffer() + 5)) == len, "Inconsistent size!");
 684     _in_dump_segment = true;
 685     _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
 686   } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
 687     // This object will not fit in completely or the last sub-record was huge.
 688     // Finish the current segment and try again.
 689     finish_dump_segment();
 690     start_sub_record(tag, len);
 691 
 692     return;
 693   }
 694 
 695   debug_only(_sub_record_left = len);
 696   debug_only(_sub_record_ended = false);
 697 
 698   write_u1(tag);
 699 }
 700 
 701 void AbstractDumpWriter::end_sub_record() {
 702   assert(_in_dump_segment, "must be in dump segment");
 703   assert(_sub_record_left == 0, "sub-record not written completely");
 704   assert(!_sub_record_ended, "Must not have ended yet");
 705   debug_only(_sub_record_ended = true);
 706 }
 707 
 708 // Supports I/O operations for a dump
 709 
 710 class DumpWriter : public AbstractDumpWriter {
 711  private:
 712   CompressionBackend _backend; // Does the actual writing.
 713  protected:
 714   void flush(bool force = false) override;
 715 
 716  public:
 717   // Takes ownership of the writer and compressor.
 718   DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor);
 719 
 720   // total number of bytes written to the disk
 721   julong bytes_written() const override { return (julong) _backend.get_written(); }
 722 
 723   char const* error() const override    { return _backend.error(); }
 724 
 725   // Called by threads used for parallel writing.
 726   void writer_loop()                    { _backend.thread_loop(); }
 727   // Called when finish to release the threads.
 728   void deactivate() override            { flush(); _backend.deactivate(); }
 729   // Get the backend pointer, used by parallel dump writer.
 730   CompressionBackend* backend_ptr()     { return &_backend; }
 731 
 732 };
 733 
 734 // Check for error after constructing the object and destroy it in case of an error.
 735 DumpWriter::DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor) :
 736   AbstractDumpWriter(),
 737   _backend(writer, compressor, io_buffer_max_size, io_buffer_max_waste) {
 738   flush();
 739 }
 740 
 741 // flush any buffered bytes to the file
 742 void DumpWriter::flush(bool force) {
 743   _backend.get_new_buffer(&_buffer, &_pos, &_size, force);
 744 }
 745 
 746 // Buffer queue used for parallel dump.
 747 struct ParWriterBufferQueueElem {
 748   char* _buffer;
 749   size_t _used;
 750   ParWriterBufferQueueElem* _next;
 751 };
 752 
 753 class ParWriterBufferQueue : public CHeapObj<mtInternal> {
 754  private:
 755   ParWriterBufferQueueElem* _head;
 756   ParWriterBufferQueueElem* _tail;
 757   uint _length;
 758  public:
 759   ParWriterBufferQueue() : _head(nullptr), _tail(nullptr), _length(0) { }
 760 
 761   void enqueue(ParWriterBufferQueueElem* entry) {
 762     if (_head == nullptr) {
 763       assert(is_empty() && _tail == nullptr, "Sanity check");
 764       _head = _tail = entry;
 765     } else {
 766       assert ((_tail->_next == nullptr && _tail->_buffer != nullptr), "Buffer queue is polluted");
 767       _tail->_next = entry;
 768       _tail = entry;
 769     }
 770     _length++;
 771     assert(_tail->_next == nullptr, "Buffer queue is polluted");
 772   }
 773 
 774   ParWriterBufferQueueElem* dequeue() {
 775     if (_head == nullptr)  return nullptr;
 776     ParWriterBufferQueueElem* entry = _head;
 777     assert (entry->_buffer != nullptr, "polluted buffer in writer list");
 778     _head = entry->_next;
 779     if (_head == nullptr) {
 780       _tail = nullptr;
 781     }
 782     entry->_next = nullptr;
 783     _length--;
 784     return entry;
 785   }
 786 
 787   bool is_empty() {
 788     return _length == 0;
 789   }
 790 
 791   uint length() { return _length; }
 792 };
 793 
 794 // Support parallel heap dump.
 795 class ParDumpWriter : public AbstractDumpWriter {
 796  private:
 797   // Lock used to guarantee the integrity of multiple buffers writing.
 798   static Monitor* _lock;
 799   // Pointer of backend from global DumpWriter.
 800   CompressionBackend* _backend_ptr;
 801   char const * _err;
 802   ParWriterBufferQueue* _buffer_queue;
 803   size_t _internal_buffer_used;
 804   char* _buffer_base;
 805   bool _split_data;
 806   static const uint BackendFlushThreshold = 2;
 807  protected:
 808   void flush(bool force = false) override {
 809     assert(_pos != 0, "must not be zero");
 810     if (_pos != 0) {
 811       refresh_buffer();
 812     }
 813 
 814     if (_split_data || _is_huge_sub_record) {
 815       return;
 816     }
 817 
 818     if (should_flush_buf_list(force)) {
 819       assert(!_in_dump_segment && !_split_data && !_is_huge_sub_record, "incomplete data send to backend!\n");
 820       flush_to_backend(force);
 821     }
 822   }
 823 
 824  public:
 825   // Check for error after constructing the object and destroy it in case of an error.
 826   ParDumpWriter(DumpWriter* dw) :
 827     AbstractDumpWriter(),
 828     _backend_ptr(dw->backend_ptr()),
 829     _buffer_queue((new (std::nothrow) ParWriterBufferQueue())),
 830     _buffer_base(nullptr),
 831     _split_data(false) {
 832     // prepare internal buffer
 833     allocate_internal_buffer();
 834   }
 835 
 836   ~ParDumpWriter() {
 837      assert(_buffer_queue != nullptr, "Sanity check");
 838      assert((_internal_buffer_used == 0) && (_buffer_queue->is_empty()),
 839             "All data must be send to backend");
 840      if (_buffer_base != nullptr) {
 841        os::free(_buffer_base);
 842        _buffer_base = nullptr;
 843      }
 844      delete _buffer_queue;
 845      _buffer_queue = nullptr;
 846   }
 847 
 848   // total number of bytes written to the disk
 849   julong bytes_written() const override { return (julong) _backend_ptr->get_written(); }
 850   char const* error() const override    { return _err == nullptr ? _backend_ptr->error() : _err; }
 851 
 852   static void before_work() {
 853     assert(_lock == nullptr, "ParDumpWriter lock must be initialized only once");
 854     _lock = new (std::nothrow) PaddedMonitor(Mutex::safepoint, "ParallelHProfWriter_lock");
 855   }
 856 
 857   static void after_work() {
 858     assert(_lock != nullptr, "ParDumpWriter lock is not initialized");
 859     delete _lock;
 860     _lock = nullptr;
 861   }
 862 
 863   // write raw bytes
 864   void write_raw(const void* s, size_t len) override {
 865     assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
 866     debug_only(_sub_record_left -= len);
 867     assert(!_split_data, "Invalid split data");
 868     _split_data = true;
 869     // flush buffer to make room.
 870     while (len > buffer_size() - position()) {
 871       assert(!_in_dump_segment || _is_huge_sub_record,
 872              "Cannot overflow in non-huge sub-record.");
 873       size_t to_write = buffer_size() - position();
 874       memcpy(buffer() + position(), s, to_write);
 875       s = (void*) ((char*) s + to_write);
 876       len -= to_write;
 877       set_position(position() + to_write);
 878       flush();
 879     }
 880     _split_data = false;
 881     memcpy(buffer() + position(), s, len);
 882     set_position(position() + len);
 883   }
 884 
 885   void deactivate() override { flush(true); _backend_ptr->deactivate(); }
 886 
 887  private:
 888   void allocate_internal_buffer() {
 889     assert(_buffer_queue != nullptr, "Internal buffer queue is not ready when allocate internal buffer");
 890     assert(_buffer == nullptr && _buffer_base == nullptr, "current buffer must be null before allocate");
 891     _buffer_base = _buffer = (char*)os::malloc(io_buffer_max_size, mtInternal);
 892     if (_buffer == nullptr) {
 893       set_error("Could not allocate buffer for writer");
 894       return;
 895     }
 896     _pos = 0;
 897     _internal_buffer_used = 0;
 898     _size = io_buffer_max_size;
 899   }
 900 
 901   void set_error(char const* new_error) {
 902     if ((new_error != nullptr) && (_err == nullptr)) {
 903       _err = new_error;
 904     }
 905   }
 906 
 907   // Add buffer to internal list
 908   void refresh_buffer() {
 909     size_t expected_total = _internal_buffer_used + _pos;
 910     if (expected_total < io_buffer_max_size - io_buffer_max_waste) {
 911       // reuse current buffer.
 912       _internal_buffer_used = expected_total;
 913       assert(_size - _pos == io_buffer_max_size - expected_total, "illegal resize of buffer");
 914       _size -= _pos;
 915       _buffer += _pos;
 916       _pos = 0;
 917 
 918       return;
 919     }
 920     // It is not possible here that expected_total is larger than io_buffer_max_size because
 921     // of limitation in write_xxx().
 922     assert(expected_total <= io_buffer_max_size, "buffer overflow");
 923     assert(_buffer - _buffer_base <= io_buffer_max_size, "internal buffer overflow");
 924     ParWriterBufferQueueElem* entry =
 925         (ParWriterBufferQueueElem*)os::malloc(sizeof(ParWriterBufferQueueElem), mtInternal);
 926     if (entry == nullptr) {
 927       set_error("Heap dumper can allocate memory");
 928       return;
 929     }
 930     entry->_buffer = _buffer_base;
 931     entry->_used = expected_total;
 932     entry->_next = nullptr;
 933     // add to internal buffer queue
 934     _buffer_queue->enqueue(entry);
 935     _buffer_base =_buffer = nullptr;
 936     allocate_internal_buffer();
 937   }
 938 
 939   void reclaim_entry(ParWriterBufferQueueElem* entry) {
 940     assert(entry != nullptr && entry->_buffer != nullptr, "Invalid entry to reclaim");
 941     os::free(entry->_buffer);
 942     entry->_buffer = nullptr;
 943     os::free(entry);
 944   }
 945 
 946   void flush_buffer(char* buffer, size_t used) {
 947     assert(_lock->owner() == Thread::current(), "flush buffer must hold lock");
 948     size_t max = io_buffer_max_size;
 949     // get_new_buffer
 950     _backend_ptr->flush_external_buffer(buffer, used, max);
 951   }
 952 
 953   bool should_flush_buf_list(bool force) {
 954     return force || _buffer_queue->length() > BackendFlushThreshold;
 955   }
 956 
 957   void flush_to_backend(bool force) {
 958     // Guarantee there is only one writer updating the backend buffers.
 959     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 960     while (!_buffer_queue->is_empty()) {
 961       ParWriterBufferQueueElem* entry = _buffer_queue->dequeue();
 962       flush_buffer(entry->_buffer, entry->_used);
 963       // Delete buffer and entry.
 964       reclaim_entry(entry);
 965       entry = nullptr;
 966     }
 967     assert(_pos == 0, "available buffer must be empty before flush");
 968     // Flush internal buffer.
 969     if (_internal_buffer_used > 0) {
 970       flush_buffer(_buffer_base, _internal_buffer_used);
 971       os::free(_buffer_base);
 972       _pos = 0;
 973       _internal_buffer_used = 0;
 974       _buffer_base = _buffer = nullptr;
 975       // Allocate internal buffer for future use.
 976       allocate_internal_buffer();
 977     }
 978   }
 979 };
 980 
 981 Monitor* ParDumpWriter::_lock = nullptr;
 982 
 983 // Support class with a collection of functions used when dumping the heap
 984 
 985 class DumperSupport : AllStatic {
 986  public:
 987 
 988   // write a header of the given type
 989   static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);
 990 
 991   // returns hprof tag for the given type signature
 992   static hprofTag sig2tag(Symbol* sig);
 993   // returns hprof tag for the given basic type
 994   static hprofTag type2tag(BasicType type);
 995   // Returns the size of the data to write.
 996   static u4 sig2size(Symbol* sig);
 997 
 998   // calculates the total size of the all fields of the given class.
 999   static u4 instance_size(InstanceKlass* ik);
1000 
1001   // dump a jfloat
1002   static void dump_float(AbstractDumpWriter* writer, jfloat f);
1003   // dump a jdouble
1004   static void dump_double(AbstractDumpWriter* writer, jdouble d);
1005   // dumps the raw value of the given field
1006   static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
1007   // returns the size of the static fields; also counts the static fields
1008   static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
1009   // dumps static fields of the given class
1010   static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
1011   // dump the raw values of the instance fields of the given identity or inlined object;
1012   // for identity objects offset is 0 and 'klass' is o->klass(),
1013   // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class
1014   static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, InstanceKlass* klass);
1015   // dump the raw values of the instance fields of the given inlined object;
1016   // dump_instance_fields wrapper for inlined objects
1017   static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, InlineKlass* klass);
1018 
1019   // get the count of the instance fields for a given class
1020   static u2 get_instance_fields_count(InstanceKlass* ik);
1021   // dumps the definition of the instance fields for a given class
1022   static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k, uintx *inlined_fields_index = nullptr);
1023   // creates HPROF_GC_INSTANCE_DUMP record for the given object
1024   static void dump_instance(AbstractDumpWriter* writer, oop o);
1025   // creates HPROF_GC_CLASS_DUMP record for the given instance class
1026   static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
1027   // creates HPROF_GC_CLASS_DUMP record for a given array class
1028   static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
1029 
1030   // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1031   static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
1032   // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1033   static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array);
1034   // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1035   static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
1036   // create HPROF_FRAME record for the given method and bci
1037   static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
1038 
1039   // check if we need to truncate an array
1040   static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
1041   // extended version to dump flat arrays as primitive arrays;
1042   // type_size specifies size of the inlined objects.
1043   static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);
1044 
1045   // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
1046   static void end_of_dump(AbstractDumpWriter* writer);
1047 
1048   static oop mask_dormant_archived_object(oop o) {
1049     if (o != nullptr && o->klass()->java_mirror() == nullptr) {
1050       // Ignore this object since the corresponding java mirror is not loaded.
1051       // Might be a dormant archive object.
1052       return nullptr;
1053     } else {
1054       return o;
1055     }
1056   }
1057 
1058   // helper methods for inlined fields.
1059   static bool is_inlined_field(const FieldStream& fld) {
1060     return fld.field_descriptor().is_flat();
1061   }
1062   static InlineKlass* get_inlined_field_klass(const FieldStream &fld) {
1063     assert(is_inlined_field(fld), "must be inlined field");
1064     InstanceKlass* holder_klass = fld.field_descriptor().field_holder();
1065     return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
1066   }
1067 };
1068 
1069 // write a header of the given type
1070 void DumperSupport:: write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
1071   writer->write_u1(tag);
1072   writer->write_u4(0);                  // current ticks
1073   writer->write_u4(len);
1074 }
1075 
1076 // returns hprof tag for the given type signature
1077 hprofTag DumperSupport::sig2tag(Symbol* sig) {
1078   switch (sig->char_at(0)) {
1079     case JVM_SIGNATURE_CLASS    : return HPROF_NORMAL_OBJECT;
1080     case JVM_SIGNATURE_PRIMITIVE_OBJECT: return HPROF_NORMAL_OBJECT; // not inlined Q-object, i.e. identity object.
1081     case JVM_SIGNATURE_ARRAY    : return HPROF_NORMAL_OBJECT;
1082     case JVM_SIGNATURE_BYTE     : return HPROF_BYTE;
1083     case JVM_SIGNATURE_CHAR     : return HPROF_CHAR;
1084     case JVM_SIGNATURE_FLOAT    : return HPROF_FLOAT;
1085     case JVM_SIGNATURE_DOUBLE   : return HPROF_DOUBLE;
1086     case JVM_SIGNATURE_INT      : return HPROF_INT;
1087     case JVM_SIGNATURE_LONG     : return HPROF_LONG;
1088     case JVM_SIGNATURE_SHORT    : return HPROF_SHORT;
1089     case JVM_SIGNATURE_BOOLEAN  : return HPROF_BOOLEAN;
1090     default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
1091   }
1092 }
1093 
1094 hprofTag DumperSupport::type2tag(BasicType type) {
1095   switch (type) {
1096     case T_BYTE     : return HPROF_BYTE;
1097     case T_CHAR     : return HPROF_CHAR;
1098     case T_FLOAT    : return HPROF_FLOAT;
1099     case T_DOUBLE   : return HPROF_DOUBLE;
1100     case T_INT      : return HPROF_INT;
1101     case T_LONG     : return HPROF_LONG;
1102     case T_SHORT    : return HPROF_SHORT;
1103     case T_BOOLEAN  : return HPROF_BOOLEAN;
1104     default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
1105   }
1106 }
1107 
1108 u4 DumperSupport::sig2size(Symbol* sig) {
1109   switch (sig->char_at(0)) {
1110     case JVM_SIGNATURE_CLASS:
1111     case JVM_SIGNATURE_PRIMITIVE_OBJECT:
1112     case JVM_SIGNATURE_ARRAY: return sizeof(address);
1113     case JVM_SIGNATURE_BOOLEAN:
1114     case JVM_SIGNATURE_BYTE: return 1;
1115     case JVM_SIGNATURE_SHORT:
1116     case JVM_SIGNATURE_CHAR: return 2;
1117     case JVM_SIGNATURE_INT:
1118     case JVM_SIGNATURE_FLOAT: return 4;
1119     case JVM_SIGNATURE_LONG:
1120     case JVM_SIGNATURE_DOUBLE: return 8;
1121     default: ShouldNotReachHere(); /* to shut up compiler */ return 0;
1122   }
1123 }
1124 
1125 template<typename T, typename F> T bit_cast(F from) { // replace with the real thing when we can use c++20
1126   T to;
1127   static_assert(sizeof(to) == sizeof(from), "must be of the same size");
1128   memcpy(&to, &from, sizeof(to));
1129   return to;
1130 }
1131 
1132 // dump a jfloat
1133 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
1134   if (g_isnan(f)) {
1135     writer->write_u4(0x7fc00000); // collapsing NaNs
1136   } else {
1137     writer->write_u4(bit_cast<u4>(f));
1138   }
1139 }
1140 
1141 // dump a jdouble
1142 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
1143   if (g_isnan(d)) {
1144     writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
1145   } else {
1146     writer->write_u8(bit_cast<u8>(d));
1147   }
1148 }
1149 
1150 
1151 // dumps the raw value of the given field
1152 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
1153   switch (type) {
1154     case JVM_SIGNATURE_CLASS :
1155     case JVM_SIGNATURE_PRIMITIVE_OBJECT: // not inlined Q-object, i.e. identity object.
1156     case JVM_SIGNATURE_ARRAY : {
1157       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
1158       if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
1159         ResourceMark rm;
1160         log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
1161                              p2i(o), o->klass()->external_name(),
1162                              p2i(obj), obj->klass()->external_name());
1163       }
1164       o = mask_dormant_archived_object(o);
1165       assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
1166       writer->write_objectID(o);
1167       break;
1168     }
1169     case JVM_SIGNATURE_BYTE : {
1170       jbyte b = obj->byte_field(offset);
1171       writer->write_u1(b);
1172       break;
1173     }
1174     case JVM_SIGNATURE_CHAR : {
1175       jchar c = obj->char_field(offset);
1176       writer->write_u2(c);
1177       break;
1178     }
1179     case JVM_SIGNATURE_SHORT : {
1180       jshort s = obj->short_field(offset);
1181       writer->write_u2(s);
1182       break;
1183     }
1184     case JVM_SIGNATURE_FLOAT : {
1185       jfloat f = obj->float_field(offset);
1186       dump_float(writer, f);
1187       break;
1188     }
1189     case JVM_SIGNATURE_DOUBLE : {
1190       jdouble d = obj->double_field(offset);
1191       dump_double(writer, d);
1192       break;
1193     }
1194     case JVM_SIGNATURE_INT : {
1195       jint i = obj->int_field(offset);
1196       writer->write_u4(i);
1197       break;
1198     }
1199     case JVM_SIGNATURE_LONG : {
1200       jlong l = obj->long_field(offset);
1201       writer->write_u8(l);
1202       break;
1203     }
1204     case JVM_SIGNATURE_BOOLEAN : {
1205       jboolean b = obj->bool_field(offset);
1206       writer->write_u1(b);
1207       break;
1208     }
1209     default : {
1210       ShouldNotReachHere();
1211       break;
1212     }
1213   }
1214 }
1215 
1216 // calculates the total size of the all fields of the given class.
1217 u4 DumperSupport::instance_size(InstanceKlass *ik) {
1218   u4 size = 0;
1219 
1220   for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1221     if (!fld.access_flags().is_static()) {
1222       if (is_inlined_field(fld)) {
1223         size += instance_size(get_inlined_field_klass(fld));
1224       } else {
1225         size += sig2size(fld.signature());
1226       }
1227     }
1228   }
1229   return size;
1230 }
1231 
1232 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1233   field_count = 0;
1234   u4 size = 0;
1235 
1236   for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
1237     if (fldc.access_flags().is_static()) {
1238       assert(!is_inlined_field(fldc), "static fields cannot be inlined");
1239 
1240       field_count++;
1241       size += sig2size(fldc.signature());
1242     }
1243   }
1244 
1245   // Add in resolved_references which is referenced by the cpCache
1246   // The resolved_references is an array per InstanceKlass holding the
1247   // strings and other oops resolved from the constant pool.
1248   oop resolved_references = ik->constants()->resolved_references_or_null();
1249   if (resolved_references != nullptr) {
1250     field_count++;
1251     size += sizeof(address);
1252 
1253     // Add in the resolved_references of the used previous versions of the class
1254     // in the case of RedefineClasses
1255     InstanceKlass* prev = ik->previous_versions();
1256     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1257       field_count++;
1258       size += sizeof(address);
1259       prev = prev->previous_versions();
1260     }
1261   }
1262 
1263   // We write the value itself plus a name and a one byte type tag per field.
1264   return size + field_count * (sizeof(address) + 1);
1265 }
1266 
1267 // dumps static fields of the given class
1268 void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
1269   InstanceKlass* ik = InstanceKlass::cast(k);
1270 
1271   // dump the field descriptors and raw values
1272   for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
1273     if (fld.access_flags().is_static()) {
1274       assert(!is_inlined_field(fld), "static fields cannot be inlined");
1275 
1276       Symbol* sig = fld.signature();
1277 
1278       writer->write_symbolID(fld.name());   // name
1279       writer->write_u1(sig2tag(sig));       // type
1280 
1281       // value
1282       dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1283     }
1284   }
1285 
1286   // Add resolved_references for each class that has them
1287   oop resolved_references = ik->constants()->resolved_references_or_null();
1288   if (resolved_references != nullptr) {
1289     writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1290     writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1291     writer->write_objectID(resolved_references);
1292 
1293     // Also write any previous versions
1294     InstanceKlass* prev = ik->previous_versions();
1295     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1296       writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1297       writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1298       writer->write_objectID(prev->constants()->resolved_references());
1299       prev = prev->previous_versions();
1300     }
1301   }
1302 }
1303 
1304 // dump the raw values of the instance fields of the given identity or inlined object;
1305 // for identity objects offset is 0 and 'klass' is o->klass(),
1306 // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class.
1307 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, InstanceKlass *klass) {
1308   for (FieldStream fld(klass, false, false); !fld.eos(); fld.next()) {
1309     if (!fld.access_flags().is_static()) {
1310       if (is_inlined_field(fld)) {
1311         InlineKlass* field_klass = get_inlined_field_klass(fld);
1312         // the field is inlined, so all its fields are stored without headers.
1313         int fields_offset = offset + fld.offset() - field_klass->first_field_offset();
1314         dump_inlined_object_fields(writer, o, offset + fld.offset(), field_klass);
1315       } else {
1316         Symbol* sig = fld.signature();
1317         dump_field_value(writer, sig->char_at(0), o, offset + fld.offset());
1318       }
1319     }
1320   }
1321 }
1322 
1323 void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, InlineKlass* klass) {
1324   // the object is inlined, so all its fields are stored without headers.
1325   dump_instance_fields(writer, o, offset - klass->first_field_offset(), klass);
1326 }
1327 
1328 // gets the count of the instance fields for a given class
1329 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1330   u2 field_count = 0;
1331 
1332   for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
1333     if (!fldc.access_flags().is_static()) {
1334       if (is_inlined_field(fldc)) {
1335         // add "synthetic" fields for inlined fields.
1336         field_count += get_instance_fields_count(get_inlined_field_klass(fldc));
1337       } else {
1338         field_count++;
1339       }
1340     }
1341   }
1342 
1343   return field_count;
1344 }
1345 
1346 // dumps the definition of the instance fields for a given class
1347 // inlined_fields_id is not-nullptr for inlined fields (to get synthetic field name IDs
1348 // by using InlinedObjects::get_next_string_id()).
1349 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
1350   // inlined_fields_id != nullptr means ik is a class of inlined field.
1351   // Inlined field id pointer for this class; lazyly initialized
1352   // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
1353   uintx *this_klass_inlined_fields_id = inlined_fields_id;
1354   uintx inlined_id = 0;
1355 
1356   // dump the field descriptors
1357   for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
1358     if (!fld.access_flags().is_static()) {
1359       if (is_inlined_field(fld)) {
1360         // dump "synthetic" fields for inlined fields.
1361         if (this_klass_inlined_fields_id == nullptr) {
1362           inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
1363           this_klass_inlined_fields_id = &inlined_id;
1364         }
1365         dump_instance_field_descriptors(writer, get_inlined_field_klass(fld), this_klass_inlined_fields_id);
1366       } else {
1367         Symbol* sig = fld.signature();
1368         Symbol* name = nullptr;
1369         // Use inlined_fields_id provided by caller.
1370         if (inlined_fields_id != nullptr) {
1371           uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);
1372 
1373           // name_id == 0 is returned on error. use original field signature.
1374           if (name_id != 0) {
1375             *inlined_fields_id = name_id;
1376             name = reinterpret_cast<Symbol*>(name_id);
1377           }
1378         }
1379         if (name == nullptr) {
1380           name = fld.name();
1381         }
1382 
1383         writer->write_symbolID(name);         // name
1384         writer->write_u1(sig2tag(sig));       // type
1385       }
1386     }
1387   }
1388 }
1389 
1390 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1391 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o) {
1392   InstanceKlass* ik = InstanceKlass::cast(o->klass());
1393   u4 is = instance_size(ik);
1394   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1395 
1396   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1397   writer->write_objectID(o);
1398   writer->write_u4(STACK_TRACE_ID);
1399 
1400   // class ID
1401   writer->write_classID(ik);
1402 
1403   // number of bytes that follow
1404   writer->write_u4(is);
1405 
1406   // field values
1407   dump_instance_fields(writer, o, 0, ik);
1408 
1409   writer->end_sub_record();
1410 }
1411 
1412 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1413 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1414   InstanceKlass* ik = InstanceKlass::cast(k);
1415 
1416   // We can safepoint and do a heap dump at a point where we have a Klass,
1417   // but no java mirror class has been setup for it. So we need to check
1418   // that the class is at least loaded, to avoid crash from a null mirror.
1419   if (!ik->is_loaded()) {
1420     return;
1421   }
1422 
1423   u2 static_fields_count = 0;
1424   u4 static_size = get_static_fields_size(ik, static_fields_count);
1425   u2 instance_fields_count = get_instance_fields_count(ik);
1426   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1427   u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size;
1428 
1429   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1430 
1431   // class ID
1432   writer->write_classID(ik);
1433   writer->write_u4(STACK_TRACE_ID);
1434 
1435   // super class ID
1436   InstanceKlass* java_super = ik->java_super();
1437   if (java_super == nullptr) {
1438     writer->write_objectID(oop(nullptr));
1439   } else {
1440     writer->write_classID(java_super);
1441   }
1442 
1443   writer->write_objectID(ik->class_loader());
1444   writer->write_objectID(ik->signers());
1445   writer->write_objectID(ik->protection_domain());
1446 
1447   // reserved
1448   writer->write_objectID(oop(nullptr));
1449   writer->write_objectID(oop(nullptr));
1450 
1451   // instance size
1452   writer->write_u4(HeapWordSize * ik->size_helper());
1453 
1454   // size of constant pool - ignored by HAT 1.1
1455   writer->write_u2(0);
1456 
1457   // static fields
1458   writer->write_u2(static_fields_count);
1459   dump_static_fields(writer, ik);
1460 
1461   // description of instance fields
1462   writer->write_u2(instance_fields_count);
1463   dump_instance_field_descriptors(writer, ik);
1464 
1465   writer->end_sub_record();
1466 }
1467 
1468 // creates HPROF_GC_CLASS_DUMP record for the given array class
1469 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1470   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1471   if (k->is_objArray_klass()) {
1472     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
1473     assert(bk != nullptr, "checking");
1474     if (bk->is_instance_klass()) {
1475       ik = InstanceKlass::cast(bk);
1476     }
1477   }
1478 
1479   u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + 2;
1480   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1481   writer->write_classID(k);
1482   writer->write_u4(STACK_TRACE_ID);
1483 
1484   // super class of array classes is java.lang.Object
1485   InstanceKlass* java_super = k->java_super();
1486   assert(java_super != nullptr, "checking");
1487   writer->write_classID(java_super);
1488 
1489   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1490   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1491   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1492 
1493   writer->write_objectID(oop(nullptr));    // reserved
1494   writer->write_objectID(oop(nullptr));
1495   writer->write_u4(0);             // instance size
1496   writer->write_u2(0);             // constant pool
1497   writer->write_u2(0);             // static fields
1498   writer->write_u2(0);             // instance fields
1499 
1500   writer->end_sub_record();
1501 
1502 }
1503 
1504 // Hprof uses an u4 as record length field,
1505 // which means we need to truncate arrays that are too long.
1506 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {
1507   int length = array->length();
1508 
1509   size_t length_in_bytes = (size_t)length * type_size;
1510   uint max_bytes = max_juint - header_size;
1511 
1512   if (length_in_bytes > max_bytes) {
1513     length = max_bytes / type_size;
1514     length_in_bytes = (size_t)length * type_size;
1515 
1516     BasicType type = ArrayKlass::cast(array->klass())->element_type();
1517     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1518             type2name_tab[type], array->length(), length);
1519   }
1520   return length;
1521 }
1522 
1523 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1524   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1525   assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_PRIMITIVE_OBJECT, "invalid array element type");
1526   int type_size;
1527   if (type == T_OBJECT || type == T_PRIMITIVE_OBJECT) {  // TODO: FIXME
1528     type_size = sizeof(address);
1529   } else {
1530     type_size = type2aelembytes(type);
1531   }
1532 
1533   return calculate_array_max_length(writer, array, type_size, header_size);
1534 }
1535 
1536 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1537 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1538   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1539   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1540   int length = calculate_array_max_length(writer, array, header_size);
1541   u4 size = header_size + length * sizeof(address);
1542 
1543   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1544   writer->write_objectID(array);
1545   writer->write_u4(STACK_TRACE_ID);
1546   writer->write_u4(length);
1547 
1548   // array class ID
1549   writer->write_classID(array->klass());
1550 
1551   // [id]* elements
1552   for (int index = 0; index < length; index++) {
1553     oop o = array->obj_at(index);
1554     if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
1555       ResourceMark rm;
1556       log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
1557                            p2i(o), o->klass()->external_name(),
1558                            p2i(array), array->klass()->external_name());
1559     }
1560     o = mask_dormant_archived_object(o);
1561     writer->write_objectID(o);
1562   }
1563 
1564   writer->end_sub_record();
1565 }
1566 
1567 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1568 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array) {
1569   FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1570   InlineKlass* element_klass = array_klass->element_klass();
1571   int element_size = instance_size(element_klass);
1572   /*                          id         array object ID
1573    *                          u4         stack trace serial number
1574    *                          u4         number of elements
1575    *                          u1         element type
1576    */
1577   short header_size = 1 + sizeof(address) + 2 * 4 + 1;
1578 
1579   // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
1580   BasicType type = T_BYTE;
1581   int type_size = type2aelembytes(type);
1582   int length = calculate_array_max_length(writer, array, element_size, header_size);
1583   u4 length_in_bytes = (u4)(length * element_size);
1584   u4 size = header_size + length_in_bytes;
1585 
1586   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1587   writer->write_objectID(array);
1588   writer->write_u4(STACK_TRACE_ID);
1589   // TODO: round up array length for T_SHORT/T_INT/T_LONG
1590   writer->write_u4(length * element_size);
1591   writer->write_u1(type2tag(type));
1592 
1593   for (int index = 0; index < length; index++) {
1594     // need offset in the holder to read inlined object. calculate it from flatArrayOop::value_at_addr()
1595     int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper())
1596                   - cast_from_oop<address>(array));
1597     dump_inlined_object_fields(writer, array, offset, element_klass);
1598   }
1599 
1600   // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1601 
1602   InlinedObjects::get_instance()->add_flat_array(array);
1603 
1604   writer->end_sub_record();
1605 }
1606 
1607 #define WRITE_ARRAY(Array, Type, Size, Length) \
1608   for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1609 
1610 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1611 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1612   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1613   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1614   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1615 
1616   int length = calculate_array_max_length(writer, array, header_size);
1617   int type_size = type2aelembytes(type);
1618   u4 length_in_bytes = (u4)length * type_size;
1619   u4 size = header_size + length_in_bytes;
1620 
1621   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1622   writer->write_objectID(array);
1623   writer->write_u4(STACK_TRACE_ID);
1624   writer->write_u4(length);
1625   writer->write_u1(type2tag(type));
1626 
1627   // nothing to copy
1628   if (length == 0) {
1629     writer->end_sub_record();
1630     return;
1631   }
1632 
1633   // If the byte ordering is big endian then we can copy most types directly
1634 
1635   switch (type) {
1636     case T_INT : {
1637       if (Endian::is_Java_byte_ordering_different()) {
1638         WRITE_ARRAY(array, int, u4, length);
1639       } else {
1640         writer->write_raw(array->int_at_addr(0), length_in_bytes);
1641       }
1642       break;
1643     }
1644     case T_BYTE : {
1645       writer->write_raw(array->byte_at_addr(0), length_in_bytes);
1646       break;
1647     }
1648     case T_CHAR : {
1649       if (Endian::is_Java_byte_ordering_different()) {
1650         WRITE_ARRAY(array, char, u2, length);
1651       } else {
1652         writer->write_raw(array->char_at_addr(0), length_in_bytes);
1653       }
1654       break;
1655     }
1656     case T_SHORT : {
1657       if (Endian::is_Java_byte_ordering_different()) {
1658         WRITE_ARRAY(array, short, u2, length);
1659       } else {
1660         writer->write_raw(array->short_at_addr(0), length_in_bytes);
1661       }
1662       break;
1663     }
1664     case T_BOOLEAN : {
1665       if (Endian::is_Java_byte_ordering_different()) {
1666         WRITE_ARRAY(array, bool, u1, length);
1667       } else {
1668         writer->write_raw(array->bool_at_addr(0), length_in_bytes);
1669       }
1670       break;
1671     }
1672     case T_LONG : {
1673       if (Endian::is_Java_byte_ordering_different()) {
1674         WRITE_ARRAY(array, long, u8, length);
1675       } else {
1676         writer->write_raw(array->long_at_addr(0), length_in_bytes);
1677       }
1678       break;
1679     }
1680 
1681     // handle float/doubles in a special value to ensure than NaNs are
1682     // written correctly. TO DO: Check if we can avoid this on processors that
1683     // use IEEE 754.
1684 
1685     case T_FLOAT : {
1686       for (int i = 0; i < length; i++) {
1687         dump_float(writer, array->float_at(i));
1688       }
1689       break;
1690     }
1691     case T_DOUBLE : {
1692       for (int i = 0; i < length; i++) {
1693         dump_double(writer, array->double_at(i));
1694       }
1695       break;
1696     }
1697     default : ShouldNotReachHere();
1698   }
1699 
1700   writer->end_sub_record();
1701 }
1702 
1703 // create a HPROF_FRAME record of the given Method* and bci
1704 void DumperSupport::dump_stack_frame(AbstractDumpWriter* writer,
1705                                      int frame_serial_num,
1706                                      int class_serial_num,
1707                                      Method* m,
1708                                      int bci) {
1709   int line_number;
1710   if (m->is_native()) {
1711     line_number = -3;  // native frame
1712   } else {
1713     line_number = m->line_number_from_bci(bci);
1714   }
1715 
1716   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1717   writer->write_id(frame_serial_num);               // frame serial number
1718   writer->write_symbolID(m->name());                // method's name
1719   writer->write_symbolID(m->signature());           // method's signature
1720 
1721   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1722   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1723   writer->write_u4(class_serial_num);               // class serial number
1724   writer->write_u4((u4) line_number);               // line number
1725 }
1726 
1727 
1728 class InlinedFieldNameDumper : public LockedClassesDo {
1729 public:
1730   typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);
1731 
1732 private:
1733   AbstractDumpWriter* _writer;
1734   InlinedObjects *_owner;
1735   Callback       _callback;
1736   uintx _index;
1737 
1738   void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
1739     super_names->push(field_name);
1740     for (FieldStream fld(klass, false, false); !fld.eos(); fld.next()) {
1741       if (!fld.access_flags().is_static()) {
1742         if (DumperSupport::is_inlined_field(fld)) {
1743           dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld));
1744         } else {
1745           // get next string ID.
1746           uintx next_index = _owner->get_next_string_id(_index);
1747           if (next_index == 0) {
1748             // something went wrong (overflow?)
1749             // stop generation; the rest of inlined objects will have original field names.
1750             return;
1751           }
1752           _index = next_index;
1753 
1754           // Calculate length.
1755           int len = fld.name()->utf8_length();
1756           for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1757             len += (*it)->utf8_length() + 1;    // +1 for ".".
1758           }
1759 
1760           DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
1761           _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
1762           // Write the string value.
1763           // 1) super_names.
1764           for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1765             _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
1766             _writer->write_u1('.');
1767           }
1768           // 2) field name.
1769           _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
1770         }
1771       }
1772     }
1773     super_names->pop();
1774   }
1775 
1776   void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
1777     GrowableArray<Symbol*> super_names(4, mtServiceability);
1778     dump_inlined_field_names(&super_names, field_name, field_klass);
1779   }
1780 
1781 public:
1782   InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
1783     : _writer(writer), _owner(owner), _callback(callback), _index(0)  {
1784   }
1785 
1786   void do_klass(Klass* k) {
1787     if (!k->is_instance_klass()) {
1788       return;
1789     }
1790     InstanceKlass* ik = InstanceKlass::cast(k);
1791     // if (ik->has_inline_type_fields()) {
1792     //   return;
1793     // }
1794 
1795     uintx base_index = _index;
1796     int count = 0;
1797 
1798     for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1799       if (!fld.access_flags().is_static()) {
1800         if (DumperSupport::is_inlined_field(fld)) {
1801           dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld));
1802           count++;
1803         }
1804       }
1805     }
1806 
1807     if (count != 0) {
1808       _callback(_owner, k, base_index, count);
1809     }
1810   }
1811 };
1812 
1813 class InlinedFieldsDumper : public LockedClassesDo {
1814 private:
1815   AbstractDumpWriter* _writer;
1816 
1817 public:
1818   InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1819 
1820   void do_klass(Klass* k) {
1821     if (!k->is_instance_klass()) {
1822       return;
1823     }
1824     InstanceKlass* ik = InstanceKlass::cast(k);
1825     // if (ik->has_inline_type_fields()) {
1826     //   return;
1827     // }
1828 
1829     // We can be at a point where java mirror does not exist yet.
1830     // So we need to check that the class is at least loaded, to avoid crash from a null mirror.
1831     if (!ik->is_loaded()) {
1832       return;
1833     }
1834 
1835     u2 inlined_count = 0;
1836     for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1837       if (!fld.access_flags().is_static()) {
1838         if (DumperSupport::is_inlined_field(fld)) {
1839           inlined_count++;
1840         }
1841       }
1842     }
1843     if (inlined_count != 0) {
1844       _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);
1845 
1846       // class ID
1847       _writer->write_classID(ik);
1848       // number of inlined fields
1849       _writer->write_u2(inlined_count);
1850       u2 index = 0;
1851       for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1852         if (!fld.access_flags().is_static()) {
1853           if (DumperSupport::is_inlined_field(fld)) {
1854             // inlined field index
1855             _writer->write_u2(index);
1856             // synthetic field count
1857             u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld));
1858             _writer->write_u2(field_count);
1859             // original field name
1860             _writer->write_symbolID(fld.name());
1861             // inlined field class ID
1862             _writer->write_classID(DumperSupport::get_inlined_field_klass(fld));
1863 
1864             index += field_count;
1865           } else {
1866             index++;
1867           }
1868         }
1869       }
1870     }
1871   }
1872 };
1873 
1874 
1875 void InlinedObjects::init() {
1876   _instance = this;
1877 
1878   struct Closure : public SymbolClosure {
1879     uintx _min_id = max_uintx;
1880     uintx _max_id = 0;
1881     Closure() : _min_id(max_uintx), _max_id(0) {}
1882 
1883     void do_symbol(Symbol** p) {
1884       uintx val = reinterpret_cast<uintx>(*p);
1885       if (val < _min_id) {
1886         _min_id = val;
1887       }
1888       if (val > _max_id) {
1889         _max_id = val;
1890       }
1891     }
1892   } closure;
1893 
1894   SymbolTable::symbols_do(&closure);
1895 
1896   _min_string_id = closure._min_id;
1897   _max_string_id = closure._max_id;
1898 }
1899 
1900 void InlinedObjects::release() {
1901   _instance = nullptr;
1902 
1903   if (_inlined_field_map != nullptr) {
1904     delete _inlined_field_map;
1905     _inlined_field_map = nullptr;
1906   }
1907   if (_flat_arrays != nullptr) {
1908     delete _flat_arrays;
1909     _flat_arrays = nullptr;
1910   }
1911 }
1912 
1913 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1914   if (_this->_inlined_field_map == nullptr) {
1915     _this->_inlined_field_map = new (mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1916   }
1917   _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1918 
1919   // counters for dumping classes with inlined fields
1920   _this->_classes_count++;
1921   _this->_inlined_fields_count += count;
1922 }
1923 
1924 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1925   InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1926   ClassLoaderDataGraph::classes_do(&nameDumper);
1927 
1928   if (_inlined_field_map != nullptr) {
1929     // prepare the map for  get_base_index_for().
1930     _inlined_field_map->sort(ClassInlinedFields::compare);
1931   }
1932 }
1933 
1934 uintx InlinedObjects::get_base_index_for(Klass* k) {
1935   if (_inlined_field_map != nullptr) {
1936     bool found = false;
1937     int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
1938     if (found) {
1939         return _inlined_field_map->at(idx).base_index;
1940     }
1941   }
1942 
1943   // return max_uintx, so get_next_string_id returns 0.
1944   return max_uintx;
1945 }
1946 
1947 uintx InlinedObjects::get_next_string_id(uintx id) {
1948   if (++id == _min_string_id) {
1949     return _max_string_id + 1;
1950   }
1951   return id;
1952 }
1953 
1954 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1955   if (_classes_count != 0) {
1956     // Record for each class contains tag(u1), class ID and count(u2)
1957     // for each inlined field index(u2), synthetic fields count(u2), original field name and class ID
1958     int size = _classes_count * (1 + sizeof(address) + 2)
1959              + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1960     DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1961 
1962     InlinedFieldsDumper dumper(writer);
1963     ClassLoaderDataGraph::classes_do(&dumper);
1964   }
1965 }
1966 
1967 void InlinedObjects::add_flat_array(oop array) {
1968   if (_flat_arrays == nullptr) {
1969     _flat_arrays = new (mtServiceability) GrowableArray<oop>(100, mtServiceability);
1970   }
1971   _flat_arrays->append(array);
1972 }
1973 
1974 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1975   if (_flat_arrays != nullptr) {
1976     // For each flat array the record contains tag (u1), object ID and class ID.
1977     int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1978 
1979     DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1980     for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1981       flatArrayOop array = flatArrayOop(*it);
1982       FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1983       InlineKlass* element_klass = array_klass->element_klass();
1984       writer->write_u1(HPROF_FLAT_ARRAY);
1985       writer->write_objectID(array);
1986       writer->write_classID(element_klass);
1987     }
1988   }
1989 }
1990 
1991 
1992 // Support class used to generate HPROF_UTF8 records from the entries in the
1993 // SymbolTable.
1994 
1995 class SymbolTableDumper : public SymbolClosure {
1996  private:
1997   AbstractDumpWriter* _writer;
1998   AbstractDumpWriter* writer() const                { return _writer; }
1999  public:
2000   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
2001   void do_symbol(Symbol** p);
2002 };
2003 
2004 void SymbolTableDumper::do_symbol(Symbol** p) {
2005   ResourceMark rm;
2006   Symbol* sym = *p;
2007   int len = sym->utf8_length();
2008   if (len > 0) {
2009     char* s = sym->as_utf8();
2010     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
2011     writer()->write_symbolID(sym);
2012     writer()->write_raw(s, len);
2013   }
2014 }
2015 
2016 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
2017 
2018 class JNILocalsDumper : public OopClosure {
2019  private:
2020   AbstractDumpWriter* _writer;
2021   u4 _thread_serial_num;
2022   int _frame_num;
2023   AbstractDumpWriter* writer() const                { return _writer; }
2024  public:
2025   JNILocalsDumper(AbstractDumpWriter* writer, u4 thread_serial_num) {
2026     _writer = writer;
2027     _thread_serial_num = thread_serial_num;
2028     _frame_num = -1;  // default - empty stack
2029   }
2030   void set_frame_number(int n) { _frame_num = n; }
2031   void do_oop(oop* obj_p);
2032   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
2033 };
2034 
2035 
2036 void JNILocalsDumper::do_oop(oop* obj_p) {
2037   // ignore null handles
2038   oop o = *obj_p;
2039   if (o != nullptr) {
2040     u4 size = 1 + sizeof(address) + 4 + 4;
2041     writer()->start_sub_record(HPROF_GC_ROOT_JNI_LOCAL, size);
2042     writer()->write_objectID(o);
2043     writer()->write_u4(_thread_serial_num);
2044     writer()->write_u4((u4)_frame_num);
2045     writer()->end_sub_record();
2046   }
2047 }
2048 
2049 
2050 // Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records
2051 
2052 class JNIGlobalsDumper : public OopClosure {
2053  private:
2054   AbstractDumpWriter* _writer;
2055   AbstractDumpWriter* writer() const                { return _writer; }
2056 
2057  public:
2058   JNIGlobalsDumper(AbstractDumpWriter* writer) {
2059     _writer = writer;
2060   }
2061   void do_oop(oop* obj_p);
2062   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
2063 };
2064 
2065 void JNIGlobalsDumper::do_oop(oop* obj_p) {
2066   oop o = NativeAccess<AS_NO_KEEPALIVE>::oop_load(obj_p);
2067 
2068   // ignore these
2069   if (o == nullptr) return;
2070   // we ignore global ref to symbols and other internal objects
2071   if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
2072     u4 size = 1 + 2 * sizeof(address);
2073     writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size);
2074     writer()->write_objectID(o);
2075     writer()->write_rootID(obj_p);      // global ref ID
2076     writer()->end_sub_record();
2077   }
2078 };
2079 
2080 // Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
2081 
2082 class StickyClassDumper : public KlassClosure {
2083  private:
2084   AbstractDumpWriter* _writer;
2085   AbstractDumpWriter* writer() const                { return _writer; }
2086  public:
2087   StickyClassDumper(AbstractDumpWriter* writer) {
2088     _writer = writer;
2089   }
2090   void do_klass(Klass* k) {
2091     if (k->is_instance_klass()) {
2092       InstanceKlass* ik = InstanceKlass::cast(k);
2093       u4 size = 1 + sizeof(address);
2094       writer()->start_sub_record(HPROF_GC_ROOT_STICKY_CLASS, size);
2095       writer()->write_classID(ik);
2096       writer()->end_sub_record();
2097     }
2098   }
2099 };
2100 
2101 // Large object heap dump support.
2102 // To avoid memory consumption, when dumping large objects such as huge array and
2103 // large objects whose size are larger than LARGE_OBJECT_DUMP_THRESHOLD, the scanned
2104 // partial object/array data will be sent to the backend directly instead of caching
2105 // the whole object/array in the internal buffer.
2106 // The HeapDumpLargeObjectList is used to save the large object when dumper scans
2107 // the heap. The large objects could be added (push) parallelly by multiple dumpers,
2108 // But they will be removed (popped) serially only by the VM thread.
2109 class HeapDumpLargeObjectList : public CHeapObj<mtInternal> {
2110  private:
2111   class HeapDumpLargeObjectListElem : public CHeapObj<mtInternal> {
2112    public:
2113     HeapDumpLargeObjectListElem(oop obj) : _obj(obj), _next(nullptr) { }
2114     oop _obj;
2115     HeapDumpLargeObjectListElem* _next;
2116   };
2117 
2118   volatile HeapDumpLargeObjectListElem* _head;
2119 
2120  public:
2121   HeapDumpLargeObjectList() : _head(nullptr) { }
2122 
2123   void atomic_push(oop obj) {
2124     assert (obj != nullptr, "sanity check");
2125     HeapDumpLargeObjectListElem* entry = new HeapDumpLargeObjectListElem(obj);
2126     if (entry == nullptr) {
2127       warning("failed to allocate element for large object list");
2128       return;
2129     }
2130     assert (entry->_obj != nullptr, "sanity check");
2131     while (true) {
2132       volatile HeapDumpLargeObjectListElem* old_head = Atomic::load_acquire(&_head);
2133       HeapDumpLargeObjectListElem* new_head = entry;
2134       if (Atomic::cmpxchg(&_head, old_head, new_head) == old_head) {
2135         // successfully push
2136         new_head->_next = (HeapDumpLargeObjectListElem*)old_head;
2137         return;
2138       }
2139     }
2140   }
2141 
2142   oop pop() {
2143     if (_head == nullptr) {
2144       return nullptr;
2145     }
2146     HeapDumpLargeObjectListElem* entry = (HeapDumpLargeObjectListElem*)_head;
2147     _head = _head->_next;
2148     assert (entry != nullptr, "illegal larger object list entry");
2149     oop ret = entry->_obj;
2150     delete entry;
2151     assert (ret != nullptr, "illegal oop pointer");
2152     return ret;
2153   }
2154 
2155   void drain(ObjectClosure* cl) {
2156     while (_head !=  nullptr) {
2157       cl->do_object(pop());
2158     }
2159   }
2160 
2161   bool is_empty() {
2162     return _head == nullptr;
2163   }
2164 
2165   static const size_t LargeObjectSizeThreshold = 1 << 20; // 1 MB
2166 };
2167 
2168 class VM_HeapDumper;
2169 
2170 // Support class using when iterating over the heap.
2171 class HeapObjectDumper : public ObjectClosure {
2172  private:
2173   AbstractDumpWriter* _writer;
2174   HeapDumpLargeObjectList* _list;
2175 
2176   AbstractDumpWriter* writer()                  { return _writer; }
2177   bool is_large(oop o);
2178  public:
2179   HeapObjectDumper(AbstractDumpWriter* writer, HeapDumpLargeObjectList* list = nullptr) {
2180     _writer = writer;
2181     _list = list;
2182   }
2183 
2184   // called for each object in the heap
2185   void do_object(oop o);
2186 };
2187 
2188 void HeapObjectDumper::do_object(oop o) {
2189   // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
2190   if (o->klass() == vmClasses::Class_klass()) {
2191     if (!java_lang_Class::is_primitive(o)) {
2192       return;
2193     }
2194   }
2195 
2196   if (DumperSupport::mask_dormant_archived_object(o) == nullptr) {
2197     log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
2198     return;
2199   }
2200 
2201   // If large object list exists and it is large object/array,
2202   // add oop into the list and skip scan. VM thread will process it later.
2203   if (_list != nullptr && is_large(o)) {
2204     _list->atomic_push(o);
2205     return;
2206   }
2207 
2208   if (o->is_instance()) {
2209     // create a HPROF_GC_INSTANCE record for each object
2210     DumperSupport::dump_instance(writer(), o);
2211   } else if (o->is_objArray()) {
2212     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2213     DumperSupport::dump_object_array(writer(), objArrayOop(o));
2214   } else if (o->is_flatArray()) {
2215     DumperSupport::dump_flat_array(writer(), flatArrayOop(o));
2216   } else if (o->is_typeArray()) {
2217     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2218     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2219   }
2220 }
2221 
2222 bool HeapObjectDumper::is_large(oop o) {
2223   size_t size = 0;
2224   if (o->is_instance()) {
2225     // Use o->size() * 8 as the upper limit of instance size to avoid iterating static fields
2226     size = o->size() * 8;
2227   } else if (o->is_objArray()) {
2228     objArrayOop array = objArrayOop(o);
2229     BasicType type = ArrayKlass::cast(array->klass())->element_type();
2230     assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
2231     int length = array->length();
2232     int type_size = sizeof(address);
2233     size = (size_t)length * type_size;
2234   } else if (o->is_typeArray()) {
2235     flatArrayOop array = flatArrayOop(o);
2236     BasicType type = ArrayKlass::cast(array->klass())->element_type();
2237     assert(type == T_PRIMITIVE_OBJECT, "invalid array element type");
2238     int length = array->length();
2239     //TODO: FIXME
2240     //int type_size = type2aelembytes(type);
2241     //size = (size_t)length * type_size;
2242   } else if (o->is_typeArray()) {
2243     typeArrayOop array = typeArrayOop(o);
2244     BasicType type = ArrayKlass::cast(array->klass())->element_type();
2245     assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
2246     int length = array->length();
2247     int type_size = type2aelembytes(type);
2248     size = (size_t)length * type_size;
2249   }
2250   return size > HeapDumpLargeObjectList::LargeObjectSizeThreshold;
2251 }
2252 
2253 // The dumper controller for parallel heap dump
2254 class DumperController : public CHeapObj<mtInternal> {
2255  private:
2256    bool     _started;
2257    Monitor* _lock;
2258    uint   _dumper_number;
2259    uint   _complete_number;
2260 
2261  public:
2262    DumperController(uint number) :
2263      _started(false),
2264      _lock(new (std::nothrow) PaddedMonitor(Mutex::safepoint, "DumperController_lock")),
2265      _dumper_number(number),
2266      _complete_number(0) { }
2267 
2268    ~DumperController() { delete _lock; }
2269 
2270    void wait_for_start_signal() {
2271      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2272      while (_started == false) {
2273        ml.wait();
2274      }
2275      assert(_started == true,  "dumper woke up with wrong state");
2276    }
2277 
2278    void start_dump() {
2279      assert (_started == false, "start dump with wrong state");
2280      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2281      _started = true;
2282      ml.notify_all();
2283    }
2284 
2285    void dumper_complete() {
2286      assert (_started == true, "dumper complete with wrong state");
2287      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2288      _complete_number++;
2289      ml.notify();
2290    }
2291 
2292    void wait_all_dumpers_complete() {
2293      assert (_started == true, "wrong state when wait for dumper complete");
2294      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2295      while (_complete_number != _dumper_number) {
2296         ml.wait();
2297      }
2298      _started = false;
2299    }
2300 };
2301 
2302 // The VM operation that performs the heap dump
2303 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
2304  private:
2305   static VM_HeapDumper*   _global_dumper;
2306   static DumpWriter*      _global_writer;
2307   DumpWriter*             _local_writer;
2308   JavaThread*             _oome_thread;
2309   Method*                 _oome_constructor;
2310   bool                    _gc_before_heap_dump;
2311   GrowableArray<Klass*>*  _klass_map;
2312   ThreadStackTrace**      _stack_traces;
2313   int                     _num_threads;
2314 
2315   // Inlined object support.
2316   InlinedObjects          _inlined_objects;
2317   InlinedObjects* inlined_objects() { return &_inlined_objects; }
2318 
2319   // parallel heap dump support
2320   uint                    _num_dumper_threads;
2321   uint                    _num_writer_threads;
2322   DumperController*       _dumper_controller;
2323   ParallelObjectIterator* _poi;
2324   HeapDumpLargeObjectList* _large_object_list;
2325 
2326   // VMDumperType is for thread that dumps both heap and non-heap data.
2327   static const size_t VMDumperType = 0;
2328   static const size_t WriterType = 1;
2329   static const size_t DumperType = 2;
2330   // worker id of VMDumper thread.
2331   static const size_t VMDumperWorkerId = 0;
2332 
2333   size_t get_worker_type(uint worker_id) {
2334     assert(_num_writer_threads >= 1, "Must be at least one writer");
2335     // worker id of VMDumper that dump heap and non-heap data
2336     if (worker_id == VMDumperWorkerId) {
2337       return VMDumperType;
2338     }
2339 
2340     // worker id of dumper starts from 1, which only dump heap datar
2341     if (worker_id < _num_dumper_threads) {
2342       return DumperType;
2343     }
2344 
2345     // worker id of writer starts from _num_dumper_threads
2346     return WriterType;
2347   }
2348 
2349   void prepare_parallel_dump(uint num_total) {
2350     assert (_dumper_controller == nullptr, "dumper controller must be null");
2351     assert (num_total > 0, "active workers number must >= 1");
2352     // Dumper threads number must not be larger than active workers number.
2353     if (num_total < _num_dumper_threads) {
2354       _num_dumper_threads = num_total - 1;
2355     }
2356     // Calculate dumper and writer threads number.
2357     _num_writer_threads = num_total - _num_dumper_threads;
2358     // If dumper threads number is 1, only the VMThread works as a dumper.
2359     // If dumper threads number is equal to active workers, need at lest one worker thread as writer.
2360     if (_num_dumper_threads > 0 && _num_writer_threads == 0) {
2361       _num_writer_threads = 1;
2362       _num_dumper_threads = num_total - _num_writer_threads;
2363     }
2364     // Prepare parallel writer.
2365     if (_num_dumper_threads > 1) {
2366       ParDumpWriter::before_work();
2367       // Number of dumper threads that only iterate heap.
2368       uint _heap_only_dumper_threads = _num_dumper_threads - 1 /* VMDumper thread */;
2369       _dumper_controller = new (std::nothrow) DumperController(_heap_only_dumper_threads);
2370     }
2371   }
2372 
2373   void finish_parallel_dump() {
2374     if (_num_dumper_threads > 1) {
2375       ParDumpWriter::after_work();
2376     }
2377   }
2378 
2379   // accessors and setters
2380   static VM_HeapDumper* dumper()         {  assert(_global_dumper != nullptr, "Error"); return _global_dumper; }
2381   static DumpWriter* writer()            {  assert(_global_writer != nullptr, "Error"); return _global_writer; }
2382   void set_global_dumper() {
2383     assert(_global_dumper == nullptr, "Error");
2384     _global_dumper = this;
2385   }
2386   void set_global_writer() {
2387     assert(_global_writer == nullptr, "Error");
2388     _global_writer = _local_writer;
2389   }
2390   void clear_global_dumper() { _global_dumper = nullptr; }
2391   void clear_global_writer() { _global_writer = nullptr; }
2392 
2393   bool skip_operation() const;
2394 
2395   // writes a HPROF_LOAD_CLASS record
2396   static void do_load_class(Klass* k);
2397 
2398   // writes a HPROF_GC_CLASS_DUMP record for the given class
2399   static void do_class_dump(Klass* k);
2400 
2401   // HPROF_GC_ROOT_THREAD_OBJ records
2402   int do_thread(JavaThread* thread, u4 thread_serial_num);
2403   void do_threads();
2404 
2405   void add_class_serial_number(Klass* k, int serial_num) {
2406     _klass_map->at_put_grow(serial_num, k);
2407   }
2408 
2409   // HPROF_TRACE and HPROF_FRAME records
2410   void dump_stack_traces();
2411 
2412   // large objects
2413   void dump_large_objects(ObjectClosure* writer);
2414 
2415  public:
2416   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome, uint num_dump_threads) :
2417     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
2418                     GCCause::_heap_dump /* GC Cause */,
2419                     0 /* total full collections, dummy, ignored */,
2420                     gc_before_heap_dump),
2421     WorkerTask("dump heap") {
2422     _local_writer = writer;
2423     _gc_before_heap_dump = gc_before_heap_dump;
2424     _klass_map = new (mtServiceability) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, mtServiceability);
2425     _stack_traces = nullptr;
2426     _num_threads = 0;
2427     _num_dumper_threads = num_dump_threads;
2428     _dumper_controller = nullptr;
2429     _poi = nullptr;
2430     _large_object_list = new (std::nothrow) HeapDumpLargeObjectList();
2431     if (oome) {
2432       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
2433       // get OutOfMemoryError zero-parameter constructor
2434       InstanceKlass* oome_ik = vmClasses::OutOfMemoryError_klass();
2435       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
2436                                                           vmSymbols::void_method_signature());
2437       // get thread throwing OOME when generating the heap dump at OOME
2438       _oome_thread = JavaThread::current();
2439     } else {
2440       _oome_thread = nullptr;
2441       _oome_constructor = nullptr;
2442     }
2443   }
2444 
2445   ~VM_HeapDumper() {
2446     if (_stack_traces != nullptr) {
2447       for (int i=0; i < _num_threads; i++) {
2448         delete _stack_traces[i];
2449       }
2450       FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces);
2451     }
2452     if (_dumper_controller != nullptr) {
2453       delete _dumper_controller;
2454       _dumper_controller = nullptr;
2455     }
2456     delete _klass_map;
2457     delete _large_object_list;
2458   }
2459 
2460   VMOp_Type type() const { return VMOp_HeapDumper; }
2461   virtual bool doit_prologue();
2462   void doit();
2463   void work(uint worker_id);
2464 };
2465 
2466 VM_HeapDumper* VM_HeapDumper::_global_dumper = nullptr;
2467 DumpWriter*    VM_HeapDumper::_global_writer = nullptr;
2468 
2469 bool VM_HeapDumper::skip_operation() const {
2470   return false;
2471 }
2472 
2473 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2474 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2475   writer->finish_dump_segment();
2476 
2477   writer->write_u1(HPROF_HEAP_DUMP_END);
2478   writer->write_u4(0);
2479   writer->write_u4(0);
2480 }
2481 
2482 // writes a HPROF_LOAD_CLASS record for the class
2483 void VM_HeapDumper::do_load_class(Klass* k) {
2484   static u4 class_serial_num = 0;
2485 
2486   // len of HPROF_LOAD_CLASS record
2487   u4 remaining = 2*oopSize + 2*sizeof(u4);
2488 
2489   DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
2490 
2491   // class serial number is just a number
2492   writer()->write_u4(++class_serial_num);
2493 
2494   // class ID
2495   writer()->write_classID(k);
2496 
2497   // add the Klass* and class serial number pair
2498   dumper()->add_class_serial_number(k, class_serial_num);
2499 
2500   writer()->write_u4(STACK_TRACE_ID);
2501 
2502   // class name ID
2503   Symbol* name = k->name();
2504   writer()->write_symbolID(name);
2505 }
2506 
2507 // writes a HPROF_GC_CLASS_DUMP record for the given class
2508 void VM_HeapDumper::do_class_dump(Klass* k) {
2509   if (k->is_instance_klass()) {
2510     DumperSupport::dump_instance_class(writer(), k);
2511   } else {
2512     DumperSupport::dump_array_class(writer(), k);
2513   }
2514 }
2515 
2516 // Walk the stack of the given thread.
2517 // Dumps a HPROF_GC_ROOT_JAVA_FRAME record for each local
2518 // Dumps a HPROF_GC_ROOT_JNI_LOCAL record for each JNI local
2519 //
2520 // It returns the number of Java frames in this thread stack
2521 int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
2522   JNILocalsDumper blk(writer(), thread_serial_num);
2523 
2524   oop threadObj = java_thread->threadObj();
2525   assert(threadObj != nullptr, "sanity check");
2526 
2527   int stack_depth = 0;
2528   if (java_thread->has_last_Java_frame()) {
2529 
2530     // vframes are resource allocated
2531     Thread* current_thread = Thread::current();
2532     ResourceMark rm(current_thread);
2533     HandleMark hm(current_thread);
2534 
2535     RegisterMap reg_map(java_thread,
2536                         RegisterMap::UpdateMap::include,
2537                         RegisterMap::ProcessFrames::include,
2538                         RegisterMap::WalkContinuation::skip);
2539     frame f = java_thread->last_frame();
2540     vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
2541     frame* last_entry_frame = nullptr;
2542     int extra_frames = 0;
2543 
2544     if (java_thread == _oome_thread && _oome_constructor != nullptr) {
2545       extra_frames++;
2546     }
2547     while (vf != nullptr) {
2548       blk.set_frame_number(stack_depth);
2549       if (vf->is_java_frame()) {
2550 
2551         // java frame (interpreted, compiled, ...)
2552         javaVFrame *jvf = javaVFrame::cast(vf);
2553         if (!(jvf->method()->is_native())) {
2554           StackValueCollection* locals = jvf->locals();
2555           for (int slot=0; slot<locals->size(); slot++) {
2556             if (locals->at(slot)->type() == T_OBJECT) {
2557               oop o = locals->obj_at(slot)();
2558 
2559               if (o != nullptr) {
2560                 u4 size = 1 + sizeof(address) + 4 + 4;
2561                 writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
2562                 writer()->write_objectID(o);
2563                 writer()->write_u4(thread_serial_num);
2564                 writer()->write_u4((u4) (stack_depth + extra_frames));
2565                 writer()->end_sub_record();
2566               }
2567             }
2568           }
2569           StackValueCollection *exprs = jvf->expressions();
2570           for(int index = 0; index < exprs->size(); index++) {
2571             if (exprs->at(index)->type() == T_OBJECT) {
2572                oop o = exprs->obj_at(index)();
2573                if (o != nullptr) {
2574                  u4 size = 1 + sizeof(address) + 4 + 4;
2575                  writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
2576                  writer()->write_objectID(o);
2577                  writer()->write_u4(thread_serial_num);
2578                  writer()->write_u4((u4) (stack_depth + extra_frames));
2579                  writer()->end_sub_record();
2580                }
2581              }
2582           }
2583         } else {
2584           // native frame
2585           if (stack_depth == 0) {
2586             // JNI locals for the top frame.
2587             java_thread->active_handles()->oops_do(&blk);
2588           } else {
2589             if (last_entry_frame != nullptr) {
2590               // JNI locals for the entry frame
2591               assert(last_entry_frame->is_entry_frame(), "checking");
2592               last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
2593             }
2594           }
2595         }
2596         // increment only for Java frames
2597         stack_depth++;
2598         last_entry_frame = nullptr;
2599 
2600       } else {
2601         // externalVFrame - if it's an entry frame then report any JNI locals
2602         // as roots when we find the corresponding native javaVFrame
2603         frame* fr = vf->frame_pointer();
2604         assert(fr != nullptr, "sanity check");
2605         if (fr->is_entry_frame()) {
2606           last_entry_frame = fr;
2607         }
2608       }
2609       vf = vf->sender();
2610     }
2611   } else {
2612     // no last java frame but there may be JNI locals
2613     java_thread->active_handles()->oops_do(&blk);
2614   }
2615   return stack_depth;
2616 }
2617 
2618 
2619 // write a HPROF_GC_ROOT_THREAD_OBJ record for each java thread. Then walk
2620 // the stack so that locals and JNI locals are dumped.
2621 void VM_HeapDumper::do_threads() {
2622   for (int i=0; i < _num_threads; i++) {
2623     JavaThread* thread = _stack_traces[i]->thread();
2624     oop threadObj = thread->threadObj();
2625     u4 thread_serial_num = i+1;
2626     u4 stack_serial_num = thread_serial_num + STACK_TRACE_ID;
2627     u4 size = 1 + sizeof(address) + 4 + 4;
2628     writer()->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
2629     writer()->write_objectID(threadObj);
2630     writer()->write_u4(thread_serial_num);  // thread number
2631     writer()->write_u4(stack_serial_num);   // stack trace serial number
2632     writer()->end_sub_record();
2633     int num_frames = do_thread(thread, thread_serial_num);
2634     assert(num_frames == _stack_traces[i]->get_stack_depth(),
2635            "total number of Java frames not matched");
2636   }
2637 }
2638 
2639 bool VM_HeapDumper::doit_prologue() {
2640   if (_gc_before_heap_dump && UseZGC) {
2641     // ZGC cannot perform a synchronous GC cycle from within the VM thread.
2642     // So ZCollectedHeap::collect_as_vm_thread() is a noop. To respect the
2643     // _gc_before_heap_dump flag a synchronous GC cycle is performed from
2644     // the caller thread in the prologue.
2645     Universe::heap()->collect(GCCause::_heap_dump);
2646   }
2647   return VM_GC_Operation::doit_prologue();
2648 }
2649 
2650 
2651 // The VM operation that dumps the heap. The dump consists of the following
2652 // records:
2653 //
2654 //  HPROF_HEADER
2655 //  [HPROF_UTF8]*
2656 //  [HPROF_LOAD_CLASS]*
2657 //  [[HPROF_FRAME]*|HPROF_TRACE]*
2658 //  [HPROF_GC_CLASS_DUMP]*
2659 //  [HPROF_HEAP_DUMP_SEGMENT]*
2660 //  HPROF_HEAP_DUMP_END
2661 //
2662 // The HPROF_TRACE records represent the stack traces where the heap dump
2663 // is generated and a "dummy trace" record which does not include
2664 // any frames. The dummy trace record is used to be referenced as the
2665 // unknown object alloc site.
2666 //
2667 // Each HPROF_HEAP_DUMP_SEGMENT record has a length followed by sub-records.
2668 // To allow the heap dump be generated in a single pass we remember the position
2669 // of the dump length and fix it up after all sub-records have been written.
2670 // To generate the sub-records we iterate over the heap, writing
2671 // HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
2672 // records as we go. Once that is done we write records for some of the GC
2673 // roots.
2674 
2675 void VM_HeapDumper::doit() {
2676 
2677   CollectedHeap* ch = Universe::heap();
2678 
2679   ch->ensure_parsability(false); // must happen, even if collection does
2680                                  // not happen (e.g. due to GCLocker)
2681 
2682   if (_gc_before_heap_dump) {
2683     if (GCLocker::is_active()) {
2684       warning("GC locker is held; pre-heapdump GC was skipped");
2685     } else {
2686       ch->collect_as_vm_thread(GCCause::_heap_dump);
2687     }
2688   }
2689 
2690   // At this point we should be the only dumper active, so
2691   // the following should be safe.
2692   set_global_dumper();
2693   set_global_writer();
2694 
2695   WorkerThreads* workers = ch->safepoint_workers();
2696 
2697   if (workers == nullptr) {
2698     // Use serial dump, set dumper threads and writer threads number to 1.
2699     _num_dumper_threads=1;
2700     _num_writer_threads=1;
2701     work(0);
2702   } else {
2703     prepare_parallel_dump(workers->active_workers());
2704     if (_num_dumper_threads > 1) {
2705       ParallelObjectIterator poi(_num_dumper_threads);
2706       _poi = &poi;
2707       workers->run_task(this);
2708       _poi = nullptr;
2709     } else {
2710       workers->run_task(this);
2711     }
2712     finish_parallel_dump();
2713   }
2714 
2715   // Now we clear the global variables, so that a future dumper can run.
2716   clear_global_dumper();
2717   clear_global_writer();
2718 }
2719 
2720 void VM_HeapDumper::work(uint worker_id) {
2721   if (worker_id != 0) {
2722     if (get_worker_type(worker_id) == WriterType) {
2723       writer()->writer_loop();
2724       return;
2725     }
2726     if (_num_dumper_threads > 1 && get_worker_type(worker_id) == DumperType) {
2727       _dumper_controller->wait_for_start_signal();
2728     }
2729   } else {
2730     // The worker 0 on all non-heap data dumping and part of heap iteration.
2731     // Write the file header - we always use 1.0.2
2732     const char* header = "JAVA PROFILE 1.0.2";
2733 
2734     // header is few bytes long - no chance to overflow int
2735     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2736     writer()->write_u4(oopSize);
2737     // timestamp is current time in ms
2738     writer()->write_u8(os::javaTimeMillis());
2739     // HPROF_UTF8 records
2740     SymbolTableDumper sym_dumper(writer());
2741     SymbolTable::symbols_do(&sym_dumper);
2742 
2743     // HPROF_UTF8 records for inlined field names.
2744     inlined_objects()->init();
2745     inlined_objects()->dump_inlined_field_names(writer());
2746 
2747     // HPROF_INLINED_FIELDS
2748     inlined_objects()->dump_classed_with_inlined_fields(writer());
2749 
2750     // write HPROF_LOAD_CLASS records
2751     {
2752       LockedClassesDo locked_load_classes(&do_load_class);
2753       ClassLoaderDataGraph::classes_do(&locked_load_classes);
2754     }
2755 
2756     // write HPROF_FRAME and HPROF_TRACE records
2757     // this must be called after _klass_map is built when iterating the classes above.
2758     dump_stack_traces();
2759 
2760     // Writes HPROF_GC_CLASS_DUMP records
2761     {
2762       LockedClassesDo locked_dump_class(&do_class_dump);
2763       ClassLoaderDataGraph::classes_do(&locked_dump_class);
2764     }
2765 
2766     // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
2767     do_threads();
2768 
2769     // HPROF_GC_ROOT_JNI_GLOBAL
2770     JNIGlobalsDumper jni_dumper(writer());
2771     JNIHandles::oops_do(&jni_dumper);
2772     // technically not jni roots, but global roots
2773     // for things like preallocated throwable backtraces
2774     Universe::vm_global()->oops_do(&jni_dumper);
2775     // HPROF_GC_ROOT_STICKY_CLASS
2776     // These should be classes in the null class loader data, and not all classes
2777     // if !ClassUnloading
2778     StickyClassDumper class_dumper(writer());
2779     ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
2780   }
2781   // writes HPROF_GC_INSTANCE_DUMP records.
2782   // After each sub-record is written check_segment_length will be invoked
2783   // to check if the current segment exceeds a threshold. If so, a new
2784   // segment is started.
2785   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2786   // of the heap dump.
2787   if (_num_dumper_threads <= 1) {
2788     HeapObjectDumper obj_dumper(writer());
2789     Universe::heap()->object_iterate(&obj_dumper);
2790   } else {
2791     assert(get_worker_type(worker_id) == DumperType
2792           || get_worker_type(worker_id) == VMDumperType,
2793           "must be dumper thread to do heap iteration");
2794     if (get_worker_type(worker_id) == VMDumperType) {
2795       // Clear global writer's buffer.
2796       writer()->finish_dump_segment(true);
2797       // Notify dumpers to start heap iteration.
2798       _dumper_controller->start_dump();
2799     }
2800     // Heap iteration.
2801     {
2802        ParDumpWriter pw(writer());
2803        {
2804          HeapObjectDumper obj_dumper(&pw, _large_object_list);
2805          _poi->object_iterate(&obj_dumper, worker_id);
2806        }
2807 
2808        if (get_worker_type(worker_id) == VMDumperType) {
2809          _dumper_controller->wait_all_dumpers_complete();
2810          // clear internal buffer;
2811          pw.finish_dump_segment(true);
2812          // refresh the global_writer's buffer and position;
2813          writer()->refresh();
2814        } else {
2815          pw.finish_dump_segment(true);
2816          _dumper_controller->dumper_complete();
2817          return;
2818        }
2819     }
2820   }
2821 
2822   assert(get_worker_type(worker_id) == VMDumperType, "Heap dumper must be VMDumper");
2823   // Use writer() rather than ParDumpWriter to avoid memory consumption.
2824   HeapObjectDumper obj_dumper(writer());
2825   dump_large_objects(&obj_dumper);
2826   // Writes the HPROF_HEAP_DUMP_END record.
2827   DumperSupport::end_of_dump(writer());
2828 
2829   inlined_objects()->dump_flat_arrays(writer());
2830 
2831   // We are done with writing. Release the worker threads.
2832   writer()->deactivate();
2833 
2834   inlined_objects()->release();
2835 }
2836 
2837 void VM_HeapDumper::dump_stack_traces() {
2838   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
2839   DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
2840   writer()->write_u4((u4) STACK_TRACE_ID);
2841   writer()->write_u4(0);                    // thread number
2842   writer()->write_u4(0);                    // frame count
2843 
2844   _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
2845   int frame_serial_num = 0;
2846   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
2847     oop threadObj = thread->threadObj();
2848     if (threadObj != nullptr && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
2849       // dump thread stack trace
2850       Thread* current_thread = Thread::current();
2851       ResourceMark rm(current_thread);
2852       HandleMark hm(current_thread);
2853 
2854       ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
2855       stack_trace->dump_stack_at_safepoint(-1, /* ObjectMonitorsHashtable is not needed here */ nullptr, true);
2856       _stack_traces[_num_threads++] = stack_trace;
2857 
2858       // write HPROF_FRAME records for this thread's stack trace
2859       int depth = stack_trace->get_stack_depth();
2860       int thread_frame_start = frame_serial_num;
2861       int extra_frames = 0;
2862       // write fake frame that makes it look like the thread, which caused OOME,
2863       // is in the OutOfMemoryError zero-parameter constructor
2864       if (thread == _oome_thread && _oome_constructor != nullptr) {
2865         int oome_serial_num = _klass_map->find(_oome_constructor->method_holder());
2866         // the class serial number starts from 1
2867         assert(oome_serial_num > 0, "OutOfMemoryError class not found");
2868         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, oome_serial_num,
2869                                         _oome_constructor, 0);
2870         extra_frames++;
2871       }
2872       for (int j=0; j < depth; j++) {
2873         StackFrameInfo* frame = stack_trace->stack_frame_at(j);
2874         Method* m = frame->method();
2875         int class_serial_num = _klass_map->find(m->method_holder());
2876         // the class serial number starts from 1
2877         assert(class_serial_num > 0, "class not found");
2878         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci());
2879       }
2880       depth += extra_frames;
2881 
2882       // write HPROF_TRACE record for one thread
2883       DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize);
2884       int stack_serial_num = _num_threads + STACK_TRACE_ID;
2885       writer()->write_u4(stack_serial_num);      // stack trace serial number
2886       writer()->write_u4((u4) _num_threads);     // thread serial number
2887       writer()->write_u4(depth);                 // frame count
2888       for (int j=1; j <= depth; j++) {
2889         writer()->write_id(thread_frame_start + j);
2890       }
2891     }
2892   }
2893 }
2894 
2895 // dump the large objects.
2896 void VM_HeapDumper::dump_large_objects(ObjectClosure* cl) {
2897   _large_object_list->drain(cl);
2898 }
2899 
2900 // dump the heap to given path.
2901 int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
2902   assert(path != nullptr && strlen(path) > 0, "path missing");
2903 
2904   // print message in interactive case
2905   if (out != nullptr) {
2906     out->print_cr("Dumping heap to %s ...", path);
2907     timer()->start();
2908   }
2909   // create JFR event
2910   EventHeapDump event;
2911 
2912   AbstractCompressor* compressor = nullptr;
2913 
2914   if (compression > 0) {
2915     compressor = new (std::nothrow) GZipCompressor(compression);
2916 
2917     if (compressor == nullptr) {
2918       set_error("Could not allocate gzip compressor");
2919       return -1;
2920     }
2921   }
2922 
2923   DumpWriter writer(new (std::nothrow) FileWriter(path, overwrite), compressor);
2924 
2925   if (writer.error() != nullptr) {
2926     set_error(writer.error());
2927     if (out != nullptr) {
2928       out->print_cr("Unable to create %s: %s", path,
2929         (error() != nullptr) ? error() : "reason unknown");
2930     }
2931     return -1;
2932   }
2933 
2934   // generate the dump
2935   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
2936   if (Thread::current()->is_VM_thread()) {
2937     assert(SafepointSynchronize::is_at_safepoint(), "Expected to be called at a safepoint");
2938     dumper.doit();
2939   } else {
2940     VMThread::execute(&dumper);
2941   }
2942 
2943   // record any error that the writer may have encountered
2944   set_error(writer.error());
2945 
2946   // emit JFR event
2947   if (error() == nullptr) {
2948     event.set_destination(path);
2949     event.set_gcBeforeDump(_gc_before_heap_dump);
2950     event.set_size(writer.bytes_written());
2951     event.set_onOutOfMemoryError(_oome);
2952     event.set_overwrite(overwrite);
2953     event.set_compression(compression);
2954     event.commit();
2955   } else {
2956     log_debug(cds, heap)("Error %s while dumping heap", error());
2957   }
2958 
2959   // print message in interactive case
2960   if (out != nullptr) {
2961     timer()->stop();
2962     if (error() == nullptr) {
2963       out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
2964                     writer.bytes_written(), timer()->seconds());
2965     } else {
2966       out->print_cr("Dump file is incomplete: %s", writer.error());
2967     }
2968   }
2969 
2970   return (writer.error() == nullptr) ? 0 : -1;
2971 }
2972 
2973 // stop timer (if still active), and free any error string we might be holding
2974 HeapDumper::~HeapDumper() {
2975   if (timer()->is_active()) {
2976     timer()->stop();
2977   }
2978   set_error(nullptr);
2979 }
2980 
2981 
2982 // returns the error string (resource allocated), or null
2983 char* HeapDumper::error_as_C_string() const {
2984   if (error() != nullptr) {
2985     char* str = NEW_RESOURCE_ARRAY(char, strlen(error())+1);
2986     strcpy(str, error());
2987     return str;
2988   } else {
2989     return nullptr;
2990   }
2991 }
2992 
2993 // set the error string
2994 void HeapDumper::set_error(char const* error) {
2995   if (_error != nullptr) {
2996     os::free(_error);
2997   }
2998   if (error == nullptr) {
2999     _error = nullptr;
3000   } else {
3001     _error = os::strdup(error);
3002     assert(_error != nullptr, "allocation failure");
3003   }
3004 }
3005 
3006 // Called by out-of-memory error reporting by a single Java thread
3007 // outside of a JVM safepoint
3008 void HeapDumper::dump_heap_from_oome() {
3009   HeapDumper::dump_heap(true);
3010 }
3011 
3012 // Called by error reporting by a single Java thread outside of a JVM safepoint,
3013 // or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
3014 // callers are strictly serialized and guaranteed not to interfere below. For more
3015 // general use, however, this method will need modification to prevent
3016 // inteference when updating the static variables base_path and dump_file_seq below.
3017 void HeapDumper::dump_heap() {
3018   HeapDumper::dump_heap(false);
3019 }
3020 
3021 void HeapDumper::dump_heap(bool oome) {
3022   static char base_path[JVM_MAXPATHLEN] = {'\0'};
3023   static uint dump_file_seq = 0;
3024   char* my_path;
3025   const int max_digit_chars = 20;
3026 
3027   const char* dump_file_name = "java_pid";
3028   const char* dump_file_ext  = HeapDumpGzipLevel > 0 ? ".hprof.gz" : ".hprof";
3029 
3030   // The dump file defaults to java_pid<pid>.hprof in the current working
3031   // directory. HeapDumpPath=<file> can be used to specify an alternative
3032   // dump file name or a directory where dump file is created.
3033   if (dump_file_seq == 0) { // first time in, we initialize base_path
3034     // Calculate potentially longest base path and check if we have enough
3035     // allocated statically.
3036     const size_t total_length =
3037                       (HeapDumpPath == nullptr ? 0 : strlen(HeapDumpPath)) +
3038                       strlen(os::file_separator()) + max_digit_chars +
3039                       strlen(dump_file_name) + strlen(dump_file_ext) + 1;
3040     if (total_length > sizeof(base_path)) {
3041       warning("Cannot create heap dump file.  HeapDumpPath is too long.");
3042       return;
3043     }
3044 
3045     bool use_default_filename = true;
3046     if (HeapDumpPath == nullptr || HeapDumpPath[0] == '\0') {
3047       // HeapDumpPath=<file> not specified
3048     } else {
3049       strcpy(base_path, HeapDumpPath);
3050       // check if the path is a directory (must exist)
3051       DIR* dir = os::opendir(base_path);
3052       if (dir == nullptr) {
3053         use_default_filename = false;
3054       } else {
3055         // HeapDumpPath specified a directory. We append a file separator
3056         // (if needed).
3057         os::closedir(dir);
3058         size_t fs_len = strlen(os::file_separator());
3059         if (strlen(base_path) >= fs_len) {
3060           char* end = base_path;
3061           end += (strlen(base_path) - fs_len);
3062           if (strcmp(end, os::file_separator()) != 0) {
3063             strcat(base_path, os::file_separator());
3064           }
3065         }
3066       }
3067     }
3068     // If HeapDumpPath wasn't a file name then we append the default name
3069     if (use_default_filename) {
3070       const size_t dlen = strlen(base_path);  // if heap dump dir specified
3071       jio_snprintf(&base_path[dlen], sizeof(base_path)-dlen, "%s%d%s",
3072                    dump_file_name, os::current_process_id(), dump_file_ext);
3073     }
3074     const size_t len = strlen(base_path) + 1;
3075     my_path = (char*)os::malloc(len, mtInternal);
3076     if (my_path == nullptr) {
3077       warning("Cannot create heap dump file.  Out of system memory.");
3078       return;
3079     }
3080     strncpy(my_path, base_path, len);
3081   } else {
3082     // Append a sequence number id for dumps following the first
3083     const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
3084     my_path = (char*)os::malloc(len, mtInternal);
3085     if (my_path == nullptr) {
3086       warning("Cannot create heap dump file.  Out of system memory.");
3087       return;
3088     }
3089     jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
3090   }
3091   dump_file_seq++;   // increment seq number for next time we dump
3092 
3093   HeapDumper dumper(false /* no GC before heap dump */,
3094                     oome  /* pass along out-of-memory-error flag */);
3095   dumper.dump(my_path, tty, HeapDumpGzipLevel);
3096   os::free(my_path);
3097 }