/*
 * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/reflectionUtils.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "services/heapDumperCompression.hpp"
#include "services/threadService.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

/*
 * HPROF binary format - description copied from:
 *   src/share/demo/jvmti/hprof/hprof_io.c
 *
 *
 *  header    "JAVA PROFILE 1.0.2" (0-terminated)
 *
 *  u4        size of identifiers. Identifiers are used to represent
 *            UTF8 strings, objects, stack traces, etc. They usually
 *            have the same size as host pointers.
 * u4         high word
 * u4         low word    number of milliseconds since 0:00 GMT, 1/1/70
 * [record]*  a sequence of records.
 *
 *
 * Record format:
 *
 * u1         a TAG denoting the type of the record
 * u4         number of *microseconds* since the time stamp in the
 *            header. (wraps around in a little more than an hour)
 * u4         number of bytes *remaining* in the record. Note that
 *            this number excludes the tag and the length field itself.
 * [u1]*      BODY of the record (a sequence of bytes)
 *
 *
 * The following TAGs are supported:
 *
 * TAG           BODY       notes
 *----------------------------------------------------------
 * HPROF_UTF8               a UTF8-encoded name
 *
 *               id         name ID
 *               [u1]*      UTF8 characters (no trailing zero)
 *
 * HPROF_LOAD_CLASS         a newly loaded class
 *
 *                u4        class serial number (> 0)
 *                id        class object ID
 *                u4        stack trace serial number
 *                id        class name ID
 *
 * HPROF_UNLOAD_CLASS       an unloading class
 *
 *                u4        class serial_number
 *
 * HPROF_FRAME              a Java stack frame
 *
 *                id        stack frame ID
 *                id        method name ID
 *                id        method signature ID
 *                id        source file name ID
 *                u4        class serial number
 *                i4        line number. >0: normal
 *                                       -1: unknown
 *                                       -2: compiled method
 *                                       -3: native method
 *
 * HPROF_TRACE              a Java stack trace
 *
 *               u4         stack trace serial number
 *               u4         thread serial number
 *               u4         number of frames
 *               [id]*      stack frame IDs
 *
 *
 * HPROF_ALLOC_SITES        a set of heap allocation sites, obtained after GC
 *
 *               u2         flags 0x0001: incremental vs. complete
 *                                0x0002: sorted by allocation vs. live
 *                                0x0004: whether to force a GC
 *               u4         cutoff ratio
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *               u4         number of sites that follow
 *               [u1        is_array: 0:  normal object
 *                                    2:  object array
 *                                    4:  boolean array
 *                                    5:  char array
 *                                    6:  float array
 *                                    7:  double array
 *                                    8:  byte array
 *                                    9:  short array
 *                                    10: int array
 *                                    11: long array
 *                u4        class serial number (may be zero during startup)
 *                u4        stack trace serial number
 *                u4        number of bytes alive
 *                u4        number of instances alive
 *                u4        number of bytes allocated
 *                u4]*      number of instances allocated
 *
 * HPROF_START_THREAD       a newly started thread.
 *
 *               u4         thread serial number (> 0)
 *               id         thread object ID
 *               u4         stack trace serial number
 *               id         thread name ID
 *               id         thread group name ID
 *               id         thread group parent name ID
 *
 * HPROF_END_THREAD         a terminating thread.
 *
 *               u4         thread serial number
 *
 * HPROF_HEAP_SUMMARY       heap summary
 *
 *               u4         total live bytes
 *               u4         total live instances
 *               u8         total bytes allocated
 *               u8         total instances allocated
 *
 * HPROF_HEAP_DUMP          denote a heap dump
 *
 *               [heap dump sub-records]*
 *
 *                          The following kinds of heap dump sub-records exist:
 *
 *               u1         sub-record type
 *
 *               HPROF_GC_ROOT_UNKNOWN         unknown root
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_OBJ      thread object
 *
 *                          id         thread object ID  (may be 0 for a
 *                                     thread newly attached through JNI)
 *                          u4         thread sequence number
 *                          u4         stack trace sequence number
 *
 *               HPROF_GC_ROOT_JNI_GLOBAL      JNI global ref root
 *
 *                          id         object ID
 *                          id         JNI global ref ID
 *
 *               HPROF_GC_ROOT_JNI_LOCAL       JNI local ref
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_JAVA_FRAME      Java stack frame
 *
 *                          id         object ID
 *                          u4         thread serial number
 *                          u4         frame # in stack trace (-1 for empty)
 *
 *               HPROF_GC_ROOT_NATIVE_STACK    Native stack
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_STICKY_CLASS    System class
 *
 *                          id         object ID
 *
 *               HPROF_GC_ROOT_THREAD_BLOCK    Reference from thread block
 *
 *                          id         object ID
 *                          u4         thread serial number
 *
 *               HPROF_GC_ROOT_MONITOR_USED    Busy monitor
 *
 *                          id         object ID
 *
 *               HPROF_GC_CLASS_DUMP           dump of a class object
 *
 *                          id         class object ID
 *                          u4         stack trace serial number
 *                          id         super class object ID
 *                          id         class loader object ID
 *                          id         signers object ID
 *                          id         protection domain object ID
 *                          id         reserved
 *                          id         reserved
 *
 *                          u4         instance size (in bytes)
 *
 *                          u2         size of constant pool
 *                          [u2,       constant pool index,
 *                           ty,       type
 *                                     2:  object
 *                                     4:  boolean
 *                                     5:  char
 *                                     6:  float
 *                                     7:  double
 *                                     8:  byte
 *                                     9:  short
 *                                     10: int
 *                                     11: long
 *                           vl]*      and value
 *
 *                          u2         number of static fields
 *                          [id,       static field name,
 *                           ty,       type,
 *                           vl]*      and value
 *
 *                          u2         number of inst. fields (not inc. super)
 *                          [id,       instance field name,
 *                           ty]*      type
 *
 *               HPROF_GC_INSTANCE_DUMP        dump of a normal object
 *
 *                          id         object ID
 *                          u4         stack trace serial number
 *                          id         class object ID
 *                          u4         number of bytes that follow
 *                          [vl]*      instance field values (class, followed
 *                                     by super, super's super ...)
 *
 *               HPROF_GC_OBJ_ARRAY_DUMP       dump of an object array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          id         array class ID
 *                          [id]*      elements
 *
 *               HPROF_GC_PRIM_ARRAY_DUMP      dump of a primitive array
 *
 *                          id         array object ID
 *                          u4         stack trace serial number
 *                          u4         number of elements
 *                          u1         element type
 *                                     4:  boolean array
 *                                     5:  char array
 *                                     6:  float array
 *                                     7:  double array
 *                                     8:  byte array
 *                                     9:  short array
 *                                     10: int array
 *                                     11: long array
 *                          [u1]*      elements
 *
 * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 *
 *                u4        total number of samples
 *                u4        # of traces
 *               [u4        # of samples
 *                u4]*      stack trace serial number
 *
 * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 *
 *                u4        0x00000001: alloc traces on/off
 *                          0x00000002: cpu sampling on/off
 *                u2        stack trace depth
 *
 * HPROF_FLAT_ARRAYS        list of flat arrays
 *
 *               [flat array sub-records]*
 *
 *               HPROF_FLAT_ARRAY      flat array
 *
 *                          id         array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
 *                          id         element class ID (dumped by HPROF_GC_CLASS_DUMP)
 *
 * HPROF_INLINED_FIELDS     describes inlined fields
 *
 *               [class with inlined fields sub-records]*
 *
 *               HPROF_CLASS_WITH_INLINED_FIELDS
 *
 *                          id         class ID (dumped as HPROF_GC_CLASS_DUMP)
 *
 *                          u2         number of instance inlined fields (not including super)
 *                          [u2,       inlined field index,
 *                           u2,       synthetic field count,
 *                           id,       original field name,
 *                           id]*      inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
 *
 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 * be generated as a sequence of heap dump segments. This sequence is
 * terminated by an end record. The additional tags allowed by format
 * "JAVA PROFILE 1.0.2" are:
 *
 * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 *
 *               [heap dump sub-records]*
 *               The same sub-record types allowed by HPROF_HEAP_DUMP
 *
 * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 *
 */
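
// A worked example derived from the format description above, assuming
// 8-byte identifiers: a HPROF_UTF8 record for the 4-character string "main"
// is laid out as
//
//   u1   0x01               tag (HPROF_UTF8)
//   u4   0x00000000         microseconds since the header timestamp
//   u4   0x0000000C         bytes remaining: 8 (name ID) + 4 (UTF8 characters)
//   id   <name ID>          8-byte identifier for this string
//   [u1] 'm' 'a' 'i' 'n'    UTF8 characters, no trailing zero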


// HPROF tags

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // inlined object support
  HPROF_FLAT_ARRAYS             = 0x12,
  HPROF_INLINED_FIELDS          = 0x13,
  // inlined object subrecords
  HPROF_FLAT_ARRAY                  = 0x01,
  HPROF_CLASS_WITH_INLINED_FIELDS   = 0x01,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};

// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200
};


class AbstractDumpWriter;

class InlinedObjects {

  struct ClassInlinedFields {
    const Klass *klass;
    uintx base_index;   // base index of the inlined field names (1st field has index base_index+1).
    ClassInlinedFields(const Klass *klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}

    // For GrowableArray::find_sorted().
    static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
      return a.klass - b.klass;
    }
    // For GrowableArray::sort().
    static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
      return compare(*a, *b);
    }
  };

  uintx _min_string_id;
  uintx _max_string_id;

  GrowableArray<ClassInlinedFields> *_inlined_field_map;

  // counters for classes with inlined fields and for the fields
  int _classes_count;
  int _inlined_fields_count;

  static InlinedObjects *_instance;

  static void inlined_field_names_callback(InlinedObjects* _this, const Klass *klass, uintx base_index, int count);

  GrowableArray<oop> *_flat_arrays;

public:
  InlinedObjects()
    : _min_string_id(0), _max_string_id(0),
    _inlined_field_map(nullptr),
    _classes_count(0), _inlined_fields_count(0),
    _flat_arrays(nullptr) {
  }

  static InlinedObjects* get_instance() {
    return _instance;
  }

  void init();
  void release();

  void dump_inlined_field_names(AbstractDumpWriter *writer);

  uintx get_base_index_for(Klass* k);
  uintx get_next_string_id(uintx id);

  void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);

  void add_flat_array(oop array);
  void dump_flat_arrays(AbstractDumpWriter* writer);

};

InlinedObjects *InlinedObjects::_instance = nullptr;


// Supports I/O operations for a dump
// Base class for dump and parallel dump
class AbstractDumpWriter : public StackObj {
 protected:
  enum {
    io_buffer_max_size = 1*M,
    io_buffer_max_waste = 10*K,
    dump_segment_header_size = 9
  };

  char* _buffer;    // internal buffer
  size_t _size;
  size_t _pos;

  bool _in_dump_segment; // Are we currently in a dump segment?
  bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
  DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
  DEBUG_ONLY(bool _sub_record_ended;) // True if we have called end_sub_record().

  virtual void flush(bool force = false) = 0;

  char* buffer() const                          { return _buffer; }
  size_t buffer_size() const                    { return _size; }
  void set_position(size_t pos)                 { _pos = pos; }

  // Can be called if we have enough room in the buffer.
  void write_fast(const void* s, size_t len);

  // Returns true if we have enough room in the buffer for 'len' bytes.
  bool can_write_fast(size_t len);
 public:
  AbstractDumpWriter() :
    _buffer(NULL),
    _size(io_buffer_max_size),
    _pos(0),
    _in_dump_segment(false) { }

  // total number of bytes written to the disk
  virtual julong bytes_written() const = 0;
  virtual char const* error() const = 0;

  size_t position() const                       { return _pos; }
  // writer functions
  virtual void write_raw(const void* s, size_t len);
  void write_u1(u1 x);
  void write_u2(u2 x);
  void write_u4(u4 x);
  void write_u8(u8 x);
  void write_objectID(oop o);
  void write_symbolID(Symbol* o);
  void write_classID(Klass* k);
  void write_id(u4 x);

  // Start a new sub-record. Starts a new heap dump segment if needed.
  void start_sub_record(u1 tag, u4 len);
  // Ends the current sub-record.
  void end_sub_record();
  // Finishes the current dump segment if not already finished.
  void finish_dump_segment(bool force_flush = false);
  // Refresh to get new buffer
  void refresh() {
    assert(!_in_dump_segment, "Sanity check");
    _buffer = NULL;
    _size = io_buffer_max_size;
    _pos = 0;
    // Force flush to guarantee data from parallel dumper are written.
    flush(true);
  }
  // Called when finished to release the threads.
  virtual void deactivate() = 0;
};
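
// Typical usage of this writer API by the dumper code below (a sketch, not an
// additional contract):
//
//   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, len); // opens a segment if needed
//   writer->write_objectID(o);
//   ... further write_* calls, totalling exactly 'len' bytes ...
//   writer->end_sub_record();
//
// finish_dump_segment() then closes the segment and fixes up its length field.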

void AbstractDumpWriter::write_fast(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  assert(buffer_size() - position() >= len, "Must fit");
  debug_only(_sub_record_left -= len);
  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

bool AbstractDumpWriter::can_write_fast(size_t len) {
  return buffer_size() - position() >= len;
}

// write raw bytes
void AbstractDumpWriter::write_raw(const void* s, size_t len) {
  assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
  debug_only(_sub_record_left -= len);

  // flush buffer to make room.
  while (len > buffer_size() - position()) {
    assert(!_in_dump_segment || _is_huge_sub_record,
           "Cannot overflow in non-huge sub-record.");
    size_t to_write = buffer_size() - position();
    memcpy(buffer() + position(), s, to_write);
    s = (void*) ((char*) s + to_write);
    len -= to_write;
    set_position(position() + to_write);
    flush();
  }

  memcpy(buffer() + position(), s, len);
  set_position(position() + len);
}

// Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
#define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
                                      else write_raw((p), (len)); } while (0)

void AbstractDumpWriter::write_u1(u1 x) {
  WRITE_KNOWN_TYPE(&x, 1);
}

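// The multi-byte writers below convert values to Java (big-endian) byte order
// via Bytes::put_Java_u2/u4/u8 before copying them into the buffer.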
void AbstractDumpWriter::write_u2(u2 x) {
  u2 v;
  Bytes::put_Java_u2((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 2);
}

void AbstractDumpWriter::write_u4(u4 x) {
  u4 v;
  Bytes::put_Java_u4((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 4);
}

void AbstractDumpWriter::write_u8(u8 x) {
  u8 v;
  Bytes::put_Java_u8((address)&v, x);
  WRITE_KNOWN_TYPE(&v, 8);
}

void AbstractDumpWriter::write_objectID(oop o) {
  address a = cast_from_oop<address>(o);
#ifdef _LP64
  write_u8((u8)a);
#else
  write_u4((u4)a);
#endif
}

void AbstractDumpWriter::write_symbolID(Symbol* s) {
  address a = (address)((uintptr_t)s);
#ifdef _LP64
  write_u8((u8)a);
#else
  write_u4((u4)a);
#endif
}

void AbstractDumpWriter::write_id(u4 x) {
#ifdef _LP64
  write_u8((u8) x);
#else
  write_u4(x);
#endif
}

// We use java mirror as the class ID
void AbstractDumpWriter::write_classID(Klass* k) {
  write_objectID(k->java_mirror());
}

void AbstractDumpWriter::finish_dump_segment(bool force_flush) {
  if (_in_dump_segment) {
    assert(_sub_record_left == 0, "Last sub-record not written completely");
    assert(_sub_record_ended, "sub-record must have ended");

    // Fix up the dump segment length if we haven't written a huge sub-record last
    // (in which case the segment length was already set to the correct value initially).
    if (!_is_huge_sub_record) {
      assert(position() > dump_segment_header_size, "Dump segment should have some content");
      Bytes::put_Java_u4((address) (buffer() + 5),
                         (u4) (position() - dump_segment_header_size));
    } else {
      // Finished processing a huge sub-record.
      // Set _is_huge_sub_record to false so the parallel dump writer can flush data to file.
      _is_huge_sub_record = false;
    }

    _in_dump_segment = false;
    flush(force_flush);
  }
}

void AbstractDumpWriter::start_sub_record(u1 tag, u4 len) {
  if (!_in_dump_segment) {
    if (position() > 0) {
      flush();
    }

    assert(position() == 0 && buffer_size() > dump_segment_header_size, "Must be at the start");

    write_u1(HPROF_HEAP_DUMP_SEGMENT);
    write_u4(0); // timestamp
    // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
    // this is already the correct length, since we don't add more sub-records.
    write_u4(len);
    assert(Bytes::get_Java_u4((address)(buffer() + 5)) == len, "Inconsistent size!");
    _in_dump_segment = true;
    _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
  } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
    // This object will not fit completely, or the last sub-record was huge.
    // Finish the current segment and try again.
    finish_dump_segment();
    start_sub_record(tag, len);

    return;
  }

  debug_only(_sub_record_left = len);
  debug_only(_sub_record_ended = false);

  write_u1(tag);
}

void AbstractDumpWriter::end_sub_record() {
  assert(_in_dump_segment, "must be in dump segment");
  assert(_sub_record_left == 0, "sub-record not written completely");
  assert(!_sub_record_ended, "Must not have ended yet");
  debug_only(_sub_record_ended = true);
}

// Supports I/O operations for a dump

class DumpWriter : public AbstractDumpWriter {
 private:
  CompressionBackend _backend; // Does the actual writing.
 protected:
  void flush(bool force = false) override;

 public:
  // Takes ownership of the writer and compressor.
  DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor);

  // total number of bytes written to the disk
  julong bytes_written() const override { return (julong) _backend.get_written(); }

  char const* error() const override    { return _backend.error(); }

  // Called by threads used for parallel writing.
  void writer_loop()                    { _backend.thread_loop(); }
  // Called when finished to release the threads.
  void deactivate() override            { flush(); _backend.deactivate(); }
  // Get the backend pointer, used by parallel dump writer.
  CompressionBackend* backend_ptr()     { return &_backend; }

};

// Check for error after constructing the object and destroy it in case of an error.
DumpWriter::DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor) :
  AbstractDumpWriter(),
  _backend(writer, compressor, io_buffer_max_size, io_buffer_max_waste) {
  flush();
}

// flush any buffered bytes to the file
void DumpWriter::flush(bool force) {
  _backend.get_new_buffer(&_buffer, &_pos, &_size, force);
}

// Buffer queue used for parallel dump.
struct ParWriterBufferQueueElem {
  char* _buffer;
  size_t _used;
  ParWriterBufferQueueElem* _next;
};

class ParWriterBufferQueue : public CHeapObj<mtInternal> {
 private:
  ParWriterBufferQueueElem* _head;
  ParWriterBufferQueueElem* _tail;
  uint _length;
 public:
  ParWriterBufferQueue() : _head(NULL), _tail(NULL), _length(0) { }

  void enqueue(ParWriterBufferQueueElem* entry) {
    if (_head == NULL) {
      assert(is_empty() && _tail == NULL, "Sanity check");
      _head = _tail = entry;
    } else {
      assert(_tail->_next == NULL && _tail->_buffer != NULL, "Buffer queue is polluted");
      _tail->_next = entry;
      _tail = entry;
    }
    _length++;
    assert(_tail->_next == NULL, "Buffer queue is polluted");
  }

  ParWriterBufferQueueElem* dequeue() {
    if (_head == NULL)  return NULL;
    ParWriterBufferQueueElem* entry = _head;
    assert(entry->_buffer != NULL, "polluted buffer in writer list");
    _head = entry->_next;
    if (_head == NULL) {
      _tail = NULL;
    }
    entry->_next = NULL;
    _length--;
    return entry;
  }

  bool is_empty() {
    return _length == 0;
  }

  uint length() { return _length; }
};
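
// Note: the queue itself does no locking. Each ParDumpWriter enqueues into its
// own private queue while dumping, and drains it in flush_to_backend() while
// holding the shared _lock (see ParDumpWriter below).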

// Support parallel heap dump.
class ParDumpWriter : public AbstractDumpWriter {
 private:
  // Lock used to serialize buffer writes from multiple dump threads.
  static Monitor* _lock;
  // Pointer to the backend owned by the global DumpWriter.
  CompressionBackend* _backend_ptr;
  char const * _err;
  ParWriterBufferQueue* _buffer_queue;
  size_t _internal_buffer_used;
  char* _buffer_base;
  bool _split_data;
  static const uint BackendFlushThreshold = 2;
 protected:
  void flush(bool force = false) override {
    assert(_pos != 0, "must not be zero");
    if (_pos != 0) {
      refresh_buffer();
    }

    if (_split_data || _is_huge_sub_record) {
      return;
    }

    if (should_flush_buf_list(force)) {
      assert(!_in_dump_segment && !_split_data && !_is_huge_sub_record, "incomplete data sent to backend!\n");
      flush_to_backend(force);
    }
  }

 public:
  // Check for error after constructing the object and destroy it in case of an error.
  ParDumpWriter(DumpWriter* dw) :
    AbstractDumpWriter(),
    _backend_ptr(dw->backend_ptr()),
    _err(NULL),
    _buffer_queue((new (std::nothrow) ParWriterBufferQueue())),
    _buffer_base(NULL),
    _split_data(false) {
    // prepare internal buffer
    allocate_internal_buffer();
  }

  ~ParDumpWriter() {
    assert(_buffer_queue != NULL, "Sanity check");
    assert((_internal_buffer_used == 0) && (_buffer_queue->is_empty()),
           "All data must be sent to the backend");
    if (_buffer_base != NULL) {
      os::free(_buffer_base);
      _buffer_base = NULL;
    }
    delete _buffer_queue;
    _buffer_queue = NULL;
  }

  // total number of bytes written to the disk
  julong bytes_written() const override { return (julong) _backend_ptr->get_written(); }
  char const* error() const override    { return _err == NULL ? _backend_ptr->error() : _err; }

  static void before_work() {
    assert(_lock == NULL, "ParDumpWriter lock must be initialized only once");
    _lock = new (std::nothrow) PaddedMonitor(Mutex::safepoint, "ParallelHProfWriter_lock");
  }

  static void after_work() {
    assert(_lock != NULL, "ParDumpWriter lock is not initialized");
    delete _lock;
    _lock = NULL;
  }

  // write raw bytes
  void write_raw(const void* s, size_t len) override {
    assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
    debug_only(_sub_record_left -= len);
    assert(!_split_data, "Invalid split data");
    _split_data = true;
    // flush buffer to make room.
    while (len > buffer_size() - position()) {
      assert(!_in_dump_segment || _is_huge_sub_record,
             "Cannot overflow in non-huge sub-record.");
      size_t to_write = buffer_size() - position();
      memcpy(buffer() + position(), s, to_write);
      s = (void*) ((char*) s + to_write);
      len -= to_write;
      set_position(position() + to_write);
      flush();
    }
    _split_data = false;
    memcpy(buffer() + position(), s, len);
    set_position(position() + len);
  }

  void deactivate() override { flush(true); _backend_ptr->deactivate(); }

 private:
  void allocate_internal_buffer() {
    assert(_buffer_queue != NULL, "Internal buffer queue must be ready when allocating the internal buffer");
    assert(_buffer == NULL && _buffer_base == NULL, "current buffer must be NULL before allocate");
    _buffer_base = _buffer = (char*)os::malloc(io_buffer_max_size, mtInternal);
    if (_buffer == NULL) {
      set_error("Could not allocate buffer for writer");
      return;
    }
    _pos = 0;
    _internal_buffer_used = 0;
    _size = io_buffer_max_size;
  }

  void set_error(char const* new_error) {
    if ((new_error != NULL) && (_err == NULL)) {
      _err = new_error;
    }
  }

  // Add buffer to internal list
  void refresh_buffer() {
    size_t expected_total = _internal_buffer_used + _pos;
    if (expected_total < io_buffer_max_size - io_buffer_max_waste) {
      // reuse current buffer.
      _internal_buffer_used = expected_total;
      assert(_size - _pos == io_buffer_max_size - expected_total, "illegal resize of buffer");
      _size -= _pos;
      _buffer += _pos;
      _pos = 0;

      return;
    }
    // It is not possible here that expected_total is larger than io_buffer_max_size because
    // of limitation in write_xxx().
    assert(expected_total <= io_buffer_max_size, "buffer overflow");
    assert(_buffer - _buffer_base <= io_buffer_max_size, "internal buffer overflow");
    ParWriterBufferQueueElem* entry =
        (ParWriterBufferQueueElem*)os::malloc(sizeof(ParWriterBufferQueueElem), mtInternal);
    if (entry == NULL) {
      set_error("Heap dumper could not allocate memory");
      return;
    }
    entry->_buffer = _buffer_base;
    entry->_used = expected_total;
    entry->_next = NULL;
    // add to internal buffer queue
    _buffer_queue->enqueue(entry);
    _buffer_base = _buffer = NULL;
    allocate_internal_buffer();
  }

  void reclaim_entry(ParWriterBufferQueueElem* entry) {
    assert(entry != NULL && entry->_buffer != NULL, "Invalid entry to reclaim");
    os::free(entry->_buffer);
    entry->_buffer = NULL;
    os::free(entry);
  }

  void flush_buffer(char* buffer, size_t used) {
    assert(_lock->owner() == Thread::current(), "flush buffer must hold lock");
    size_t max = io_buffer_max_size;
    // get_new_buffer
    _backend_ptr->flush_external_buffer(buffer, used, max);
  }

  bool should_flush_buf_list(bool force) {
    return force || _buffer_queue->length() > BackendFlushThreshold;
  }

  void flush_to_backend(bool force) {
    // Guarantee there is only one writer updating the backend buffers.
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    while (!_buffer_queue->is_empty()) {
      ParWriterBufferQueueElem* entry = _buffer_queue->dequeue();
      flush_buffer(entry->_buffer, entry->_used);
      // Delete buffer and entry.
      reclaim_entry(entry);
      entry = NULL;
    }
    assert(_pos == 0, "available buffer must be empty before flush");
    // Flush internal buffer.
    if (_internal_buffer_used > 0) {
      flush_buffer(_buffer_base, _internal_buffer_used);
      os::free(_buffer_base);
      _pos = 0;
      _internal_buffer_used = 0;
      _buffer_base = _buffer = NULL;
      // Allocate internal buffer for future use.
      allocate_internal_buffer();
    }
  }
};

Monitor* ParDumpWriter::_lock = NULL;
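
// In a parallel dump each dumper thread writes through its own ParDumpWriter:
// full internal buffers accumulate in the writer's private queue and are handed
// in batches to the CompressionBackend of the global DumpWriter under _lock, so
// only one thread updates the backend at a time.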

// Support class with a collection of functions used when dumping the heap

class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // calculates the total size of all fields of the given class.
  static u4 instance_size(InstanceKlass* ik);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given identity or inlined object;
  // for identity objects offset is 0 and 'klass' is o->klass(),
  // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, InstanceKlass* klass);
  // dump the raw values of the instance fields of the given inlined object;
  // dump_instance_fields wrapper for inlined objects
  static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, InlineKlass* klass);

  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id = nullptr);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
  static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
  // extended version to dump flat arrays as primitive arrays;
  // type_size specifies size of the inlined objects.
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  static oop mask_dormant_archived_object(oop o) {
    if (o != NULL && o->klass()->java_mirror() == NULL) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      return NULL;
    } else {
      return o;
    }
  }

  // helper methods for inlined fields.
  static bool is_inlined_field(const FieldStream& fld) {
    return fld.field_descriptor().is_inlined();
  }
  static InlineKlass* get_inlined_field_klass(const FieldStream &fld) {
    assert(is_inlined_field(fld), "must be inlined field");
    InstanceKlass* holder_klass = fld.field_descriptor().field_holder();
    return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
  }
};

// write a header of the given type
void DumperSupport::write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
  writer->write_u1(tag);
  writer->write_u4(0);                  // current ticks
  writer->write_u4(len);
}

// returns hprof tag for the given type signature
hprofTag DumperSupport::sig2tag(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_PRIMITIVE_OBJECT: return HPROF_NORMAL_OBJECT; // not inlined Q-object, i.e. identity object.
    case JVM_SIGNATURE_ARRAY    : return HPROF_NORMAL_OBJECT;
    case JVM_SIGNATURE_BYTE     : return HPROF_BYTE;
    case JVM_SIGNATURE_CHAR     : return HPROF_CHAR;
    case JVM_SIGNATURE_FLOAT    : return HPROF_FLOAT;
    case JVM_SIGNATURE_DOUBLE   : return HPROF_DOUBLE;
    case JVM_SIGNATURE_INT      : return HPROF_INT;
    case JVM_SIGNATURE_LONG     : return HPROF_LONG;
    case JVM_SIGNATURE_SHORT    : return HPROF_SHORT;
    case JVM_SIGNATURE_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

hprofTag DumperSupport::type2tag(BasicType type) {
  switch (type) {
    case T_BYTE     : return HPROF_BYTE;
    case T_CHAR     : return HPROF_CHAR;
    case T_FLOAT    : return HPROF_FLOAT;
    case T_DOUBLE   : return HPROF_DOUBLE;
    case T_INT      : return HPROF_INT;
    case T_LONG     : return HPROF_LONG;
    case T_SHORT    : return HPROF_SHORT;
    case T_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}

u4 DumperSupport::sig2size(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS:
    case JVM_SIGNATURE_PRIMITIVE_OBJECT:
    case JVM_SIGNATURE_ARRAY: return sizeof(address);
    case JVM_SIGNATURE_BOOLEAN:
    case JVM_SIGNATURE_BYTE: return 1;
    case JVM_SIGNATURE_SHORT:
    case JVM_SIGNATURE_CHAR: return 2;
    case JVM_SIGNATURE_INT:
    case JVM_SIGNATURE_FLOAT: return 4;
    case JVM_SIGNATURE_LONG:
    case JVM_SIGNATURE_DOUBLE: return 8;
    default: ShouldNotReachHere(); /* to shut up compiler */ return 0;
  }
}
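
// For example: "I" maps to 4 bytes, "J" to 8, and any reference signature such
// as "Ljava/lang/String;" to sizeof(address).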

template<typename T, typename F> T bit_cast(F from) { // replace with the real thing when we can use c++20
  T to;
  static_assert(sizeof(to) == sizeof(from), "must be of the same size");
  memcpy(&to, &from, sizeof(to));
  return to;
}
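
// For example, bit_cast<u4>(1.0f) yields the raw IEEE-754 bits 0x3f800000.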

// dump a jfloat
void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
  if (g_isnan(f)) {
    writer->write_u4(0x7fc00000); // collapsing NaNs
  } else {
    writer->write_u4(bit_cast<u4>(f));
  }
}

// dump a jdouble
void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
  if (g_isnan(d)) {
    writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
  } else {
    writer->write_u8(bit_cast<u8>(d));
  }
}


// dumps the raw value of the given field
void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
  switch (type) {
    case JVM_SIGNATURE_CLASS :
    case JVM_SIGNATURE_PRIMITIVE_OBJECT: // not inlined Q-object, i.e. identity object.
    case JVM_SIGNATURE_ARRAY : {
      oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
      if (o != NULL && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == NULL) {
        ResourceMark rm;
        log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name(),
                             p2i(obj), obj->klass()->external_name());
      }
      o = mask_dormant_archived_object(o);
      assert(oopDesc::is_oop_or_null(o), "Expected an oop or NULL at " PTR_FORMAT, p2i(o));
      writer->write_objectID(o);
      break;
    }
    case JVM_SIGNATURE_BYTE : {
      jbyte b = obj->byte_field(offset);
      writer->write_u1(b);
      break;
    }
    case JVM_SIGNATURE_CHAR : {
      jchar c = obj->char_field(offset);
      writer->write_u2(c);
      break;
    }
    case JVM_SIGNATURE_SHORT : {
      jshort s = obj->short_field(offset);
      writer->write_u2(s);
      break;
    }
    case JVM_SIGNATURE_FLOAT : {
      jfloat f = obj->float_field(offset);
      dump_float(writer, f);
      break;
    }
    case JVM_SIGNATURE_DOUBLE : {
      jdouble d = obj->double_field(offset);
      dump_double(writer, d);
      break;
    }
    case JVM_SIGNATURE_INT : {
      jint i = obj->int_field(offset);
      writer->write_u4(i);
      break;
    }
    case JVM_SIGNATURE_LONG : {
      jlong l = obj->long_field(offset);
      writer->write_u8(l);
      break;
    }
    case JVM_SIGNATURE_BOOLEAN : {
      jboolean b = obj->bool_field(offset);
      writer->write_u1(b);
      break;
    }
    default : {
      ShouldNotReachHere();
      break;
    }
  }
}
// calculates the total size of all fields of the given class.
u4 DumperSupport::instance_size(InstanceKlass *ik) {
  u4 size = 0;

  for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      if (is_inlined_field(fld)) {
        size += instance_size(get_inlined_field_klass(fld));
      } else {
        size += sig2size(fld.signature());
      }
    }
  }
  return size;
}
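
// For example, a class with one int field and one non-inlined reference field
// reports 4 + sizeof(address) bytes; an inlined field contributes the flattened
// sizes of its own fields, computed recursively above.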

u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
  field_count = 0;
  u4 size = 0;

  for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
    if (fldc.access_flags().is_static()) {
      assert(!is_inlined_field(fldc), "static fields cannot be inlined");

      field_count++;
      size += sig2size(fldc.signature());
    }
  }

  // Add in resolved_references which is referenced by the cpCache
  // The resolved_references is an array per InstanceKlass holding the
  // strings and other oops resolved from the constant pool.
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != NULL) {
    field_count++;
    size += sizeof(address);

    // Add in the resolved_references of the used previous versions of the class
    // in the case of RedefineClasses
    InstanceKlass* prev = ik->previous_versions();
    while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
      field_count++;
      size += sizeof(address);
      prev = prev->previous_versions();
    }
  }

  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
  // arrays.
  oop init_lock = ik->init_lock();
  if (init_lock != NULL) {
    field_count++;
    size += sizeof(address);
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return size + field_count * (sizeof(address) + 1);
}

// dumps static fields of the given class
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!is_inlined_field(fld), "static fields cannot be inlined");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != NULL) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != NULL) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}

// dump the raw values of the instance fields of the given identity or inlined object;
// for identity objects offset is 0 and 'klass' is o->klass(),
// for inlined objects offset is the offset in the holder object, 'klass' is inlined object class.
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, InstanceKlass *klass) {
  for (FieldStream fld(klass, false, false); !fld.eos(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      if (is_inlined_field(fld)) {
        InlineKlass* field_klass = get_inlined_field_klass(fld);
        // the field is inlined, so all its fields are stored without headers.
        dump_inlined_object_fields(writer, o, offset + fld.offset(), field_klass);
      } else {
        Symbol* sig = fld.signature();
        dump_field_value(writer, sig->char_at(0), o, offset + fld.offset());
      }
    }
  }
}

void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, InlineKlass* klass) {
  // the object is inlined, so all its fields are stored without headers.
  dump_instance_fields(writer, o, offset - klass->first_field_offset(), klass);
}
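
// A note on the offset arithmetic above: field offsets inside an InlineKlass
// are laid out as if the object were a standalone instance with a header, so
// first_field_offset() is subtracted once here, and dump_instance_fields()
// then adds each field's own offset to address the flattened values inside
// the holder object.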

// gets the count of the instance fields for a given class
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
  u2 field_count = 0;

  for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
    if (!fldc.access_flags().is_static()) {
      if (is_inlined_field(fldc)) {
        // add "synthetic" fields for inlined fields.
        field_count += get_instance_fields_count(get_inlined_field_klass(fldc));
      } else {
        field_count++;
      }
    }
  }

  return field_count;
}

// dumps the definition of the instance fields for a given class
// inlined_fields_id is non-NULL for inlined fields (to get synthetic field name IDs
// by using InlinedObjects::get_next_string_id()).
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
  // inlined_fields_id != NULL means ik is the class of an inlined field.
  // Inlined field id pointer for this class; lazily initialized
  // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
  uintx *this_klass_inlined_fields_id = inlined_fields_id;
  uintx inlined_id = 0;

  // dump the field descriptors
  for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      if (is_inlined_field(fld)) {
        // dump "synthetic" fields for inlined fields.
        if (this_klass_inlined_fields_id == nullptr) {
          inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
          this_klass_inlined_fields_id = &inlined_id;
        }
        dump_instance_field_descriptors(writer, get_inlined_field_klass(fld), this_klass_inlined_fields_id);
      } else {
        Symbol* sig = fld.signature();
        Symbol* name = nullptr;
        // Use inlined_fields_id provided by caller.
        if (inlined_fields_id != nullptr) {
          uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);

          // name_id == 0 is returned on error. use original field signature.
          if (name_id != 0) {
            *inlined_fields_id = name_id;
            name = reinterpret_cast<Symbol*>(name_id);
          }
        }
        if (name == nullptr) {
          name = fld.name();
        }

        writer->write_symbolID(name);         // name
        writer->write_u1(sig2tag(sig));       // type
      }
    }
  }
}

// creates HPROF_GC_INSTANCE_DUMP record for the given object
void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());
  u4 is = instance_size(ik);
1403   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1404 
1405   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1406   writer->write_objectID(o);
1407   writer->write_u4(STACK_TRACE_ID);
1408 
1409   // class ID
1410   writer->write_classID(ik);
1411 
1412   // number of bytes that follow
1413   writer->write_u4(is);
1414 
1415   // field values
1416   dump_instance_fields(writer, o, 0, ik);
1417 
1418   writer->end_sub_record();
1419 }
1420 
1421 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1422 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1423   InstanceKlass* ik = InstanceKlass::cast(k);
1424 
  // We can safepoint and do a heap dump at a point where we have a Klass,
  // but no java mirror class has been set up for it. So we need to check
  // that the class is at least loaded, to avoid a crash from a null mirror.
1428   if (!ik->is_loaded()) {
1429     return;
1430   }
1431 
1432   u2 static_fields_count = 0;
1433   u4 static_size = get_static_fields_size(ik, static_fields_count);
1434   u2 instance_fields_count = get_instance_fields_count(ik);
1435   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1436   u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size;
1437 
1438   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1439 
1440   // class ID
1441   writer->write_classID(ik);
1442   writer->write_u4(STACK_TRACE_ID);
1443 
1444   // super class ID
1445   InstanceKlass* java_super = ik->java_super();
1446   if (java_super == NULL) {
1447     writer->write_objectID(oop(NULL));
1448   } else {
1449     writer->write_classID(java_super);
1450   }
1451 
1452   writer->write_objectID(ik->class_loader());
1453   writer->write_objectID(ik->signers());
1454   writer->write_objectID(ik->protection_domain());
1455 
1456   // reserved
1457   writer->write_objectID(oop(NULL));
1458   writer->write_objectID(oop(NULL));
1459 
1460   // instance size
1461   writer->write_u4(HeapWordSize * ik->size_helper());
1462 
1463   // size of constant pool - ignored by HAT 1.1
1464   writer->write_u2(0);
1465 
1466   // static fields
1467   writer->write_u2(static_fields_count);
1468   dump_static_fields(writer, ik);
1469 
1470   // description of instance fields
1471   writer->write_u2(instance_fields_count);
1472   dump_instance_field_descriptors(writer, ik);
1473 
1474   writer->end_sub_record();
1475 }
1476 
1477 // creates HPROF_GC_CLASS_DUMP record for the given array class
1478 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1479   InstanceKlass* ik = NULL; // bottom class for object arrays, NULL for primitive type arrays
1480   if (k->is_objArray_klass()) {
1481     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
1482     assert(bk != NULL, "checking");
1483     if (bk->is_instance_klass()) {
1484       ik = InstanceKlass::cast(bk);
1485     }
1486   }
1487 
1488   u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + 2;
1489   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1490   writer->write_classID(k);
1491   writer->write_u4(STACK_TRACE_ID);
1492 
1493   // super class of array classes is java.lang.Object
1494   InstanceKlass* java_super = k->java_super();
1495   assert(java_super != NULL, "checking");
1496   writer->write_classID(java_super);
1497 
1498   writer->write_objectID(ik == NULL ? oop(NULL) : ik->class_loader());
1499   writer->write_objectID(ik == NULL ? oop(NULL) : ik->signers());
1500   writer->write_objectID(ik == NULL ? oop(NULL) : ik->protection_domain());
1501 
1502   writer->write_objectID(oop(NULL));    // reserved
1503   writer->write_objectID(oop(NULL));
1504   writer->write_u4(0);             // instance size
1505   writer->write_u2(0);             // constant pool
1506   writer->write_u2(0);             // static fields
1507   writer->write_u2(0);             // instance fields
1508 
1509   writer->end_sub_record();
1510 
1511 }
1512 
// HPROF uses a u4 as the record length field,
// which means we may need to truncate arrays that are too long.
1515 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {
1516   int length = array->length();
1517 
1518   size_t length_in_bytes = (size_t)length * type_size;
1519   uint max_bytes = max_juint - header_size;
1520 
1521   if (length_in_bytes > max_bytes) {
1522     length = max_bytes / type_size;
1523     length_in_bytes = (size_t)length * type_size;
1524 
1525     BasicType type = ArrayKlass::cast(array->klass())->element_type();
1526     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1527             type2name_tab[type], array->length(), length);
1528   }
1529   return length;
1530 }
1531 
1532 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1533   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1534   assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_PRIMITIVE_OBJECT, "invalid array element type");
1535   int type_size;
1536   if (type == T_OBJECT || type == T_PRIMITIVE_OBJECT) {  // TODO: FIXME
1537     type_size = sizeof(address);
1538   } else {
1539     type_size = type2aelembytes(type);
1540   }
1541 
1542   return calculate_array_max_length(writer, array, type_size, header_size);
1543 }
1544 
1545 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1546 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1547   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1548   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1549   int length = calculate_array_max_length(writer, array, header_size);
1550   u4 size = header_size + length * sizeof(address);
1551 
1552   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1553   writer->write_objectID(array);
1554   writer->write_u4(STACK_TRACE_ID);
1555   writer->write_u4(length);
1556 
1557   // array class ID
1558   writer->write_classID(array->klass());
1559 
1560   // [id]* elements
1561   for (int index = 0; index < length; index++) {
1562     oop o = array->obj_at(index);
1563     if (o != NULL && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == NULL) {
1564       ResourceMark rm;
1565       log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
1566                            p2i(o), o->klass()->external_name(),
1567                            p2i(array), array->klass()->external_name());
1568     }
1569     o = mask_dormant_archived_object(o);
1570     writer->write_objectID(o);
1571   }
1572 
1573   writer->end_sub_record();
1574 }
1575 
1576 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1577 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array) {
1578   FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1579   InlineKlass* element_klass = array_klass->element_klass();
1580   int element_size = instance_size(element_klass);
1581   /*                          id         array object ID
1582    *                          u4         stack trace serial number
1583    *                          u4         number of elements
1584    *                          u1         element type
1585    */
1586   short header_size = 1 + sizeof(address) + 2 * 4 + 1;
1587 
1588   // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
  BasicType type = T_BYTE;
1591   int length = calculate_array_max_length(writer, array, element_size, header_size);
1592   u4 length_in_bytes = (u4)(length * element_size);
1593   u4 size = header_size + length_in_bytes;
1594 
1595   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1596   writer->write_objectID(array);
1597   writer->write_u4(STACK_TRACE_ID);
1598   // TODO: round up array length for T_SHORT/T_INT/T_LONG
1599   writer->write_u4(length * element_size);
1600   writer->write_u1(type2tag(type));
1601 
1602   for (int index = 0; index < length; index++) {
1603     // need offset in the holder to read inlined object. calculate it from flatArrayOop::value_at_addr()
1604     int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper())
1605                   - cast_from_oop<address>(array));
1606     dump_inlined_object_fields(writer, array, offset, element_klass);
1607   }
1608 
1609   // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1610 
1611   InlinedObjects::get_instance()->add_flat_array(array);
1612 
1613   writer->end_sub_record();
1614 }
1615 
1616 #define WRITE_ARRAY(Array, Type, Size, Length) \
1617   for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
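
// For example, WRITE_ARRAY(array, int, u4, length) expands to:
//   for (int i = 0; i < length; i++) { writer->write_u4((u4)array->int_at(i)); }
// so each element goes through write_u4, which emits the big-endian byte
// order HPROF requires, rather than being copied raw.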
1618 
1619 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1620 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1621   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1622   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1623   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1624 
1625   int length = calculate_array_max_length(writer, array, header_size);
1626   int type_size = type2aelembytes(type);
1627   u4 length_in_bytes = (u4)length * type_size;
1628   u4 size = header_size + length_in_bytes;
1629 
1630   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1631   writer->write_objectID(array);
1632   writer->write_u4(STACK_TRACE_ID);
1633   writer->write_u4(length);
1634   writer->write_u1(type2tag(type));
1635 
1636   // nothing to copy
1637   if (length == 0) {
1638     writer->end_sub_record();
1639     return;
1640   }
1641 
1642   // If the byte ordering is big endian then we can copy most types directly
1643 
1644   switch (type) {
1645     case T_INT : {
1646       if (Endian::is_Java_byte_ordering_different()) {
1647         WRITE_ARRAY(array, int, u4, length);
1648       } else {
1649         writer->write_raw(array->int_at_addr(0), length_in_bytes);
1650       }
1651       break;
1652     }
1653     case T_BYTE : {
1654       writer->write_raw(array->byte_at_addr(0), length_in_bytes);
1655       break;
1656     }
1657     case T_CHAR : {
1658       if (Endian::is_Java_byte_ordering_different()) {
1659         WRITE_ARRAY(array, char, u2, length);
1660       } else {
1661         writer->write_raw(array->char_at_addr(0), length_in_bytes);
1662       }
1663       break;
1664     }
1665     case T_SHORT : {
1666       if (Endian::is_Java_byte_ordering_different()) {
1667         WRITE_ARRAY(array, short, u2, length);
1668       } else {
1669         writer->write_raw(array->short_at_addr(0), length_in_bytes);
1670       }
1671       break;
1672     }
1673     case T_BOOLEAN : {
1674       if (Endian::is_Java_byte_ordering_different()) {
1675         WRITE_ARRAY(array, bool, u1, length);
1676       } else {
1677         writer->write_raw(array->bool_at_addr(0), length_in_bytes);
1678       }
1679       break;
1680     }
1681     case T_LONG : {
1682       if (Endian::is_Java_byte_ordering_different()) {
1683         WRITE_ARRAY(array, long, u8, length);
1684       } else {
1685         writer->write_raw(array->long_at_addr(0), length_in_bytes);
1686       }
1687       break;
1688     }
1689 
    // handle floats/doubles specially to ensure that NaNs are
    // written correctly. TO DO: Check if we can avoid this on processors that
    // use IEEE 754.
1693 
1694     case T_FLOAT : {
1695       for (int i = 0; i < length; i++) {
1696         dump_float(writer, array->float_at(i));
1697       }
1698       break;
1699     }
1700     case T_DOUBLE : {
1701       for (int i = 0; i < length; i++) {
1702         dump_double(writer, array->double_at(i));
1703       }
1704       break;
1705     }
1706     default : ShouldNotReachHere();
1707   }
1708 
1709   writer->end_sub_record();
1710 }
1711 
1712 // create a HPROF_FRAME record of the given Method* and bci
1713 void DumperSupport::dump_stack_frame(AbstractDumpWriter* writer,
1714                                      int frame_serial_num,
1715                                      int class_serial_num,
1716                                      Method* m,
1717                                      int bci) {
1718   int line_number;
1719   if (m->is_native()) {
1720     line_number = -3;  // native frame
1721   } else {
1722     line_number = m->line_number_from_bci(bci);
1723   }
1724 
1725   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1726   writer->write_id(frame_serial_num);               // frame serial number
1727   writer->write_symbolID(m->name());                // method's name
1728   writer->write_symbolID(m->signature());           // method's signature
1729 
1730   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1731   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1732   writer->write_u4(class_serial_num);               // class serial number
1733   writer->write_u4((u4) line_number);               // line number
1734 }
1735 
1736 
1737 class InlinedFieldNameDumper : public LockedClassesDo {
1738 public:
1739   typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);
1740 
1741 private:
1742   AbstractDumpWriter* _writer;
1743   InlinedObjects *_owner;
1744   Callback       _callback;
1745   uintx _index;
1746 
1747   void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
1748     super_names->push(field_name);
1749     for (FieldStream fld(klass, false, false); !fld.eos(); fld.next()) {
1750       if (!fld.access_flags().is_static()) {
1751         if (DumperSupport::is_inlined_field(fld)) {
1752           dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld));
1753         } else {
1754           // get next string ID.
1755           uintx next_index = _owner->get_next_string_id(_index);
1756           if (next_index == 0) {
1757             // something went wrong (overflow?)
1758             // stop generation; the rest of inlined objects will have original field names.
1759             return;
1760           }
1761           _index = next_index;
1762 
1763           // Calculate length.
1764           int len = fld.name()->utf8_length();
1765           for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1766             len += (*it)->utf8_length() + 1;    // +1 for ".".
1767           }
1768 
1769           DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
1770           _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
1771           // Write the string value.
1772           // 1) super_names.
1773           for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1774             _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
1775             _writer->write_u1('.');
1776           }
1777           // 2) field name.
1778           _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
1779         }
1780       }
1781     }
1782     super_names->pop();
1783   }
1784 
1785   void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
1786     GrowableArray<Symbol*> super_names(4, mtServiceability);
1787     dump_inlined_field_names(&super_names, field_name, field_klass);
1788   }
1789 
1790 public:
1791   InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
1792     : _writer(writer), _owner(owner), _callback(callback), _index(0)  {
1793   }
1794 
1795   void do_klass(Klass* k) {
1796     if (!k->is_instance_klass()) {
1797       return;
1798     }
1799     InstanceKlass* ik = InstanceKlass::cast(k);
    // TODO: consider skipping classes without inlined fields here,
    // e.g. with a check based on ik->has_inline_type_fields().
1803 
1804     uintx base_index = _index;
1805     int count = 0;
1806 
1807     for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1808       if (!fld.access_flags().is_static()) {
1809         if (DumperSupport::is_inlined_field(fld)) {
1810           dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld));
1811           count++;
1812         }
1813       }
1814     }
1815 
1816     if (count != 0) {
1817       _callback(_owner, k, base_index, count);
1818     }
1819   }
1820 };
1821 
1822 class InlinedFieldsDumper : public LockedClassesDo {
1823 private:
1824   AbstractDumpWriter* _writer;
1825 
1826 public:
1827   InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1828 
1829   void do_klass(Klass* k) {
1830     if (!k->is_instance_klass()) {
1831       return;
1832     }
1833     InstanceKlass* ik = InstanceKlass::cast(k);
    // TODO: consider skipping classes without inlined fields here,
    // e.g. with a check based on ik->has_inline_type_fields().
1837 
1838     // We can be at a point where java mirror does not exist yet.
1839     // So we need to check that the class is at least loaded, to avoid crash from a null mirror.
1840     if (!ik->is_loaded()) {
1841       return;
1842     }
1843 
1844     u2 inlined_count = 0;
1845     for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1846       if (!fld.access_flags().is_static()) {
1847         if (DumperSupport::is_inlined_field(fld)) {
1848           inlined_count++;
1849         }
1850       }
1851     }
1852     if (inlined_count != 0) {
1853       _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);
1854 
1855       // class ID
1856       _writer->write_classID(ik);
1857       // number of inlined fields
1858       _writer->write_u2(inlined_count);
1859       u2 index = 0;
1860       for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1861         if (!fld.access_flags().is_static()) {
1862           if (DumperSupport::is_inlined_field(fld)) {
1863             // inlined field index
1864             _writer->write_u2(index);
1865             // synthetic field count
1866             u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld));
1867             _writer->write_u2(field_count);
1868             // original field name
1869             _writer->write_symbolID(fld.name());
1870             // inlined field class ID
1871             _writer->write_classID(DumperSupport::get_inlined_field_klass(fld));
1872 
1873             index += field_count;
1874           } else {
1875             index++;
1876           }
1877         }
1878       }
1879     }
1880   }
1881 };
1882 
1883 
1884 void InlinedObjects::init() {
1885   _instance = this;
1886 
1887   struct Closure : public SymbolClosure {
1888     uintx _min_id = max_uintx;
1889     uintx _max_id = 0;
1890     Closure() : _min_id(max_uintx), _max_id(0) {}
1891 
1892     void do_symbol(Symbol** p) {
1893       uintx val = reinterpret_cast<uintx>(*p);
1894       if (val < _min_id) {
1895         _min_id = val;
1896       }
1897       if (val > _max_id) {
1898         _max_id = val;
1899       }
1900     }
1901   } closure;
1902 
1903   SymbolTable::symbols_do(&closure);
1904 
1905   _min_string_id = closure._min_id;
1906   _max_string_id = closure._max_id;
1907 }
1908 
1909 void InlinedObjects::release() {
1910   _instance = nullptr;
1911 
1912   if (_inlined_field_map != nullptr) {
1913     delete _inlined_field_map;
1914     _inlined_field_map = nullptr;
1915   }
1916   if (_flat_arrays != nullptr) {
1917     delete _flat_arrays;
1918     _flat_arrays = nullptr;
1919   }
1920 }
1921 
1922 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1923   if (_this->_inlined_field_map == nullptr) {
1924     _this->_inlined_field_map = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1925   }
1926   _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1927 
1928   // counters for dumping classes with inlined fields
1929   _this->_classes_count++;
1930   _this->_inlined_fields_count += count;
1931 }
1932 
1933 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1934   InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1935   ClassLoaderDataGraph::classes_do(&nameDumper);
1936 
1937   if (_inlined_field_map != nullptr) {
    // prepare the map for get_base_index_for().
1939     _inlined_field_map->sort(ClassInlinedFields::compare);
1940   }
1941 }
1942 
1943 uintx InlinedObjects::get_base_index_for(Klass* k) {
1944   if (_inlined_field_map != nullptr) {
1945     bool found = false;
1946     int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
    if (found) {
      return _inlined_field_map->at(idx).base_index;
    }
1950   }
1951 
1952   // return max_uintx, so get_next_string_id returns 0.
1953   return max_uintx;
1954 }
1955 
1956 uintx InlinedObjects::get_next_string_id(uintx id) {
1957   if (++id == _min_string_id) {
1958     return _max_string_id + 1;
1959   }
1960   return id;
1961 }
1962 
1963 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1964   if (_classes_count != 0) {
1965     // Record for each class contains tag(u1), class ID and count(u2)
1966     // for each inlined field index(u2), synthetic fields count(u2), original field name and class ID
1967     int size = _classes_count * (1 + sizeof(address) + 2)
1968              + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1969     DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1970 
1971     InlinedFieldsDumper dumper(writer);
1972     ClassLoaderDataGraph::classes_do(&dumper);
1973   }
1974 }
1975 
1976 void InlinedObjects::add_flat_array(oop array) {
1977   if (_flat_arrays == nullptr) {
1978     _flat_arrays = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(100, mtServiceability);
1979   }
1980   _flat_arrays->append(array);
1981 }
1982 
1983 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1984   if (_flat_arrays != nullptr) {
1985     // For each flat array the record contains tag (u1), object ID and class ID.
1986     int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1987 
1988     DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1989     for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1990       flatArrayOop array = flatArrayOop(*it);
1991       FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1992       InlineKlass* element_klass = array_klass->element_klass();
1993       writer->write_u1(HPROF_FLAT_ARRAY);
1994       writer->write_objectID(array);
1995       writer->write_classID(element_klass);
1996     }
1997   }
1998 }
1999 
2000 
2001 // Support class used to generate HPROF_UTF8 records from the entries in the
2002 // SymbolTable.
2003 
2004 class SymbolTableDumper : public SymbolClosure {
2005  private:
2006   AbstractDumpWriter* _writer;
2007   AbstractDumpWriter* writer() const                { return _writer; }
2008  public:
2009   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
2010   void do_symbol(Symbol** p);
2011 };
2012 
2013 void SymbolTableDumper::do_symbol(Symbol** p) {
2014   ResourceMark rm;
2015   Symbol* sym = *p;
2016   int len = sym->utf8_length();
2017   if (len > 0) {
2018     char* s = sym->as_utf8();
2019     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
2020     writer()->write_symbolID(sym);
2021     writer()->write_raw(s, len);
2022   }
2023 }
2024 
2025 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
2026 
2027 class JNILocalsDumper : public OopClosure {
2028  private:
2029   AbstractDumpWriter* _writer;
2030   u4 _thread_serial_num;
2031   int _frame_num;
2032   AbstractDumpWriter* writer() const                { return _writer; }
2033  public:
2034   JNILocalsDumper(AbstractDumpWriter* writer, u4 thread_serial_num) {
2035     _writer = writer;
2036     _thread_serial_num = thread_serial_num;
2037     _frame_num = -1;  // default - empty stack
2038   }
2039   void set_frame_number(int n) { _frame_num = n; }
2040   void do_oop(oop* obj_p);
2041   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
2042 };
2043 
2044 
2045 void JNILocalsDumper::do_oop(oop* obj_p) {
2046   // ignore null handles
2047   oop o = *obj_p;
2048   if (o != NULL) {
2049     u4 size = 1 + sizeof(address) + 4 + 4;
2050     writer()->start_sub_record(HPROF_GC_ROOT_JNI_LOCAL, size);
2051     writer()->write_objectID(o);
2052     writer()->write_u4(_thread_serial_num);
2053     writer()->write_u4((u4)_frame_num);
2054     writer()->end_sub_record();
2055   }
2056 }
2057 
2058 
2059 // Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records
2060 
2061 class JNIGlobalsDumper : public OopClosure {
2062  private:
2063   AbstractDumpWriter* _writer;
2064   AbstractDumpWriter* writer() const                { return _writer; }
2065 
2066  public:
2067   JNIGlobalsDumper(AbstractDumpWriter* writer) {
2068     _writer = writer;
2069   }
2070   void do_oop(oop* obj_p);
2071   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
2072 };
2073 
2074 void JNIGlobalsDumper::do_oop(oop* obj_p) {
2075   oop o = NativeAccess<AS_NO_KEEPALIVE>::oop_load(obj_p);
2076 
2077   // ignore these
2078   if (o == NULL) return;
2079   // we ignore global ref to symbols and other internal objects
2080   if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
2081     u4 size = 1 + 2 * sizeof(address);
2082     writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size);
2083     writer()->write_objectID(o);
2084     writer()->write_objectID((oopDesc*)obj_p);      // global ref ID
2085     writer()->end_sub_record();
2086   }
2087 };
2088 
2089 // Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
2090 
2091 class StickyClassDumper : public KlassClosure {
2092  private:
2093   AbstractDumpWriter* _writer;
2094   AbstractDumpWriter* writer() const                { return _writer; }
2095  public:
2096   StickyClassDumper(AbstractDumpWriter* writer) {
2097     _writer = writer;
2098   }
2099   void do_klass(Klass* k) {
2100     if (k->is_instance_klass()) {
2101       InstanceKlass* ik = InstanceKlass::cast(k);
2102       u4 size = 1 + sizeof(address);
2103       writer()->start_sub_record(HPROF_GC_ROOT_STICKY_CLASS, size);
2104       writer()->write_classID(ik);
2105       writer()->end_sub_record();
2106     }
2107   }
2108 };
2109 
// Large object heap dump support.
// To limit memory consumption, large objects such as huge arrays and any
// object whose dump size exceeds LargeObjectSizeThreshold are not cached in
// the internal buffer; their scanned data is sent to the backend directly.
// The HeapDumpLargeObjectList saves these large objects while the dumpers
// scan the heap. Large objects can be added (pushed) in parallel by multiple
// dumpers, but they are removed (popped) serially, and only by the VM thread.
2118 class HeapDumpLargeObjectList : public CHeapObj<mtInternal> {
2119  private:
2120   class HeapDumpLargeObjectListElem : public CHeapObj<mtInternal> {
2121    public:
2122     HeapDumpLargeObjectListElem(oop obj) : _obj(obj), _next(NULL) { }
2123     oop _obj;
2124     HeapDumpLargeObjectListElem* _next;
2125   };
2126 
2127   volatile HeapDumpLargeObjectListElem* _head;
2128 
2129  public:
2130   HeapDumpLargeObjectList() : _head(NULL) { }
2131 
2132   void atomic_push(oop obj) {
2133     assert (obj != NULL, "sanity check");
2134     HeapDumpLargeObjectListElem* entry = new HeapDumpLargeObjectListElem(obj);
2135     if (entry == NULL) {
2136       warning("failed to allocate element for large object list");
2137       return;
2138     }
2139     assert (entry->_obj != NULL, "sanity check");
    while (true) {
      volatile HeapDumpLargeObjectListElem* old_head = Atomic::load_acquire(&_head);
      // link the new entry to the current head *before* publishing it,
      // so the list is always fully linked when the entry becomes visible
      entry->_next = (HeapDumpLargeObjectListElem*)old_head;
      if (Atomic::cmpxchg(&_head, old_head, entry) == old_head) {
        // successfully pushed
        return;
      }
    }
2149   }
2150 
2151   oop pop() {
2152     if (_head == NULL) {
2153       return NULL;
2154     }
2155     HeapDumpLargeObjectListElem* entry = (HeapDumpLargeObjectListElem*)_head;
2156     _head = _head->_next;
    assert (entry != NULL, "illegal large object list entry");
2158     oop ret = entry->_obj;
2159     delete entry;
2160     assert (ret != NULL, "illegal oop pointer");
2161     return ret;
2162   }
2163 
2164   void drain(ObjectClosure* cl) {
2165     while (_head !=  NULL) {
2166       cl->do_object(pop());
2167     }
2168   }
2169 
2170   bool is_empty() {
2171     return _head == NULL;
2172   }
2173 
2174   static const size_t LargeObjectSizeThreshold = 1 << 20; // 1 MB
2175 };
2176 
2177 class VM_HeapDumper;
2178 
// Support class used when iterating over the heap.
2180 class HeapObjectDumper : public ObjectClosure {
2181  private:
2182   AbstractDumpWriter* _writer;
2183   HeapDumpLargeObjectList* _list;
2184 
2185   AbstractDumpWriter* writer()                  { return _writer; }
2186   bool is_large(oop o);
2187  public:
2188   HeapObjectDumper(AbstractDumpWriter* writer, HeapDumpLargeObjectList* list = NULL) {
2189     _writer = writer;
2190     _list = list;
2191   }
2192 
2193   // called for each object in the heap
2194   void do_object(oop o);
2195 };
2196 
2197 void HeapObjectDumper::do_object(oop o) {
  // skip classes as these are emitted as HPROF_GC_CLASS_DUMP records
2199   if (o->klass() == vmClasses::Class_klass()) {
2200     if (!java_lang_Class::is_primitive(o)) {
2201       return;
2202     }
2203   }
2204 
2205   if (DumperSupport::mask_dormant_archived_object(o) == NULL) {
2206     log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
2207     return;
2208   }
2209 
2210   // If large object list exists and it is large object/array,
2211   // add oop into the list and skip scan. VM thread will process it later.
2212   if (_list != NULL && is_large(o)) {
2213     _list->atomic_push(o);
2214     return;
2215   }
2216 
2217   if (o->is_instance()) {
2218     // create a HPROF_GC_INSTANCE record for each object
2219     DumperSupport::dump_instance(writer(), o);
2220   } else if (o->is_objArray()) {
2221     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2222     DumperSupport::dump_object_array(writer(), objArrayOop(o));
2223   } else if (o->is_flatArray()) {
2224     DumperSupport::dump_flat_array(writer(), flatArrayOop(o));
2225   } else if (o->is_typeArray()) {
2226     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2227     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2228   }
2229 }
2230 
2231 bool HeapObjectDumper::is_large(oop o) {
2232   size_t size = 0;
2233   if (o->is_instance()) {
2234     // Use o->size() * 8 as the upper limit of instance size to avoid iterating static fields
2235     size = o->size() * 8;
2236   } else if (o->is_objArray()) {
2237     objArrayOop array = objArrayOop(o);
2238     BasicType type = ArrayKlass::cast(array->klass())->element_type();
2239     assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
2240     int length = array->length();
2241     int type_size = sizeof(address);
2242     size = (size_t)length * type_size;
  } else if (o->is_flatArray()) {
2244     flatArrayOop array = flatArrayOop(o);
2245     BasicType type = ArrayKlass::cast(array->klass())->element_type();
2246     assert(type == T_PRIMITIVE_OBJECT, "invalid array element type");
2247     int length = array->length();
2248     //TODO: FIXME
2249     //int type_size = type2aelembytes(type);
2250     //size = (size_t)length * type_size;
2251   } else if (o->is_typeArray()) {
2252     typeArrayOop array = typeArrayOop(o);
2253     BasicType type = ArrayKlass::cast(array->klass())->element_type();
2254     assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
2255     int length = array->length();
2256     int type_size = type2aelembytes(type);
2257     size = (size_t)length * type_size;
2258   }
2259   return size > HeapDumpLargeObjectList::LargeObjectSizeThreshold;
2260 }
2261 
2262 // The dumper controller for parallel heap dump
2263 class DumperController : public CHeapObj<mtInternal> {
2264  private:
2265    bool     _started;
2266    Monitor* _lock;
2267    uint   _dumper_number;
2268    uint   _complete_number;
2269 
2270  public:
2271    DumperController(uint number) :
2272      _started(false),
2273      _lock(new (std::nothrow) PaddedMonitor(Mutex::safepoint, "DumperController_lock")),
2274      _dumper_number(number),
2275      _complete_number(0) { }
2276 
2277    ~DumperController() { delete _lock; }
2278 
2279    void wait_for_start_signal() {
2280      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2281      while (_started == false) {
2282        ml.wait();
2283      }
2284      assert(_started == true,  "dumper woke up with wrong state");
2285    }
2286 
2287    void start_dump() {
2288      assert (_started == false, "start dump with wrong state");
2289      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2290      _started = true;
2291      ml.notify_all();
2292    }
2293 
2294    void dumper_complete() {
2295      assert (_started == true, "dumper complete with wrong state");
2296      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2297      _complete_number++;
2298      ml.notify();
2299    }
2300 
2301    void wait_all_dumpers_complete() {
2302      assert (_started == true, "wrong state when wait for dumper complete");
2303      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2304      while (_complete_number != _dumper_number) {
2305         ml.wait();
2306      }
2307      _started = false;
2308    }
2309 };
2310 
2311 // The VM operation that performs the heap dump
2312 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
2313  private:
2314   static VM_HeapDumper*   _global_dumper;
2315   static DumpWriter*      _global_writer;
2316   DumpWriter*             _local_writer;
2317   JavaThread*             _oome_thread;
2318   Method*                 _oome_constructor;
2319   bool                    _gc_before_heap_dump;
2320   GrowableArray<Klass*>*  _klass_map;
2321   ThreadStackTrace**      _stack_traces;
2322   int                     _num_threads;
2323 
2324   // Inlined object support.
2325   InlinedObjects          _inlined_objects;
2326   InlinedObjects* inlined_objects() { return &_inlined_objects; }
2327 
2328   // parallel heap dump support
2329   uint                    _num_dumper_threads;
2330   uint                    _num_writer_threads;
2331   DumperController*       _dumper_controller;
2332   ParallelObjectIterator* _poi;
2333   HeapDumpLargeObjectList* _large_object_list;
2334 
  // VMDumperType is for the thread that dumps both heap and non-heap data.
2336   static const size_t VMDumperType = 0;
2337   static const size_t WriterType = 1;
2338   static const size_t DumperType = 2;
2339   // worker id of VMDumper thread.
2340   static const size_t VMDumperWorkerId = 0;
2341 
2342   size_t get_worker_type(uint worker_id) {
2343     assert(_num_writer_threads >= 1, "Must be at least one writer");
    // worker id of the VMDumper, which dumps both heap and non-heap data
2345     if (worker_id == VMDumperWorkerId) {
2346       return VMDumperType;
2347     }
2348 
    // worker ids of dumpers start from 1; dumpers only dump heap data
2350     if (worker_id < _num_dumper_threads) {
2351       return DumperType;
2352     }
2353 
    // worker ids of writers start from _num_dumper_threads
2355     return WriterType;
2356   }
2357 
2358   void prepare_parallel_dump(uint num_total) {
2359     assert (_dumper_controller == NULL, "dumper controller must be NULL");
    assert (num_total > 0, "number of active workers must be >= 1");
2361     // Dumper threads number must not be larger than active workers number.
2362     if (num_total < _num_dumper_threads) {
2363       _num_dumper_threads = num_total - 1;
2364     }
2365     // Calculate dumper and writer threads number.
2366     _num_writer_threads = num_total - _num_dumper_threads;
    // If the dumper thread count is 1, only the VMThread works as a dumper.
    // If it equals the number of active workers, we need at least one worker thread as a writer.
2369     if (_num_dumper_threads > 0 && _num_writer_threads == 0) {
2370       _num_writer_threads = 1;
2371       _num_dumper_threads = num_total - _num_writer_threads;
2372     }
2373     // Prepare parallel writer.
2374     if (_num_dumper_threads > 1) {
2375       ParDumpWriter::before_work();
2376       // Number of dumper threads that only iterate heap.
      uint heap_only_dumper_threads = _num_dumper_threads - 1 /* VMDumper thread */;
      _dumper_controller = new (std::nothrow) DumperController(heap_only_dumper_threads);
2379     }
2380   }
2381 
2382   void finish_parallel_dump() {
2383     if (_num_dumper_threads > 1) {
2384       ParDumpWriter::after_work();
2385     }
2386   }
2387 
2388   // accessors and setters
2389   static VM_HeapDumper* dumper()         {  assert(_global_dumper != NULL, "Error"); return _global_dumper; }
2390   static DumpWriter* writer()            {  assert(_global_writer != NULL, "Error"); return _global_writer; }
2391   void set_global_dumper() {
2392     assert(_global_dumper == NULL, "Error");
2393     _global_dumper = this;
2394   }
2395   void set_global_writer() {
2396     assert(_global_writer == NULL, "Error");
2397     _global_writer = _local_writer;
2398   }
2399   void clear_global_dumper() { _global_dumper = NULL; }
2400   void clear_global_writer() { _global_writer = NULL; }
2401 
2402   bool skip_operation() const;
2403 
2404   // writes a HPROF_LOAD_CLASS record
2405   static void do_load_class(Klass* k);
2406 
2407   // writes a HPROF_GC_CLASS_DUMP record for the given class
2408   static void do_class_dump(Klass* k);
2409 
2410   // HPROF_GC_ROOT_THREAD_OBJ records
2411   int do_thread(JavaThread* thread, u4 thread_serial_num);
2412   void do_threads();
2413 
2414   void add_class_serial_number(Klass* k, int serial_num) {
2415     _klass_map->at_put_grow(serial_num, k);
2416   }
2417 
2418   // HPROF_TRACE and HPROF_FRAME records
2419   void dump_stack_traces();
2420 
2421   // large objects
2422   void dump_large_objects(ObjectClosure* writer);
2423 
2424  public:
2425   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome, uint num_dump_threads) :
2426     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
2427                     GCCause::_heap_dump /* GC Cause */,
2428                     0 /* total full collections, dummy, ignored */,
2429                     gc_before_heap_dump),
2430     WorkerTask("dump heap") {
2431     _local_writer = writer;
2432     _gc_before_heap_dump = gc_before_heap_dump;
2433     _klass_map = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, mtServiceability);
2434     _stack_traces = NULL;
2435     _num_threads = 0;
2436     _num_dumper_threads = num_dump_threads;
2437     _dumper_controller = NULL;
2438     _poi = NULL;
2439     _large_object_list = new (std::nothrow) HeapDumpLargeObjectList();
2440     if (oome) {
2441       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
2442       // get OutOfMemoryError zero-parameter constructor
2443       InstanceKlass* oome_ik = vmClasses::OutOfMemoryError_klass();
2444       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
2445                                                           vmSymbols::void_method_signature());
2446       // get thread throwing OOME when generating the heap dump at OOME
2447       _oome_thread = JavaThread::current();
2448     } else {
2449       _oome_thread = NULL;
2450       _oome_constructor = NULL;
2451     }
2452   }
2453 
2454   ~VM_HeapDumper() {
2455     if (_stack_traces != NULL) {
2456       for (int i=0; i < _num_threads; i++) {
2457         delete _stack_traces[i];
2458       }
2459       FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces);
2460     }
2461     if (_dumper_controller != NULL) {
2462       delete _dumper_controller;
2463       _dumper_controller = NULL;
2464     }
2465     delete _klass_map;
2466     delete _large_object_list;
2467   }
2468 
2469   VMOp_Type type() const { return VMOp_HeapDumper; }
2470   void doit();
2471   void work(uint worker_id);
2472 };
2473 
2474 VM_HeapDumper* VM_HeapDumper::_global_dumper = NULL;
2475 DumpWriter*    VM_HeapDumper::_global_writer = NULL;
2476 
2477 bool VM_HeapDumper::skip_operation() const {
2478   return false;
2479 }
2480 
2481 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2482 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2483   writer->finish_dump_segment();
2484 
2485   writer->write_u1(HPROF_HEAP_DUMP_END);
2486   writer->write_u4(0);
2487   writer->write_u4(0);
2488 }
2489 
2490 // writes a HPROF_LOAD_CLASS record for the class
2491 void VM_HeapDumper::do_load_class(Klass* k) {
2492   static u4 class_serial_num = 0;
2493 
2494   // len of HPROF_LOAD_CLASS record
2495   u4 remaining = 2*oopSize + 2*sizeof(u4);
2496 
2497   DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
2498 
2499   // class serial number is just a number
2500   writer()->write_u4(++class_serial_num);
2501 
2502   // class ID
2503   writer()->write_classID(k);
2504 
2505   // add the Klass* and class serial number pair
2506   dumper()->add_class_serial_number(k, class_serial_num);
2507 
2508   writer()->write_u4(STACK_TRACE_ID);
2509 
2510   // class name ID
2511   Symbol* name = k->name();
2512   writer()->write_symbolID(name);
2513 }
2514 
2515 // writes a HPROF_GC_CLASS_DUMP record for the given class
2516 void VM_HeapDumper::do_class_dump(Klass* k) {
2517   if (k->is_instance_klass()) {
2518     DumperSupport::dump_instance_class(writer(), k);
2519   } else {
2520     DumperSupport::dump_array_class(writer(), k);
2521   }
2522 }
2523 
2524 // Walk the stack of the given thread.
2525 // Dumps a HPROF_GC_ROOT_JAVA_FRAME record for each local
2526 // Dumps a HPROF_GC_ROOT_JNI_LOCAL record for each JNI local
2527 //
2528 // It returns the number of Java frames in this thread stack
2529 int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
2530   JNILocalsDumper blk(writer(), thread_serial_num);
2531 
2532   oop threadObj = java_thread->threadObj();
2533   assert(threadObj != NULL, "sanity check");
2534 
2535   int stack_depth = 0;
2536   if (java_thread->has_last_Java_frame()) {
2537 
2538     // vframes are resource allocated
2539     Thread* current_thread = Thread::current();
2540     ResourceMark rm(current_thread);
2541     HandleMark hm(current_thread);
2542 
2543     RegisterMap reg_map(java_thread);
2544     frame f = java_thread->last_frame();
2545     vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
2546     frame* last_entry_frame = NULL;
2547     int extra_frames = 0;
2548 
2549     if (java_thread == _oome_thread && _oome_constructor != NULL) {
2550       extra_frames++;
2551     }
2552     while (vf != NULL) {
2553       blk.set_frame_number(stack_depth);
2554       if (vf->is_java_frame()) {
2555 
2556         // java frame (interpreted, compiled, ...)
2557         javaVFrame *jvf = javaVFrame::cast(vf);
2558         if (!(jvf->method()->is_native())) {
2559           StackValueCollection* locals = jvf->locals();
2560           for (int slot=0; slot<locals->size(); slot++) {
2561             if (locals->at(slot)->type() == T_OBJECT) {
2562               oop o = locals->obj_at(slot)();
2563 
2564               if (o != NULL) {
2565                 u4 size = 1 + sizeof(address) + 4 + 4;
2566                 writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
2567                 writer()->write_objectID(o);
2568                 writer()->write_u4(thread_serial_num);
2569                 writer()->write_u4((u4) (stack_depth + extra_frames));
2570                 writer()->end_sub_record();
2571               }
2572             }
2573           }
2574           StackValueCollection *exprs = jvf->expressions();
2575           for(int index = 0; index < exprs->size(); index++) {
2576             if (exprs->at(index)->type() == T_OBJECT) {
2577                oop o = exprs->obj_at(index)();
2578                if (o != NULL) {
2579                  u4 size = 1 + sizeof(address) + 4 + 4;
2580                  writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
2581                  writer()->write_objectID(o);
2582                  writer()->write_u4(thread_serial_num);
2583                  writer()->write_u4((u4) (stack_depth + extra_frames));
2584                  writer()->end_sub_record();
2585                }
2586              }
2587           }
2588         } else {
2589           // native frame
2590           if (stack_depth == 0) {
2591             // JNI locals for the top frame.
2592             java_thread->active_handles()->oops_do(&blk);
2593           } else {
2594             if (last_entry_frame != NULL) {
2595               // JNI locals for the entry frame
2596               assert(last_entry_frame->is_entry_frame(), "checking");
2597               last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
2598             }
2599           }
2600         }
2601         // increment only for Java frames
2602         stack_depth++;
2603         last_entry_frame = NULL;
2604 
2605       } else {
2606         // externalVFrame - if it's an entry frame then report any JNI locals
2607         // as roots when we find the corresponding native javaVFrame
2608         frame* fr = vf->frame_pointer();
2609         assert(fr != NULL, "sanity check");
2610         if (fr->is_entry_frame()) {
2611           last_entry_frame = fr;
2612         }
2613       }
2614       vf = vf->sender();
2615     }
2616   } else {
2617     // no last java frame but there may be JNI locals
2618     java_thread->active_handles()->oops_do(&blk);
2619   }
2620   return stack_depth;
2621 }
2622 
2623 
2624 // write a HPROF_GC_ROOT_THREAD_OBJ record for each java thread. Then walk
2625 // the stack so that locals and JNI locals are dumped.
2626 void VM_HeapDumper::do_threads() {
2627   for (int i=0; i < _num_threads; i++) {
2628     JavaThread* thread = _stack_traces[i]->thread();
2629     oop threadObj = thread->threadObj();
2630     u4 thread_serial_num = i+1;
2631     u4 stack_serial_num = thread_serial_num + STACK_TRACE_ID;
2632     u4 size = 1 + sizeof(address) + 4 + 4;
2633     writer()->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
2634     writer()->write_objectID(threadObj);
2635     writer()->write_u4(thread_serial_num);  // thread number
2636     writer()->write_u4(stack_serial_num);   // stack trace serial number
2637     writer()->end_sub_record();
2638     int num_frames = do_thread(thread, thread_serial_num);
2639     assert(num_frames == _stack_traces[i]->get_stack_depth(),
2640            "total number of Java frames not matched");
2641   }
2642 }
2643 
2644 
2645 // The VM operation that dumps the heap. The dump consists of the following
2646 // records:
2647 //
2648 //  HPROF_HEADER
2649 //  [HPROF_UTF8]*
2650 //  [HPROF_LOAD_CLASS]*
2651 //  [[HPROF_FRAME]*|HPROF_TRACE]*
2652 //  [HPROF_GC_CLASS_DUMP]*
2653 //  [HPROF_HEAP_DUMP_SEGMENT]*
2654 //  HPROF_HEAP_DUMP_END
2655 //
// The HPROF_TRACE records represent the stack traces at the point where the
// heap dump is generated, plus a "dummy trace" record which does not include
// any frames. The dummy trace record is referenced as the allocation site for
// objects whose allocation site is unknown.
2660 //
2661 // Each HPROF_HEAP_DUMP_SEGMENT record has a length followed by sub-records.
// To allow the heap dump to be generated in a single pass we remember the position
2663 // of the dump length and fix it up after all sub-records have been written.
2664 // To generate the sub-records we iterate over the heap, writing
2665 // HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
2666 // records as we go. Once that is done we write records for some of the GC
2667 // roots.
2668 
2669 void VM_HeapDumper::doit() {
2670 
2671   CollectedHeap* ch = Universe::heap();
2672 
2673   ch->ensure_parsability(false); // must happen, even if collection does
2674                                  // not happen (e.g. due to GCLocker)
2675 
2676   if (_gc_before_heap_dump) {
2677     if (GCLocker::is_active()) {
2678       warning("GC locker is held; pre-heapdump GC was skipped");
2679     } else {
2680       ch->collect_as_vm_thread(GCCause::_heap_dump);
2681     }
2682   }
2683 
2684   // At this point we should be the only dumper active, so
2685   // the following should be safe.
2686   set_global_dumper();
2687   set_global_writer();
2688 
2689   WorkerThreads* workers = ch->safepoint_workers();
2690 
2691   if (workers == NULL) {
2692     // Use serial dump, set dumper threads and writer threads number to 1.
2693     _num_dumper_threads=1;
2694     _num_writer_threads=1;
2695     work(0);
2696   } else {
2697     prepare_parallel_dump(workers->active_workers());
2698     if (_num_dumper_threads > 1) {
2699       ParallelObjectIterator poi(_num_dumper_threads);
2700       _poi = &poi;
2701       workers->run_task(this);
2702       _poi = NULL;
2703     } else {
2704       workers->run_task(this);
2705     }
2706     finish_parallel_dump();
2707   }
2708 
2709   // Now we clear the global variables, so that a future dumper can run.
2710   clear_global_dumper();
2711   clear_global_writer();
2712 }
2713 
2714 void VM_HeapDumper::work(uint worker_id) {
2715   if (worker_id != 0) {
2716     if (get_worker_type(worker_id) == WriterType) {
2717       writer()->writer_loop();
2718       return;
2719     }
2720     if (_num_dumper_threads > 1 && get_worker_type(worker_id) == DumperType) {
2721       _dumper_controller->wait_for_start_signal();
2722     }
2723   } else {
    // Worker 0 performs all non-heap data dumping and part of the heap iteration.
2725     // Write the file header - we always use 1.0.2
2726     const char* header = "JAVA PROFILE 1.0.2";
2727 
    // header is a few bytes long - no chance to overflow int
2729     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2730     writer()->write_u4(oopSize);
2731     // timestamp is current time in ms
2732     writer()->write_u8(os::javaTimeMillis());
2733     // HPROF_UTF8 records
2734     SymbolTableDumper sym_dumper(writer());
2735     SymbolTable::symbols_do(&sym_dumper);
2736 
2737     // HPROF_UTF8 records for inlined field names.
2738     inlined_objects()->init();
2739     inlined_objects()->dump_inlined_field_names(writer());
2740 
2741     // HPROF_INLINED_FIELDS
2742     inlined_objects()->dump_classed_with_inlined_fields(writer());
2743 
2744     // write HPROF_LOAD_CLASS records
2745     {
2746       LockedClassesDo locked_load_classes(&do_load_class);
2747       ClassLoaderDataGraph::classes_do(&locked_load_classes);
2748     }
2749 
2750     // write HPROF_FRAME and HPROF_TRACE records
2751     // this must be called after _klass_map is built when iterating the classes above.
2752     dump_stack_traces();
2753 
2754     // Writes HPROF_GC_CLASS_DUMP records
2755     {
2756       LockedClassesDo locked_dump_class(&do_class_dump);
2757       ClassLoaderDataGraph::classes_do(&locked_dump_class);
2758     }
2759 
2760     // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
2761     do_threads();
2762 
2763     // HPROF_GC_ROOT_JNI_GLOBAL
2764     JNIGlobalsDumper jni_dumper(writer());
2765     JNIHandles::oops_do(&jni_dumper);
2766     // technically not jni roots, but global roots
2767     // for things like preallocated throwable backtraces
2768     Universe::vm_global()->oops_do(&jni_dumper);
2769     // HPROF_GC_ROOT_STICKY_CLASS
2770     // These should be classes in the NULL class loader data, and not all classes
2771     // if !ClassUnloading
2772     StickyClassDumper class_dumper(writer());
2773     ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
2774   }
2775   // writes HPROF_GC_INSTANCE_DUMP records.
2776   // After each sub-record is written check_segment_length will be invoked
2777   // to check if the current segment exceeds a threshold. If so, a new
2778   // segment is started.
2779   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2780   // of the heap dump.
2781   if (_num_dumper_threads <= 1) {
2782     HeapObjectDumper obj_dumper(writer());
2783     Universe::heap()->object_iterate(&obj_dumper);
2784   } else {
2785     assert(get_worker_type(worker_id) == DumperType
2786           || get_worker_type(worker_id) == VMDumperType,
2787           "must be dumper thread to do heap iteration");
2788     if (get_worker_type(worker_id) == VMDumperType) {
2789       // Clear global writer's buffer.
2790       writer()->finish_dump_segment(true);
2791       // Notify dumpers to start heap iteration.
2792       _dumper_controller->start_dump();
2793     }
2794     // Heap iteration.
2795     {
2796        ParDumpWriter pw(writer());
2797        {
2798          HeapObjectDumper obj_dumper(&pw, _large_object_list);
2799          _poi->object_iterate(&obj_dumper, worker_id);
2800        }
2801 
2802        if (get_worker_type(worker_id) == VMDumperType) {
2803          _dumper_controller->wait_all_dumpers_complete();
         // clear the internal buffer
2805          pw.finish_dump_segment(true);
         // refresh the global writer's buffer and position
2807          writer()->refresh();
2808        } else {
2809          pw.finish_dump_segment(true);
2810          _dumper_controller->dumper_complete();
2811          return;
2812        }
2813     }
2814   }
2815 
2816   assert(get_worker_type(worker_id) == VMDumperType, "Heap dumper must be VMDumper");
2817   // Use writer() rather than ParDumpWriter to avoid memory consumption.
2818   HeapObjectDumper obj_dumper(writer());
2819   dump_large_objects(&obj_dumper);
2820   // Writes the HPROF_HEAP_DUMP_END record.
2821   DumperSupport::end_of_dump(writer());
2822 
2823   inlined_objects()->dump_flat_arrays(writer());
2824 
2825   // We are done with writing. Release the worker threads.
2826   writer()->deactivate();
2827 
2828   inlined_objects()->release();
2829 }
2830 
2831 void VM_HeapDumper::dump_stack_traces() {
2832   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
2833   DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
2834   writer()->write_u4((u4) STACK_TRACE_ID);
2835   writer()->write_u4(0);                    // thread number
2836   writer()->write_u4(0);                    // frame count
2837 
2838   _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
2839   int frame_serial_num = 0;
2840   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
2841     oop threadObj = thread->threadObj();
2842     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
2843       // dump thread stack trace
2844       Thread* current_thread = Thread::current();
2845       ResourceMark rm(current_thread);
2846       HandleMark hm(current_thread);
2847 
2848       ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
2849       stack_trace->dump_stack_at_safepoint(-1, /* ObjectMonitorsHashtable is not needed here */ nullptr);
2850       _stack_traces[_num_threads++] = stack_trace;
2851 
2852       // write HPROF_FRAME records for this thread's stack trace
2853       int depth = stack_trace->get_stack_depth();
2854       int thread_frame_start = frame_serial_num;
2855       int extra_frames = 0;
2856       // write a fake frame that makes it look like the thread that caused the
2857       // OOME is in the OutOfMemoryError zero-parameter constructor
2858       if (thread == _oome_thread && _oome_constructor != NULL) {
2859         int oome_serial_num = _klass_map->find(_oome_constructor->method_holder());
2860         // the class serial number starts from 1
2861         assert(oome_serial_num > 0, "OutOfMemoryError class not found");
2862         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, oome_serial_num,
2863                                         _oome_constructor, 0);
2864         extra_frames++;
2865       }
2866       for (int j=0; j < depth; j++) {
2867         StackFrameInfo* frame = stack_trace->stack_frame_at(j);
2868         Method* m = frame->method();
2869         int class_serial_num = _klass_map->find(m->method_holder());
2870         // the class serial number starts from 1
2871         assert(class_serial_num > 0, "class not found");
2872         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci());
2873       }
2874       depth += extra_frames;
2875 
2876       // write the HPROF_TRACE record for this thread, referencing the frames above by serial number
2877       DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize);
2878       int stack_serial_num = _num_threads + STACK_TRACE_ID;
2879       writer()->write_u4(stack_serial_num);      // stack trace serial number
2880       writer()->write_u4((u4) _num_threads);     // thread serial number
2881       writer()->write_u4(depth);                 // frame count
2882       for (int j=1; j <= depth; j++) {
2883         writer()->write_id(thread_frame_start + j);
2884       }
2885     }
2886   }
2887 }
2888 
2889 // dump the large objects collected during heap iteration.
2890 void VM_HeapDumper::dump_large_objects(ObjectClosure* cl) {
2891   _large_object_list->drain(cl);
2892 }
2893 
2894 // dump the heap to the given path.
2895 int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
2896   assert(path != NULL && strlen(path) > 0, "path missing");
2897 
2898   // print message in interactive case
2899   if (out != NULL) {
2900     out->print_cr("Dumping heap to %s ...", path);
2901     timer()->start();
2902   }
2903   // create JFR event
2904   EventHeapDump event;
2905 
2906   AbstractCompressor* compressor = NULL;
2907 
2908   if (compression > 0) {
2909     compressor = new (std::nothrow) GZipCompressor(compression);
2910 
2911     if (compressor == NULL) {
2912       set_error("Could not allocate gzip compressor");
2913       return -1;
2914     }
2915   }
2916 
2917   DumpWriter writer(new (std::nothrow) FileWriter(path, overwrite), compressor);
2918 
2919   if (writer.error() != NULL) {
2920     set_error(writer.error());
2921     if (out != NULL) {
2922       out->print_cr("Unable to create %s: %s", path,
2923         (error() != NULL) ? error() : "reason unknown");
2924     }
2925     return -1;
2926   }
2927 
2928   // generate the dump
2929   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
2930   if (Thread::current()->is_VM_thread()) {
2931     assert(SafepointSynchronize::is_at_safepoint(), "Expected to be called at a safepoint");
2932     dumper.doit();
2933   } else {
2934     VMThread::execute(&dumper);
2935   }
2936 
2937   // record any error that the writer may have encountered
2938   set_error(writer.error());
2939 
2940   // emit JFR event
2941   if (error() == NULL) {
2942     event.set_destination(path);
2943     event.set_gcBeforeDump(_gc_before_heap_dump);
2944     event.set_size(writer.bytes_written());
2945     event.set_onOutOfMemoryError(_oome);
2946     event.commit();
2947   }
2948 
2949   // print message in interactive case
2950   if (out != NULL) {
2951     timer()->stop();
2952     if (error() == NULL) {
2953       out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
2954                     writer.bytes_written(), timer()->seconds());
2955     } else {
2956       out->print_cr("Dump file is incomplete: %s", writer.error());
2957     }
2958   }
2959 
2960   return (writer.error() == NULL) ? 0 : -1;
2961 }
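
// A minimal usage sketch (the path below is hypothetical):
//
//   HeapDumper dumper(true /* request a GC before dumping */);
//   if (dumper.dump("/tmp/example.hprof", tty, 0 /* no compression */) != 0) {
//     // error_as_C_string() returns the failure reason (resource allocated)
//   }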
2962 
2963 // stop timer (if still active), and free any error string we might be holding
2964 HeapDumper::~HeapDumper() {
2965   if (timer()->is_active()) {
2966     timer()->stop();
2967   }
2968   set_error(NULL);
2969 }
2970 
2971 
2972 // returns the error string (resource allocated), or NULL
2973 char* HeapDumper::error_as_C_string() const {
2974   if (error() != NULL) {
2975     char* str = NEW_RESOURCE_ARRAY(char, strlen(error())+1);
2976     strcpy(str, error());
2977     return str;
2978   } else {
2979     return NULL;
2980   }
2981 }
2982 
2983 // set the error string
2984 void HeapDumper::set_error(char const* error) {
2985   if (_error != NULL) {
2986     os::free(_error);
2987   }
2988   if (error == NULL) {
2989     _error = NULL;
2990   } else {
2991     _error = os::strdup(error);
2992     assert(_error != NULL, "allocation failure");
2993   }
2994 }
2995 
2996 // Called by out-of-memory error reporting by a single Java thread
2997 // outside of a JVM safepoint
2998 void HeapDumper::dump_heap_from_oome() {
2999   HeapDumper::dump_heap(true);
3000 }
3001 
3002 // Called by error reporting by a single Java thread outside of a JVM safepoint,
3003 // or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
3004 // callers are strictly serialized and guaranteed not to interfere below. For more
3005 // general use, however, this method will need modification to prevent
3006 // interference when updating the static variables base_path and dump_file_seq below.
3007 void HeapDumper::dump_heap() {
3008   HeapDumper::dump_heap(false);
3009 }
3010 
3011 void HeapDumper::dump_heap(bool oome) {
3012   static char base_path[JVM_MAXPATHLEN] = {'\0'};
3013   static uint dump_file_seq = 0;
3014   char* my_path;
3015   const int max_digit_chars = 20;
3016 
3017   const char* dump_file_name = "java_pid";
3018   const char* dump_file_ext  = HeapDumpGzipLevel > 0 ? ".hprof.gz" : ".hprof";
3019 
3020   // The dump file defaults to java_pid<pid>.hprof in the current working
3021   // directory. HeapDumpPath=<file> can be used to specify an alternative
3022   // dump file name or a directory where the dump file is created.
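  //
  // For example, with HeapDumpPath=/tmp and a (hypothetical) pid of 1234:
  //   first dump:       /tmp/java_pid1234.hprof  (.hprof.gz if HeapDumpGzipLevel > 0)
  //   subsequent dumps: /tmp/java_pid1234.hprof.1, /tmp/java_pid1234.hprof.2, ...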
3023   if (dump_file_seq == 0) { // first time in, we initialize base_path
3024     // Calculate potentially longest base path and check if we have enough
3025     // allocated statically.
3026     const size_t total_length =
3027                       (HeapDumpPath == NULL ? 0 : strlen(HeapDumpPath)) +
3028                       strlen(os::file_separator()) + max_digit_chars +
3029                       strlen(dump_file_name) + strlen(dump_file_ext) + 1;
3030     if (total_length > sizeof(base_path)) {
3031       warning("Cannot create heap dump file.  HeapDumpPath is too long.");
3032       return;
3033     }
3034 
3035     bool use_default_filename = true;
3036     if (HeapDumpPath == NULL || HeapDumpPath[0] == '\0') {
3037       // HeapDumpPath=<file> not specified
3038     } else {
3039       strcpy(base_path, HeapDumpPath);
3040       // check if the path is a directory (must exist)
3041       DIR* dir = os::opendir(base_path);
3042       if (dir == NULL) {
3043         use_default_filename = false;
3044       } else {
3045         // HeapDumpPath specified a directory. We append a file separator
3046         // (if needed).
3047         os::closedir(dir);
3048         size_t fs_len = strlen(os::file_separator());
3049         if (strlen(base_path) >= fs_len) {
3050           char* end = base_path;
3051           end += (strlen(base_path) - fs_len);
3052           if (strcmp(end, os::file_separator()) != 0) {
3053             strcat(base_path, os::file_separator());
3054           }
3055         }
3056       }
3057     }
3058     // If HeapDumpPath wasn't a file name, then we append the default name
3059     if (use_default_filename) {
3060       const size_t dlen = strlen(base_path);  // if heap dump dir specified
3061       jio_snprintf(&base_path[dlen], sizeof(base_path)-dlen, "%s%d%s",
3062                    dump_file_name, os::current_process_id(), dump_file_ext);
3063     }
3064     const size_t len = strlen(base_path) + 1;
3065     my_path = (char*)os::malloc(len, mtInternal);
3066     if (my_path == NULL) {
3067       warning("Cannot create heap dump file.  Out of system memory.");
3068       return;
3069     }
3070     strncpy(my_path, base_path, len);
3071   } else {
3072     // Append a sequence number id for dumps following the first
3073     const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
3074     my_path = (char*)os::malloc(len, mtInternal);
3075     if (my_path == NULL) {
3076       warning("Cannot create heap dump file.  Out of system memory.");
3077       return;
3078     }
3079     jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
3080   }
3081   dump_file_seq++;   // increment seq number for next time we dump
3082 
3083   HeapDumper dumper(false /* no GC before heap dump */,
3084                     oome  /* pass along out-of-memory-error flag */);
3085   dumper.dump(my_path, tty, HeapDumpGzipLevel);
3086   os::free(my_path);
3087 }