< prev index next >

src/hotspot/share/services/heapDumper.cpp

Print this page

  25 
  26 #include "precompiled.hpp"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "classfile/vmClasses.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/gcVMOperations.hpp"
  35 #include "gc/shared/workerThread.hpp"
  36 #include "jfr/jfrEvents.hpp"
  37 #include "jvm.h"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/fieldStreams.inline.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/objArrayOop.inline.hpp"


  45 #include "oops/oop.inline.hpp"
  46 #include "oops/typeArrayOop.inline.hpp"
  47 #include "runtime/continuationWrapper.inline.hpp"

  48 #include "runtime/frame.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/javaThread.inline.hpp"
  52 #include "runtime/jniHandles.hpp"
  53 #include "runtime/os.hpp"
  54 #include "runtime/threads.hpp"
  55 #include "runtime/threadSMR.hpp"
  56 #include "runtime/vframe.hpp"
  57 #include "runtime/vmOperations.hpp"
  58 #include "runtime/vmThread.hpp"
  59 #include "runtime/timerTrace.hpp"
  60 #include "services/heapDumper.hpp"
  61 #include "services/heapDumperCompression.hpp"
  62 #include "services/threadService.hpp"
  63 #include "utilities/checkedCast.hpp"
  64 #include "utilities/macros.hpp"
  65 #include "utilities/ostream.hpp"
  66 #ifdef LINUX
  67 #include "os_linux.hpp"

 299  *                                     7:  double array
 300  *                                     8:  byte array
 301  *                                     9:  short array
 302  *                                     10: int array
 303  *                                     11: long array
 304  *                          [u1]*      elements
 305  *
 306  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 307  *
 308  *                u4        total number of samples
 309  *                u4        # of traces
 310  *               [u4        # of samples
 311  *                u4]*      stack trace serial number
 312  *
 313  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 314  *
 315  *                u4        0x00000001: alloc traces on/off
 316  *                          0x00000002: cpu sampling on/off
 317  *                u2        stack trace depth
 318  *






















 319  *
 320  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 321  * be generated as a sequence of heap dump segments. This sequence is
 322  * terminated by an end record. The additional tags allowed by format
 323  * "JAVA PROFILE 1.0.2" are:
 324  *
 325  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 326  *
 327  *               [heap dump sub-records]*
 328  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 329  *
 330  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 331  *
 332  */
 333 
 334 
 335 // HPROF tags
 336 
 337 enum hprofTag : u1 {
 338   // top-level records
 339   HPROF_UTF8                    = 0x01,
 340   HPROF_LOAD_CLASS              = 0x02,
 341   HPROF_UNLOAD_CLASS            = 0x03,
 342   HPROF_FRAME                   = 0x04,
 343   HPROF_TRACE                   = 0x05,
 344   HPROF_ALLOC_SITES             = 0x06,
 345   HPROF_HEAP_SUMMARY            = 0x07,
 346   HPROF_START_THREAD            = 0x0A,
 347   HPROF_END_THREAD              = 0x0B,
 348   HPROF_HEAP_DUMP               = 0x0C,
 349   HPROF_CPU_SAMPLES             = 0x0D,
 350   HPROF_CONTROL_SETTINGS        = 0x0E,
 351 
 352   // 1.0.2 record types
 353   HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
 354   HPROF_HEAP_DUMP_END           = 0x2C,
 355 







 356   // field types
 357   HPROF_ARRAY_OBJECT            = 0x01,
 358   HPROF_NORMAL_OBJECT           = 0x02,
 359   HPROF_BOOLEAN                 = 0x04,
 360   HPROF_CHAR                    = 0x05,
 361   HPROF_FLOAT                   = 0x06,
 362   HPROF_DOUBLE                  = 0x07,
 363   HPROF_BYTE                    = 0x08,
 364   HPROF_SHORT                   = 0x09,
 365   HPROF_INT                     = 0x0A,
 366   HPROF_LONG                    = 0x0B,
 367 
 368   // data-dump sub-records
 369   HPROF_GC_ROOT_UNKNOWN         = 0xFF,
 370   HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
 371   HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
 372   HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
 373   HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
 374   HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
 375   HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
 376   HPROF_GC_ROOT_MONITOR_USED    = 0x07,
 377   HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
 378   HPROF_GC_CLASS_DUMP           = 0x20,
 379   HPROF_GC_INSTANCE_DUMP        = 0x21,
 380   HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
 381   HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
 382 };
 383 
 384 // Default stack trace ID (used for dummy HPROF_TRACE record)
 385 enum {
 386   STACK_TRACE_ID = 1,
 387   INITIAL_CLASS_COUNT = 200
 388 };
 389 

































































 390 // Supports I/O operations for a dump
 391 // Base class for dump and parallel dump
 392 class AbstractDumpWriter : public CHeapObj<mtInternal> {
 393  protected:
 394   enum {
 395     io_buffer_max_size = 1*M,
 396     dump_segment_header_size = 9
 397   };
 398 
 399   char* _buffer;    // internal buffer
 400   size_t _size;
 401   size_t _pos;
 402 
 403   bool _in_dump_segment; // Are we currently in a dump segment?
 404   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 405   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 406   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 407 
 408   char* buffer() const                          { return _buffer; }
 409   size_t buffer_size() const                    { return _size; }

 722   }
 723 }
 724 
 725 class DumperClassCacheTable;
 726 class DumperClassCacheTableEntry;
 727 
 728 // Support class with a collection of functions used when dumping the heap
 729 class DumperSupport : AllStatic {
 730  public:
 731 
 732   // write a header of the given type
 733   static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);
 734 
 735   // returns hprof tag for the given type signature
 736   static hprofTag sig2tag(Symbol* sig);
 737   // returns hprof tag for the given basic type
 738   static hprofTag type2tag(BasicType type);
 739   // Returns the size of the data to write.
 740   static u4 sig2size(Symbol* sig);
 741 
 742   // returns the size of the instance of the given class
 743   static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);
 744 
 745   // dump a jfloat
 746   static void dump_float(AbstractDumpWriter* writer, jfloat f);
 747   // dump a jdouble
 748   static void dump_double(AbstractDumpWriter* writer, jdouble d);
 749   // dumps the raw value of the given field
 750   static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
 751   // returns the size of the static fields; also counts the static fields
 752   static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
 753   // dumps static fields of the given class
 754   static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
 755   // dump the raw values of the instance fields of the given object
 756   static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);






 757   // get the count of the instance fields for a given class
 758   static u2 get_instance_fields_count(InstanceKlass* ik);
 759   // dumps the definition of the instance fields for a given class
 760   static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
 761   // creates HPROF_GC_INSTANCE_DUMP record for the given object
 762   static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
 763   // creates HPROF_GC_CLASS_DUMP record for the given instance class
 764   static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
 765   // creates HPROF_GC_CLASS_DUMP record for a given array class
 766   static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
 767 
 768   // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
 769   static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);


 770   // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
 771   static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
 772   // create HPROF_FRAME record for the given method and bci
 773   static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
 774 
 775   // check if we need to truncate an array
 776   static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);



 777 
 778   // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
 779   static void end_of_dump(AbstractDumpWriter* writer);
 780 
 781   static oop mask_dormant_archived_object(oop o, oop ref_obj) {
 782     if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
 783       // Ignore this object since the corresponding java mirror is not loaded.
 784       // Might be a dormant archive object.
 785       report_dormant_archived_object(o, ref_obj);
 786       return nullptr;
 787     } else {
 788       return o;
 789     }
 790   }
 791 










 792   static void report_dormant_archived_object(oop o, oop ref_obj) {
 793     if (log_is_enabled(Trace, cds, heap)) {
 794       ResourceMark rm;
 795       if (ref_obj != nullptr) {
 796         log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
 797                   p2i(o), o->klass()->external_name(),
 798                   p2i(ref_obj), ref_obj->klass()->external_name());
 799       } else {
 800         log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
 801                   p2i(o), o->klass()->external_name());
 802       }
 803     }
 804   }
 805 };
 806 
// Hash table of klasses to the klass metadata. This should greatly improve the
// heap dumping performance. This hash table is supposed to be used by a single
// thread only.
//
 811 class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
 812   friend class DumperClassCacheTable;
 813 private:
 814   GrowableArray<char> _sigs_start;
 815   GrowableArray<int> _offsets;

 816   u4 _instance_size;
 817   int _entries;
 818 
 819 public:
 820   DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};
 821 
 822   int field_count()             { return _entries; }
 823   char sig_start(int field_idx) { return _sigs_start.at(field_idx); }



 824   int offset(int field_idx)     { return _offsets.at(field_idx); }
 825   u4 instance_size()            { return _instance_size; }
 826 };
 827 
 828 class DumperClassCacheTable {
 829 private:
 830   // ResourceHashtable SIZE is specified at compile time so we
 831   // use 1031 which is the first prime after 1024.
 832   static constexpr size_t TABLE_SIZE = 1031;
 833 
 834   // Maintain the cache for N classes. This limits memory footprint
 835   // impact, regardless of how many classes we have in the dump.
 836   // This also improves look up performance by keeping the statically
 837   // sized table from overloading.
 838   static constexpr int CACHE_TOP = 256;
 839 
 840   typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
 841                             TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
 842   PtrTable* _ptrs;
 843 

 852       bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
 853         delete entry;
 854         return true;
 855       }
 856     } cleanup;
 857     table->unlink(&cleanup);
 858   }
 859 
 860 public:
 861   DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
 862     if (_last_ik == ik) {
 863       return _last_entry;
 864     }
 865 
 866     DumperClassCacheTableEntry* entry;
 867     DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
 868     if (from_cache == nullptr) {
 869       entry = new DumperClassCacheTableEntry();
 870       for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
 871         if (!fld.access_flags().is_static()) {
 872           Symbol* sig = fld.signature();
 873           entry->_sigs_start.push(sig->char_at(0));









 874           entry->_offsets.push(fld.offset());
 875           entry->_entries++;
 876           entry->_instance_size += DumperSupport::sig2size(sig);
 877         }
 878       }
 879 
 880       if (_ptrs->number_of_entries() >= CACHE_TOP) {
 881         // We do not track the individual hit rates for table entries.
 882         // Purge the entire table, and let the cache catch up with new
 883         // distribution.
 884         unlink_all(_ptrs);
 885       }
 886 
 887       _ptrs->put(ik, entry);
 888     } else {
 889       entry = *from_cache;
 890     }
 891 
 892     // Remember for single-slot cache.
 893     _last_ik = ik;
 894     _last_entry = entry;
 895 
 896     return entry;

 966 }
 967 
 968 // dump a jfloat
 969 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
 970   if (g_isnan(f)) {
 971     writer->write_u4(0x7fc00000); // collapsing NaNs
 972   } else {
 973     writer->write_u4(bit_cast<u4>(f));
 974   }
 975 }
 976 
 977 // dump a jdouble
 978 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
 979   if (g_isnan(d)) {
 980     writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
 981   } else {
 982     writer->write_u8(bit_cast<u8>(d));
 983   }
 984 }
 985 

 986 // dumps the raw value of the given field
 987 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
 988   switch (type) {
 989     case JVM_SIGNATURE_CLASS :
 990     case JVM_SIGNATURE_ARRAY : {
 991       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
 992       o = mask_dormant_archived_object(o, obj);
 993       assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
 994       writer->write_objectID(o);
 995       break;
 996     }
 997     case JVM_SIGNATURE_BYTE : {
 998       jbyte b = obj->byte_field(offset);
 999       writer->write_u1(b);
1000       break;
1001     }
1002     case JVM_SIGNATURE_CHAR : {
1003       jchar c = obj->char_field(offset);
1004       writer->write_u2(c);
1005       break;

1024       writer->write_u4(i);
1025       break;
1026     }
1027     case JVM_SIGNATURE_LONG : {
1028       jlong l = obj->long_field(offset);
1029       writer->write_u8(l);
1030       break;
1031     }
1032     case JVM_SIGNATURE_BOOLEAN : {
1033       jboolean b = obj->bool_field(offset);
1034       writer->write_u1(b);
1035       break;
1036     }
1037     default : {
1038       ShouldNotReachHere();
1039       break;
1040     }
1041   }
1042 }
1043 
1044 // returns the size of the instance of the given class
1045 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1046   if (class_cache_entry != nullptr) {
1047     return class_cache_entry->instance_size();
1048   } else {
1049     u4 size = 0;
1050     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1051       if (!fld.access_flags().is_static()) {
1052         size += sig2size(fld.signature());




1053       }
1054     }
1055     return size;
1056   }
1057 }
1058 
1059 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1060   field_count = 0;
1061   u4 size = 0;
1062 
1063   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1064     if (fldc.access_flags().is_static()) {


1065       field_count++;
1066       size += sig2size(fldc.signature());
1067     }
1068   }
1069 
1070   // Add in resolved_references which is referenced by the cpCache
1071   // The resolved_references is an array per InstanceKlass holding the
1072   // strings and other oops resolved from the constant pool.
1073   oop resolved_references = ik->constants()->resolved_references_or_null();
1074   if (resolved_references != nullptr) {
1075     field_count++;
1076     size += sizeof(address);
1077 
1078     // Add in the resolved_references of the used previous versions of the class
1079     // in the case of RedefineClasses
1080     InstanceKlass* prev = ik->previous_versions();
1081     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1082       field_count++;
1083       size += sizeof(address);
1084       prev = prev->previous_versions();
1085     }
1086   }
1087 
1088   // We write the value itself plus a name and a one byte type tag per field.
1089   return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1090 }
1091 
1092 // dumps static fields of the given class
1093 void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
1094   InstanceKlass* ik = InstanceKlass::cast(k);
1095 
1096   // dump the field descriptors and raw values
1097   for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1098     if (fld.access_flags().is_static()) {


1099       Symbol* sig = fld.signature();
1100 
1101       writer->write_symbolID(fld.name());   // name
1102       writer->write_u1(sig2tag(sig));       // type
1103 
1104       // value
1105       dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1106     }
1107   }
1108 
1109   // Add resolved_references for each class that has them
1110   oop resolved_references = ik->constants()->resolved_references_or_null();
1111   if (resolved_references != nullptr) {
1112     writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1113     writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1114     writer->write_objectID(resolved_references);
1115 
1116     // Also write any previous versions
1117     InstanceKlass* prev = ik->previous_versions();
1118     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1119       writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1120       writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1121       writer->write_objectID(prev->constants()->resolved_references());
1122       prev = prev->previous_versions();
1123     }
1124   }
1125 }
1126 
1127 // dump the raw values of the instance fields of the given object
1128 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {


1129   assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1130   for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1131     dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));







1132   }
1133 }
1134 
// returns the count of the instance fields for a given class





1136 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1137   u2 field_count = 0;
1138 
1139   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1140     if (!fldc.access_flags().is_static()) field_count++;







1141   }
1142 
1143   return field_count;
1144 }
1145 
1146 // dumps the definition of the instance fields for a given class
1147 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
1148   InstanceKlass* ik = InstanceKlass::cast(k);






1149 
1150   // dump the field descriptors
1151   for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1152     if (!fld.access_flags().is_static()) {
1153       Symbol* sig = fld.signature();






















1154 
1155       writer->write_symbolID(fld.name());   // name
1156       writer->write_u1(sig2tag(sig));       // type

1157     }
1158   }
1159 }
1160 
1161 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1162 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1163   InstanceKlass* ik = InstanceKlass::cast(o->klass());
1164 
1165   DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1166 
1167   u4 is = instance_size(ik, cache_entry);
1168   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1169 
1170   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1171   writer->write_objectID(o);
1172   writer->write_u4(STACK_TRACE_ID);
1173 
1174   // class ID
1175   writer->write_classID(ik);
1176 
1177   // number of bytes that follow
1178   writer->write_u4(is);
1179 
1180   // field values
1181   dump_instance_fields(writer, o, cache_entry);
1182 
1183   writer->end_sub_record();
1184 }
1185 
1186 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1187 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1188   InstanceKlass* ik = InstanceKlass::cast(k);
1189 
1190   // We can safepoint and do a heap dump at a point where we have a Klass,
1191   // but no java mirror class has been setup for it. So we need to check
1192   // that the class is at least loaded, to avoid crash from a null mirror.
1193   if (!ik->is_loaded()) {
1194     return;
1195   }
1196 
1197   u2 static_fields_count = 0;
1198   u4 static_size = get_static_fields_size(ik, static_fields_count);
1199   u2 instance_fields_count = get_instance_fields_count(ik);
1200   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1201   u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);

1206   writer->write_classID(ik);
1207   writer->write_u4(STACK_TRACE_ID);
1208 
1209   // super class ID
1210   InstanceKlass* java_super = ik->java_super();
1211   if (java_super == nullptr) {
1212     writer->write_objectID(oop(nullptr));
1213   } else {
1214     writer->write_classID(java_super);
1215   }
1216 
1217   writer->write_objectID(ik->class_loader());
1218   writer->write_objectID(ik->signers());
1219   writer->write_objectID(ik->protection_domain());
1220 
1221   // reserved
1222   writer->write_objectID(oop(nullptr));
1223   writer->write_objectID(oop(nullptr));
1224 
1225   // instance size
1226   writer->write_u4(DumperSupport::instance_size(ik));
1227 
1228   // size of constant pool - ignored by HAT 1.1
1229   writer->write_u2(0);
1230 
1231   // static fields
1232   writer->write_u2(static_fields_count);
1233   dump_static_fields(writer, ik);
1234 
1235   // description of instance fields
1236   writer->write_u2(instance_fields_count);
1237   dump_instance_field_descriptors(writer, ik);
1238 
1239   writer->end_sub_record();
1240 }
1241 
1242 // creates HPROF_GC_CLASS_DUMP record for the given array class
1243 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1244   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1245   if (k->is_objArray_klass()) {
1246     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();

1260   assert(java_super != nullptr, "checking");
1261   writer->write_classID(java_super);
1262 
1263   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1264   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1265   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1266 
1267   writer->write_objectID(oop(nullptr));    // reserved
1268   writer->write_objectID(oop(nullptr));
1269   writer->write_u4(0);             // instance size
1270   writer->write_u2(0);             // constant pool
1271   writer->write_u2(0);             // static fields
1272   writer->write_u2(0);             // instance fields
1273 
1274   writer->end_sub_record();
1275 
1276 }
1277 
1278 // Hprof uses an u4 as record length field,
1279 // which means we need to truncate arrays that are too long.
1280 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1281   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1282   assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
1283 
1284   int length = array->length();
1285 
1286   int type_size;
1287   if (type == T_OBJECT) {
1288     type_size = sizeof(address);
1289   } else {
1290     type_size = type2aelembytes(type);
1291   }
1292 
1293   size_t length_in_bytes = (size_t)length * type_size;
1294   uint max_bytes = max_juint - header_size;
1295 
1296   if (length_in_bytes > max_bytes) {
1297     length = max_bytes / type_size;
1298     length_in_bytes = (size_t)length * type_size;
1299 

1300     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1301             type2name_tab[type], array->length(), length);
1302   }
1303   return length;
1304 }
1305 
















1306 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1307 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1308   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1309   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1310   int length = calculate_array_max_length(writer, array, header_size);
1311   u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1312 
1313   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1314   writer->write_objectID(array);
1315   writer->write_u4(STACK_TRACE_ID);
1316   writer->write_u4(length);
1317 
1318   // array class ID
1319   writer->write_classID(array->klass());
1320 
1321   // [id]* elements
1322   for (int index = 0; index < length; index++) {
1323     oop o = array->obj_at(index);
1324     o = mask_dormant_archived_object(o, array);
1325     writer->write_objectID(o);
1326   }
1327 
1328   writer->end_sub_record();
1329 }
1330 









































// Writes the first 'Length' elements of 'Array' via the element accessor
// 'Type'_at, each cast to 'Size' and emitted with writer->write_'Size'.
// Expects a variable named 'writer' (AbstractDumpWriter*) in scope.
#define WRITE_ARRAY(Array, Type, Size, Length)            \
  for (int i = 0; i < Length; i++) {                      \
    writer->write_##Size((Size)Array->Type##_at(i));      \
  }
1333 
1334 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1335 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1336   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1337   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1338   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1339 
1340   int length = calculate_array_max_length(writer, array, header_size);
1341   int type_size = type2aelembytes(type);
1342   u4 length_in_bytes = (u4)length * type_size;
1343   u4 size = header_size + length_in_bytes;
1344 
1345   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1346   writer->write_objectID(array);
1347   writer->write_u4(STACK_TRACE_ID);
1348   writer->write_u4(length);
1349   writer->write_u1(type2tag(type));
1350 

1432                                      int bci) {
1433   int line_number;
1434   if (m->is_native()) {
1435     line_number = -3;  // native frame
1436   } else {
1437     line_number = m->line_number_from_bci(bci);
1438   }
1439 
1440   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1441   writer->write_id(frame_serial_num);               // frame serial number
1442   writer->write_symbolID(m->name());                // method's name
1443   writer->write_symbolID(m->signature());           // method's signature
1444 
1445   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1446   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1447   writer->write_u4(class_serial_num);               // class serial number
1448   writer->write_u4((u4) line_number);               // line number
1449 }
1450 
1451 








































































































































































































































































1452 // Support class used to generate HPROF_UTF8 records from the entries in the
1453 // SymbolTable.
1454 
1455 class SymbolTableDumper : public SymbolClosure {
1456  private:
1457   AbstractDumpWriter* _writer;
1458   AbstractDumpWriter* writer() const                { return _writer; }
1459  public:
1460   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
1461   void do_symbol(Symbol** p);
1462 };
1463 
1464 void SymbolTableDumper::do_symbol(Symbol** p) {
1465   ResourceMark rm;
1466   Symbol* sym = *p;
1467   int len = sym->utf8_length();
1468   if (len > 0) {
1469     char* s = sym->as_utf8();
1470     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1471     writer()->write_symbolID(sym);

1923       return;
1924     }
1925   }
1926 
1927   if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
1928     return;
1929   }
1930 
1931   if (o->is_instance()) {
1932     // create a HPROF_GC_INSTANCE record for each object
1933     DumperSupport::dump_instance(writer(), o, &_class_cache);
1934     // If we encounter an unmounted virtual thread it needs to be dumped explicitly
1935     // (mounted virtual threads are dumped with their carriers).
1936     if (java_lang_VirtualThread::is_instance(o)
1937         && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
1938       _vthread_dumper->dump_vthread(o, writer());
1939     }
1940   } else if (o->is_objArray()) {
1941     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
1942     DumperSupport::dump_object_array(writer(), objArrayOop(o));


1943   } else if (o->is_typeArray()) {
1944     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
1945     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
1946   }
1947 }
1948 
1949 // The dumper controller for parallel heap dump
1950 class DumperController : public CHeapObj<mtInternal> {
1951  private:
1952    Monitor* _lock;
1953    Mutex* _global_writer_lock;
1954 
1955    const uint   _dumper_number;
1956    uint   _complete_number;
1957 
1958    bool   _started; // VM dumper started and acquired global writer lock
1959 
1960  public:
1961    DumperController(uint number) :
1962      // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,

2002      _complete_number++;
2003      // propagate local error to global if any
2004      if (local_writer->has_error()) {
2005        global_writer->set_error(local_writer->error());
2006      }
2007      ml.notify();
2008    }
2009 
2010    void wait_all_dumpers_complete() {
2011      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2012      while (_complete_number != _dumper_number) {
2013         ml.wait();
2014      }
2015    }
2016 };
2017 
2018 // DumpMerger merges separate dump files into a complete one
2019 class DumpMerger : public StackObj {
2020 private:
2021   DumpWriter* _writer;

2022   const char* _path;
2023   bool _has_error;
2024   int _dump_seq;
2025 
2026 private:
2027   void merge_file(const char* path);
2028   void merge_done();
2029   void set_error(const char* msg);
2030 
2031 public:
2032   DumpMerger(const char* path, DumpWriter* writer, int dump_seq) :
2033     _writer(writer),

2034     _path(path),
2035     _has_error(_writer->has_error()),
2036     _dump_seq(dump_seq) {}
2037 
2038   void do_merge();
2039 
2040   // returns path for the parallel DumpWriter (resource allocated)
2041   static char* get_writer_path(const char* base_path, int seq);
2042 
2043 };
2044 
2045 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2046   // approximate required buffer size
2047   size_t buf_size = strlen(base_path)
2048                     + 2                 // ".p"
2049                     + 10                // number (that's enough for 2^32 parallel dumpers)
2050                     + 1;                // '\0'
2051 
2052   char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2053   memset(path, 0, buf_size);
2054 
2055   os::snprintf(path, buf_size, "%s.p%d", base_path, seq);
2056 
2057   return path;
2058 }
2059 
2060 
// Completes the merged dump file: writes the terminating
// HPROF_HEAP_DUMP_END record and flushes the writer.
// Skipped when an error was recorded earlier, since the output
// is already unusable in that case.
void DumpMerger::merge_done() {
  // Writes the HPROF_HEAP_DUMP_END record.
  if (!_has_error) {
    DumperSupport::end_of_dump(_writer);
    _writer->flush();
  }
  _dump_seq = 0; //reset
}
2069 
// Records a merge error: logs it together with the dump file path and
// propagates the message to the writer so callers observing
// DumpWriter::error() see it; marks this merger as failed.
void DumpMerger::set_error(const char* msg) {
  assert(msg != nullptr, "sanity check");
  log_error(heapdump)("%s (file: %s)", msg, _path);
  _writer->set_error(msg);
  _has_error = true;
}
2076 
2077 #ifdef LINUX
2078 // Merge segmented heap files via sendfile, it's more efficient than the
2079 // read+write combination, which would require transferring data to and from
2080 // user space.
2081 void DumpMerger::merge_file(const char* path) {
2082   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2083 
2084   int segment_fd = os::open(path, O_RDONLY, 0);
2085   if (segment_fd == -1) {

2180   }
2181 };
2182 
2183 // The VM operation that performs the heap dump
2184 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2185  private:
2186   static VM_HeapDumper*   _global_dumper;
2187   static DumpWriter*      _global_writer;
2188   DumpWriter*             _local_writer;
2189   JavaThread*             _oome_thread;
2190   Method*                 _oome_constructor;
2191   bool                    _gc_before_heap_dump;
2192   GrowableArray<Klass*>*  _klass_map;
2193 
2194   ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
2195   int                     _thread_dumpers_count;
2196   volatile int            _thread_serial_num;
2197   volatile int            _frame_serial_num;
2198 
2199   volatile int            _dump_seq;




2200   // parallel heap dump support
2201   uint                    _num_dumper_threads;
2202   DumperController*       _dumper_controller;
2203   ParallelObjectIterator* _poi;
2204 
2205   // Dumper id of VMDumper thread.
2206   static const int VMDumperId = 0;
2207   // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2208   static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2209   // the 1st dumper calling get_next_dumper_id becomes VM dumper
2210   int get_next_dumper_id() {
2211     return Atomic::fetch_then_add(&_dump_seq, 1);
2212   }
2213 
2214   // accessors and setters
2215   static VM_HeapDumper* dumper()         {  assert(_global_dumper != nullptr, "Error"); return _global_dumper; }
2216   static DumpWriter* writer()            {  assert(_global_writer != nullptr, "Error"); return _global_writer; }
2217 
2218   void set_global_dumper() {
2219     assert(_global_dumper == nullptr, "Error");

2280   }
2281 
2282   ~VM_HeapDumper() {
2283     if (_thread_dumpers != nullptr) {
2284       for (int i = 0; i < _thread_dumpers_count; i++) {
2285         delete _thread_dumpers[i];
2286       }
2287       FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2288     }
2289 
2290     if (_dumper_controller != nullptr) {
2291       delete _dumper_controller;
2292       _dumper_controller = nullptr;
2293     }
2294     delete _klass_map;
2295   }
2296   int dump_seq()           { return _dump_seq; }
2297   bool is_parallel_dump()  { return _num_dumper_threads > 1; }
2298   void prepare_parallel_dump(WorkerThreads* workers);
2299 


2300   VMOp_Type type() const { return VMOp_HeapDumper; }
2301   virtual bool doit_prologue();
2302   void doit();
2303   void work(uint worker_id);
2304 
2305   // UnmountedVThreadDumper implementation
2306   void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2307 };
2308 
2309 VM_HeapDumper* VM_HeapDumper::_global_dumper = nullptr;
2310 DumpWriter*    VM_HeapDumper::_global_writer = nullptr;
2311 
// The heap dump VM operation is never skipped.
bool VM_HeapDumper::skip_operation() const {
  return false;
}
2315 
2316 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2317 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2318   writer->finish_dump_segment();
2319 

2454     _dumper_controller->lock_global_writer();
2455     _dumper_controller->signal_start();
2456   } else {
2457     _dumper_controller->wait_for_start_signal();
2458   }
2459 
2460   if (is_vm_dumper(dumper_id)) {
2461     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2462     // Write the file header - we always use 1.0.2
2463     const char* header = "JAVA PROFILE 1.0.2";
2464 
2465     // header is few bytes long - no chance to overflow int
2466     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2467     writer()->write_u4(oopSize);
2468     // timestamp is current time in ms
2469     writer()->write_u8(os::javaTimeMillis());
2470     // HPROF_UTF8 records
2471     SymbolTableDumper sym_dumper(writer());
2472     SymbolTable::symbols_do(&sym_dumper);
2473 







2474     // write HPROF_LOAD_CLASS records
2475     {
2476       LockedClassesDo locked_load_classes(&do_load_class);
2477       ClassLoaderDataGraph::classes_do(&locked_load_classes);
2478     }
2479 
2480     // write HPROF_FRAME and HPROF_TRACE records
2481     // this must be called after _klass_map is built when iterating the classes above.
2482     dump_stack_traces(writer());
2483 
2484     // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2485     _dumper_controller->unlock_global_writer();
2486   }
2487 
2488   // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2489 
2490   ResourceMark rm;
2491   // share global compressor, local DumpWriter is not responsible for its life cycle
2492   DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
2493                             writer()->is_overwrite(), writer()->compressor());

2641         (error() != nullptr) ? error() : "reason unknown");
2642     }
2643     return -1;
2644   }
2645 
2646   // generate the segmented heap dump into separate files
2647   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
2648   VMThread::execute(&dumper);
2649 
2650   // record any error that the writer may have encountered
2651   set_error(writer.error());
2652 
2653   // Heap dump process is done in two phases
2654   //
2655   // Phase 1: Concurrent threads directly write heap data to multiple heap files.
2656   //          This is done by VM_HeapDumper, which is performed within safepoint.
2657   //
2658   // Phase 2: Merge multiple heap files into one complete heap dump file.
2659   //          This is done by DumpMerger, which is performed outside safepoint
2660 
2661   DumpMerger merger(path, &writer, dumper.dump_seq());
2662   Thread* current_thread = Thread::current();
2663   if (current_thread->is_AttachListener_thread()) {
2664     // perform heapdump file merge operation in the current thread prevents us
2665     // from occupying the VM Thread, which in turn affects the occurrence of
2666     // GC and other VM operations.
2667     merger.do_merge();
2668   } else {
2669     // otherwise, performs it by VM thread
2670     VM_HeapDumpMerge op(&merger);
2671     VMThread::execute(&op);
2672   }
2673   if (writer.error() != nullptr) {
2674     set_error(writer.error());
2675   }
2676 
2677   // emit JFR event
2678   if (error() == nullptr) {
2679     event.set_destination(path);
2680     event.set_gcBeforeDump(_gc_before_heap_dump);
2681     event.set_size(writer.bytes_written());

  25 
  26 #include "precompiled.hpp"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "classfile/vmClasses.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/gcVMOperations.hpp"
  35 #include "gc/shared/workerThread.hpp"
  36 #include "jfr/jfrEvents.hpp"
  37 #include "jvm.h"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/fieldStreams.inline.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/objArrayOop.inline.hpp"
  45 #include "oops/flatArrayKlass.hpp"
  46 #include "oops/flatArrayOop.inline.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "oops/typeArrayOop.inline.hpp"
  49 #include "runtime/continuationWrapper.inline.hpp"
  50 #include "runtime/fieldDescriptor.inline.hpp"
  51 #include "runtime/frame.inline.hpp"
  52 #include "runtime/handles.inline.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/javaThread.inline.hpp"
  55 #include "runtime/jniHandles.hpp"
  56 #include "runtime/os.hpp"
  57 #include "runtime/threads.hpp"
  58 #include "runtime/threadSMR.hpp"
  59 #include "runtime/vframe.hpp"
  60 #include "runtime/vmOperations.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "runtime/timerTrace.hpp"
  63 #include "services/heapDumper.hpp"
  64 #include "services/heapDumperCompression.hpp"
  65 #include "services/threadService.hpp"
  66 #include "utilities/checkedCast.hpp"
  67 #include "utilities/macros.hpp"
  68 #include "utilities/ostream.hpp"
  69 #ifdef LINUX
  70 #include "os_linux.hpp"

 302  *                                     7:  double array
 303  *                                     8:  byte array
 304  *                                     9:  short array
 305  *                                     10: int array
 306  *                                     11: long array
 307  *                          [u1]*      elements
 308  *
 309  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 310  *
 311  *                u4        total number of samples
 312  *                u4        # of traces
 313  *               [u4        # of samples
 314  *                u4]*      stack trace serial number
 315  *
 316  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 317  *
 318  *                u4        0x00000001: alloc traces on/off
 319  *                          0x00000002: cpu sampling on/off
 320  *                u2        stack trace depth
 321  *
 322  * HPROF_FLAT_ARRAYS        list of flat arrays
 323  *
 324  *               [flat array sub-records]*
 325  *
 326  *               HPROF_FLAT_ARRAY      flat array
 327  *
 328  *                          id         array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
 329  *                          id         element class ID (dumped by HPROF_GC_CLASS_DUMP)
 330  *
 331  * HPROF_INLINED_FIELDS     describes inlined fields
 332  *
 333  *               [class with inlined fields sub-records]*
 334  *
 335  *               HPROF_CLASS_WITH_INLINED_FIELDS
 336  *
 337  *                          id         class ID (dumped as HPROF_GC_CLASS_DUMP)
 338  *
 339  *                          u2         number of instance inlined fields (not including super)
 340  *                          [u2,       inlined field index,
 341  *                           u2,       synthetic field count,
 342  *                           id,       original field name,
 343  *                           id]*      inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
 344  *
 345  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 346  * be generated as a sequence of heap dump segments. This sequence is
 347  * terminated by an end record. The additional tags allowed by format
 348  * "JAVA PROFILE 1.0.2" are:
 349  *
 350  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 351  *
 352  *               [heap dump sub-records]*
 353  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 354  *
 355  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 356  *
 357  */
 358 
 359 
 360 // HPROF tags
 361 
// Tag values of the HPROF binary format, including the 1.0.2 segmented-dump
// records and the inlined-object extension records used by this dumper.
enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // inlined object support
  HPROF_FLAT_ARRAYS             = 0x12,
  HPROF_INLINED_FIELDS          = 0x13,
  // inlined object subrecords
  // NOTE: sub-record tags are scoped to their parent record, so their values
  // intentionally collide with top-level tag values (both are 0x01 here).
  HPROF_FLAT_ARRAY                  = 0x01,
  HPROF_CLASS_WITH_INLINED_FIELDS   = 0x01,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};
 415 
// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200   // presumably the initial capacity for the class list -- confirm at use site
};
 421 
 422 
 423 class AbstractDumpWriter;
 424 
// Bookkeeping for the inlined-object extension records of the heap dump:
// flat arrays (HPROF_FLAT_ARRAYS), classes with inlined fields
// (HPROF_INLINED_FIELDS), and the synthesized field-name strings.
class InlinedObjects {

  // Associates a class with the base string id of its inlined field names.
  struct ClassInlinedFields {
    const Klass *klass;
    uintx base_index;   // base index of the inlined field names (1st field has index base_index+1).
    ClassInlinedFields(const Klass *klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}

    // For GrowableArray::find_sorted().
    // NOTE(review): the pointer difference is narrowed to int on return;
    // safe as an ordering only if it cannot overflow int -- consider an
    // explicit three-way comparison to be robust.
    static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
      return a.klass - b.klass;
    }
    // For GrowableArray::sort().
    static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
      return compare(*a, *b);
    }
  };

  // string-id range handed out for inlined field names
  // (presumably managed via get_next_string_id -- confirm at definition)
  uintx _min_string_id;
  uintx _max_string_id;

  // sorted map from class to the base index of its inlined field names
  GrowableArray<ClassInlinedFields> *_inlined_field_map;

  // counters for classes with inlined fields and for the fields
  int _classes_count;
  int _inlined_fields_count;

  // process-wide singleton, exposed via get_instance()
  static InlinedObjects *_instance;

  static void inlined_field_names_callback(InlinedObjects* _this, const Klass *klass, uintx base_index, int count);

  // flat arrays collected during the dump, emitted by dump_flat_arrays()
  GrowableArray<oop> *_flat_arrays;

public:
  InlinedObjects()
    : _min_string_id(0), _max_string_id(0),
    _inlined_field_map(nullptr),
    _classes_count(0), _inlined_fields_count(0),
    _flat_arrays(nullptr) {
  }

  static InlinedObjects* get_instance() {
    return _instance;
  }

  void init();
  void release();

  // writes HPROF_UTF8 records for the synthesized inlined field names
  void dump_inlined_field_names(AbstractDumpWriter *writer);

  uintx get_base_index_for(Klass* k);
  uintx get_next_string_id(uintx id);

  // writes the HPROF_INLINED_FIELDS record
  void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);

  // records a flat array so it can be listed in the HPROF_FLAT_ARRAYS record
  void add_flat_array(oop array);
  // writes the HPROF_FLAT_ARRAYS record
  void dump_flat_arrays(AbstractDumpWriter* writer);

};
 483 
 484 InlinedObjects *InlinedObjects::_instance = nullptr;
 485 
 486 
 487 // Supports I/O operations for a dump
 488 // Base class for dump and parallel dump
 489 class AbstractDumpWriter : public CHeapObj<mtInternal> {
 490  protected:
 491   enum {
 492     io_buffer_max_size = 1*M,
 493     dump_segment_header_size = 9
 494   };
 495 
 496   char* _buffer;    // internal buffer
 497   size_t _size;
 498   size_t _pos;
 499 
 500   bool _in_dump_segment; // Are we currently in a dump segment?
 501   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 502   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 503   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 504 
 505   char* buffer() const                          { return _buffer; }
 506   size_t buffer_size() const                    { return _size; }

 819   }
 820 }
 821 
 822 class DumperClassCacheTable;
 823 class DumperClassCacheTableEntry;
 824 
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // calculates the total size of the all fields of the given class.
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat (collapses NaNs to the canonical bit pattern)
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble (collapses NaNs to the canonical bit pattern)
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given identity or inlined object;
  // for identity objects offset is 0 and 'klass' is o->klass(),
  // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);
  // dump the raw values of the instance fields of the given inlined object;
  // dump_instance_fields wrapper for inlined objects
  static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);

  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k, uintx *inlined_fields_index = nullptr);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
  static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
  // extended version to dump flat arrays as primitive arrays;
  // type_size specifies size of the inlined objects.
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  // Returns o unchanged, or nullptr if o's java mirror is not loaded (a
  // dormant archived object); the skip is logged via
  // report_dormant_archived_object. 'ref_obj' is the referrer, may be null.
  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  // helper methods for inlined fields.
  static bool is_inlined_field(const fieldDescriptor& fld) {
    return fld.is_flat();
  }
  static InlineKlass* get_inlined_field_klass(const fieldDescriptor& fld) {
    assert(is_inlined_field(fld), "must be inlined field");
    InstanceKlass* holder_klass = fld.field_holder();
    return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
  }

  // trace-logs a skipped dormant archived object, with its referrer if known
  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, cds, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name(),
                  p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name());
      }
    }
  }
};
 924 
// Hash table of klasses to the klass metadata. This should greatly improve the
// hash dumping performance. This hash table is supposed to be used by a single
// thread only.
//
// One entry caches, per instance field of a class (including inherited
// fields): the first signature character, the field offset, the InlineKlass
// for flattened fields, plus the precomputed serialized instance size.
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  GrowableArray<char> _sigs_start;             // first signature char per field; 'Q' marks an inlined (flattened) field
  GrowableArray<int> _offsets;                 // field offset per field
  GrowableArray<InlineKlass*> _inline_klasses; // InlineKlass per field, null for non-inlined fields
  u4 _instance_size;                           // total serialized size of the instance fields
  int _entries;                                // number of fields recorded

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  void push_sig_start_inlined() { _sigs_start.push('Q'); }
  bool is_inlined(int field_idx){ return _sigs_start.at(field_idx) == 'Q'; }
  InlineKlass* inline_klass(int field_idx) { assert(is_inlined(field_idx), "Not inlined"); return _inline_klasses.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};
 949 
 950 class DumperClassCacheTable {
 951 private:
 952   // ResourceHashtable SIZE is specified at compile time so we
 953   // use 1031 which is the first prime after 1024.
 954   static constexpr size_t TABLE_SIZE = 1031;
 955 
 956   // Maintain the cache for N classes. This limits memory footprint
 957   // impact, regardless of how many classes we have in the dump.
 958   // This also improves look up performance by keeping the statically
 959   // sized table from overloading.
 960   static constexpr int CACHE_TOP = 256;
 961 
 962   typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
 963                             TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
 964   PtrTable* _ptrs;
 965 

 974       bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
 975         delete entry;
 976         return true;
 977       }
 978     } cleanup;
 979     table->unlink(&cleanup);
 980   }
 981 
 982 public:
 983   DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
 984     if (_last_ik == ik) {
 985       return _last_entry;
 986     }
 987 
 988     DumperClassCacheTableEntry* entry;
 989     DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
 990     if (from_cache == nullptr) {
 991       entry = new DumperClassCacheTableEntry();
 992       for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
 993         if (!fld.access_flags().is_static()) {
 994           InlineKlass* inlineKlass = nullptr;
 995           if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
 996             inlineKlass = DumperSupport::get_inlined_field_klass(fld.field_descriptor());
 997             entry->push_sig_start_inlined();
 998             entry->_instance_size += DumperSupport::instance_size(inlineKlass);
 999           } else {
1000             Symbol* sig = fld.signature();
1001             entry->_sigs_start.push(sig->char_at(0));
1002             entry->_instance_size += DumperSupport::sig2size(sig);
1003           }
1004           entry->_inline_klasses.push(inlineKlass);
1005           entry->_offsets.push(fld.offset());
1006           entry->_entries++;

1007         }
1008       }
1009 
1010       if (_ptrs->number_of_entries() >= CACHE_TOP) {
1011         // We do not track the individual hit rates for table entries.
1012         // Purge the entire table, and let the cache catch up with new
1013         // distribution.
1014         unlink_all(_ptrs);
1015       }
1016 
1017       _ptrs->put(ik, entry);
1018     } else {
1019       entry = *from_cache;
1020     }
1021 
1022     // Remember for single-slot cache.
1023     _last_ik = ik;
1024     _last_entry = entry;
1025 
1026     return entry;

1096 }
1097 
1098 // dump a jfloat
1099 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
1100   if (g_isnan(f)) {
1101     writer->write_u4(0x7fc00000); // collapsing NaNs
1102   } else {
1103     writer->write_u4(bit_cast<u4>(f));
1104   }
1105 }
1106 
1107 // dump a jdouble
1108 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
1109   if (g_isnan(d)) {
1110     writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
1111   } else {
1112     writer->write_u8(bit_cast<u8>(d));
1113   }
1114 }
1115 
1116 
1117 // dumps the raw value of the given field
1118 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
1119   switch (type) {
1120     case JVM_SIGNATURE_CLASS :
1121     case JVM_SIGNATURE_ARRAY : {
1122       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
1123       o = mask_dormant_archived_object(o, obj);
1124       assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
1125       writer->write_objectID(o);
1126       break;
1127     }
1128     case JVM_SIGNATURE_BYTE : {
1129       jbyte b = obj->byte_field(offset);
1130       writer->write_u1(b);
1131       break;
1132     }
1133     case JVM_SIGNATURE_CHAR : {
1134       jchar c = obj->char_field(offset);
1135       writer->write_u2(c);
1136       break;

1155       writer->write_u4(i);
1156       break;
1157     }
1158     case JVM_SIGNATURE_LONG : {
1159       jlong l = obj->long_field(offset);
1160       writer->write_u8(l);
1161       break;
1162     }
1163     case JVM_SIGNATURE_BOOLEAN : {
1164       jboolean b = obj->bool_field(offset);
1165       writer->write_u1(b);
1166       break;
1167     }
1168     default : {
1169       ShouldNotReachHere();
1170       break;
1171     }
1172   }
1173 }
1174 
1175 // calculates the total size of the all fields of the given class.
1176 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1177   if (class_cache_entry != nullptr) {
1178     return class_cache_entry->instance_size();
1179   } else {
1180     u4 size = 0;
1181     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1182       if (!fld.access_flags().is_static()) {
1183         if (is_inlined_field(fld.field_descriptor())) {
1184           size += instance_size(get_inlined_field_klass(fld.field_descriptor()));
1185         } else {
1186           size += sig2size(fld.signature());
1187         }
1188       }
1189     }
1190     return size;
1191   }
1192 }
1193 
1194 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1195   field_count = 0;
1196   u4 size = 0;
1197 
1198   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1199     if (fldc.access_flags().is_static()) {
1200       assert(!is_inlined_field(fldc.field_descriptor()), "static fields cannot be inlined");
1201 
1202       field_count++;
1203       size += sig2size(fldc.signature());
1204     }
1205   }
1206 
1207   // Add in resolved_references which is referenced by the cpCache
1208   // The resolved_references is an array per InstanceKlass holding the
1209   // strings and other oops resolved from the constant pool.
1210   oop resolved_references = ik->constants()->resolved_references_or_null();
1211   if (resolved_references != nullptr) {
1212     field_count++;
1213     size += sizeof(address);
1214 
1215     // Add in the resolved_references of the used previous versions of the class
1216     // in the case of RedefineClasses
1217     InstanceKlass* prev = ik->previous_versions();
1218     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1219       field_count++;
1220       size += sizeof(address);
1221       prev = prev->previous_versions();
1222     }
1223   }
1224 
1225   // We write the value itself plus a name and a one byte type tag per field.
1226   return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1227 }
1228 
// dumps static fields of the given class
// Writes exactly the entries counted by get_static_fields_size(), in the same
// order: declared statics first, then the synthetic 'resolved_references'
// entries (current class plus previous versions kept by RedefineClasses).
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!is_inlined_field(fld.field_descriptor()), "static fields cannot be inlined");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value: read from the class mirror at the field's offset
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }
}
1265 
// dump the raw values of the instance fields of the given identity or inlined object;
// for identity objects offset is 0 and 'klass' is o->klass(),
// for inlined objects offset is the offset in the holder object, 'klass' is inlined object class.
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
  assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
  for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
    if (class_cache_entry->is_inlined(idx)) {
      // inlined field: its payload is flattened into the holder, so recurse
      InlineKlass* field_klass = class_cache_entry->inline_klass(idx);
      // start of the inlined payload = cached field offset adjusted by the
      // inline class's first-field offset, relative to the holder's base offset
      int fields_offset = offset + (class_cache_entry->offset(idx) - field_klass->first_field_offset());
      DumperClassCacheTableEntry* inline_class_cache_entry = class_cache->lookup_or_create(field_klass);
      dump_inlined_object_fields(writer, o, fields_offset, class_cache, inline_class_cache_entry);
    } else {
      // regular field: write the raw value at the cached offset
      dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
    }
  }
}
1282 
// dumps the raw field values of an object inlined at 'offset' within holder 'o'
void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
  // the object is inlined, so all its fields are stored without headers.
  dump_instance_fields(writer, o, offset, class_cache, class_cache_entry);
}
1287 
1288 // gets the count of the instance fields for a given class
1289 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1290   u2 field_count = 0;
1291 
1292   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1293     if (!fldc.access_flags().is_static()) {
1294       if (is_inlined_field(fldc.field_descriptor())) {
1295         // add "synthetic" fields for inlined fields.
1296         field_count += get_instance_fields_count(get_inlined_field_klass(fldc.field_descriptor()));
1297       } else {
1298         field_count++;
1299       }
1300     }
1301   }
1302 
1303   return field_count;
1304 }
1305 
// dumps the definition of the instance fields for a given class
// inlined_fields_id is not-nullptr for inlined fields (to get synthetic field name IDs
// by using InlinedObjects::get_next_string_id()).
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
  // inlined_fields_id != nullptr means ik is a class of inlined field.
  // Inlined field id pointer for this class; lazily initialized
  // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
  uintx *this_klass_inlined_fields_id = inlined_fields_id;
  uintx inlined_id = 0;

  // dump the field descriptors
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      if (is_inlined_field(fld.field_descriptor())) {
        // dump "synthetic" fields for inlined fields.
        if (this_klass_inlined_fields_id == nullptr) {
          // top-level class: start allocating synthetic name IDs from the
          // base index registered for this class during the name pre-pass
          inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
          this_klass_inlined_fields_id = &inlined_id;
        }
        dump_instance_field_descriptors(writer, get_inlined_field_klass(fld.field_descriptor()), this_klass_inlined_fields_id);
      } else {
        Symbol* sig = fld.signature();
        Symbol* name = nullptr;
        // Use inlined_fields_id provided by caller.
        if (inlined_fields_id != nullptr) {
          uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);

          // name_id == 0 is returned on error. use original field signature.
          if (name_id != 0) {
            *inlined_fields_id = name_id;
            // the synthetic dotted name was emitted as a HPROF_UTF8 record with this ID
            name = reinterpret_cast<Symbol*>(name_id);
          }
        }
        if (name == nullptr) {
          name = fld.name();
        }

        writer->write_symbolID(name);         // name
        writer->write_u1(sig2tag(sig));       // type
      }
    }
  }
}
1349 
1350 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1351 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1352   InstanceKlass* ik = InstanceKlass::cast(o->klass());
1353 
1354   DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1355 
1356   u4 is = instance_size(ik, cache_entry);
1357   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1358 
1359   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1360   writer->write_objectID(o);
1361   writer->write_u4(STACK_TRACE_ID);
1362 
1363   // class ID
1364   writer->write_classID(ik);
1365 
1366   // number of bytes that follow
1367   writer->write_u4(is);
1368 
1369   // field values
1370   dump_instance_fields(writer, o, 0, class_cache, cache_entry);
1371 
1372   writer->end_sub_record();
1373 }
1374 
1375 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1376 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1377   InstanceKlass* ik = InstanceKlass::cast(k);
1378 
1379   // We can safepoint and do a heap dump at a point where we have a Klass,
1380   // but no java mirror class has been setup for it. So we need to check
1381   // that the class is at least loaded, to avoid crash from a null mirror.
1382   if (!ik->is_loaded()) {
1383     return;
1384   }
1385 
1386   u2 static_fields_count = 0;
1387   u4 static_size = get_static_fields_size(ik, static_fields_count);
1388   u2 instance_fields_count = get_instance_fields_count(ik);
1389   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1390   u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);

1395   writer->write_classID(ik);
1396   writer->write_u4(STACK_TRACE_ID);
1397 
1398   // super class ID
1399   InstanceKlass* java_super = ik->java_super();
1400   if (java_super == nullptr) {
1401     writer->write_objectID(oop(nullptr));
1402   } else {
1403     writer->write_classID(java_super);
1404   }
1405 
1406   writer->write_objectID(ik->class_loader());
1407   writer->write_objectID(ik->signers());
1408   writer->write_objectID(ik->protection_domain());
1409 
1410   // reserved
1411   writer->write_objectID(oop(nullptr));
1412   writer->write_objectID(oop(nullptr));
1413 
1414   // instance size
1415   writer->write_u4(HeapWordSize * ik->size_helper());
1416 
1417   // size of constant pool - ignored by HAT 1.1
1418   writer->write_u2(0);
1419 
1420   // static fields
1421   writer->write_u2(static_fields_count);
1422   dump_static_fields(writer, ik);
1423 
1424   // description of instance fields
1425   writer->write_u2(instance_fields_count);
1426   dump_instance_field_descriptors(writer, ik);
1427 
1428   writer->end_sub_record();
1429 }
1430 
1431 // creates HPROF_GC_CLASS_DUMP record for the given array class
1432 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1433   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1434   if (k->is_objArray_klass()) {
1435     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();

1449   assert(java_super != nullptr, "checking");
1450   writer->write_classID(java_super);
1451 
1452   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1453   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1454   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1455 
1456   writer->write_objectID(oop(nullptr));    // reserved
1457   writer->write_objectID(oop(nullptr));
1458   writer->write_u4(0);             // instance size
1459   writer->write_u2(0);             // constant pool
1460   writer->write_u2(0);             // static fields
1461   writer->write_u2(0);             // instance fields
1462 
1463   writer->end_sub_record();
1464 
1465 }
1466 
1467 // Hprof uses an u4 as record length field,
1468 // which means we need to truncate arrays that are too long.
1469 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {



1470   int length = array->length();
1471 







1472   size_t length_in_bytes = (size_t)length * type_size;
1473   uint max_bytes = max_juint - header_size;
1474 
1475   if (length_in_bytes > max_bytes) {
1476     length = max_bytes / type_size;
1477     length_in_bytes = (size_t)length * type_size;
1478 
1479     BasicType type = ArrayKlass::cast(array->klass())->element_type();
1480     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1481             type2name_tab[type], array->length(), length);
1482   }
1483   return length;
1484 }
1485 
1486 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1487   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1488   assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_PRIMITIVE_OBJECT, "invalid array element type");
1489   int type_size;
1490   if (type == T_OBJECT) {
1491     type_size = sizeof(address);
1492   } else if (type == T_PRIMITIVE_OBJECT) {
1493       // TODO: FIXME
1494       fatal("Not supported yet"); // FIXME: JDK-8325678
1495   } else {
1496     type_size = type2aelembytes(type);
1497   }
1498 
1499   return calculate_array_max_length(writer, array, type_size, header_size);
1500 }
1501 
1502 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1503 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1504   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1505   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1506   int length = calculate_array_max_length(writer, array, header_size);
1507   u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1508 
1509   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1510   writer->write_objectID(array);
1511   writer->write_u4(STACK_TRACE_ID);
1512   writer->write_u4(length);
1513 
1514   // array class ID
1515   writer->write_classID(array->klass());
1516 
1517   // [id]* elements
1518   for (int index = 0; index < length; index++) {
1519     oop o = array->obj_at(index);
1520     o = mask_dormant_archived_object(o, array);
1521     writer->write_objectID(o);
1522   }
1523 
1524   writer->end_sub_record();
1525 }
1526 
1527 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1528 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache) {
1529   FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1530   InlineKlass* element_klass = array_klass->element_klass();
1531   int element_size = instance_size(element_klass);
1532   /*                          id         array object ID
1533    *                          u4         stack trace serial number
1534    *                          u4         number of elements
1535    *                          u1         element type
1536    */
1537   short header_size = 1 + sizeof(address) + 2 * 4 + 1;
1538 
1539   // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
1540   BasicType type = T_BYTE;
1541   int type_size = type2aelembytes(type);
1542   int length = calculate_array_max_length(writer, array, element_size, header_size);
1543   u4 length_in_bytes = (u4)(length * element_size);
1544   u4 size = header_size + length_in_bytes;
1545 
1546   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1547   writer->write_objectID(array);
1548   writer->write_u4(STACK_TRACE_ID);
1549   // TODO: round up array length for T_SHORT/T_INT/T_LONG
1550   writer->write_u4(length * element_size);
1551   writer->write_u1(type2tag(type));
1552 
1553   for (int index = 0; index < length; index++) {
1554     // need offset in the holder to read inlined object. calculate it from flatArrayOop::value_at_addr()
1555     int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper())
1556                   - cast_from_oop<address>(array));
1557     DumperClassCacheTableEntry* class_cache_entry = class_cache->lookup_or_create(element_klass);
1558     dump_inlined_object_fields(writer, array, offset, class_cache, class_cache_entry);
1559   }
1560 
1561   // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1562 
1563   InlinedObjects::get_instance()->add_flat_array(array);
1564 
1565   writer->end_sub_record();
1566 }
1567 
// Writes 'Length' elements of 'Array' via the 'Type'##_at accessor, casting
// each element to 'Size' before writing; expects a 'writer' in scope.
#define WRITE_ARRAY(Array, Type, Size, Length) \
  for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1570 
1571 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1572 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1573   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1574   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1575   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1576 
1577   int length = calculate_array_max_length(writer, array, header_size);
1578   int type_size = type2aelembytes(type);
1579   u4 length_in_bytes = (u4)length * type_size;
1580   u4 size = header_size + length_in_bytes;
1581 
1582   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1583   writer->write_objectID(array);
1584   writer->write_u4(STACK_TRACE_ID);
1585   writer->write_u4(length);
1586   writer->write_u1(type2tag(type));
1587 

1669                                      int bci) {
1670   int line_number;
1671   if (m->is_native()) {
1672     line_number = -3;  // native frame
1673   } else {
1674     line_number = m->line_number_from_bci(bci);
1675   }
1676 
1677   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1678   writer->write_id(frame_serial_num);               // frame serial number
1679   writer->write_symbolID(m->name());                // method's name
1680   writer->write_symbolID(m->signature());           // method's signature
1681 
1682   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1683   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1684   writer->write_u4(class_serial_num);               // class serial number
1685   writer->write_u4((u4) line_number);               // line number
1686 }
1687 
1688 
// LockedClassesDo closure that emits HPROF_UTF8 records for the synthetic
// dotted names ("outer.inner.field") of fields reached through inlined
// fields. For each class that has inlined fields, '_callback' is invoked with
// the base string ID assigned to that class and the number of its inlined
// fields. String IDs come from InlinedObjects::get_next_string_id(), which
// avoids the address range of real Symbols.
class InlinedFieldNameDumper : public LockedClassesDo {
public:
  typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);

private:
  AbstractDumpWriter* _writer;
  InlinedObjects *_owner;
  Callback       _callback;
  uintx _index;   // most recently allocated string ID

  // Recursively writes one HPROF_UTF8 record per (transitively) non-inlined
  // field of 'klass'; 'super_names' holds the chain of enclosing field names
  // used as the dotted prefix.
  void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
    super_names->push(field_name);
    for (HierarchicalFieldStream<JavaFieldStream> fld(klass); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
        } else {
          // get next string ID.
          uintx next_index = _owner->get_next_string_id(_index);
          if (next_index == 0) {
            // something went wrong (overflow?)
            // stop generation; the rest of inlined objects will have original field names.
            // NOTE(review): this early return skips super_names->pop(); harmless
            // since all subsequent calls also bail out, but worth confirming.
            return;
          }
          _index = next_index;

          // Calculate length.
          int len = fld.name()->utf8_length();
          for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
            len += (*it)->utf8_length() + 1;    // +1 for ".".
          }

          DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
          _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
          // Write the string value.
          // 1) super_names.
          for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
            _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
            _writer->write_u1('.');
          }
          // 2) field name.
          _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
        }
      }
    }
    super_names->pop();
  }

  // Entry point for one inlined field of a top-level class.
  void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
    GrowableArray<Symbol*> super_names(4, mtServiceability);
    dump_inlined_field_names(&super_names, field_name, field_klass);
  }

public:
  InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
    : _writer(writer), _owner(owner), _callback(callback), _index(0)  {
  }

  void do_klass(Klass* k) {
    if (!k->is_instance_klass()) {
      return;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    // if (ik->has_inline_type_fields()) {
    //   return;
    // }

    // _index before processing is the base ID for this class's synthetic names
    uintx base_index = _index;
    int count = 0;

    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
          count++;
        }
      }
    }

    if (count != 0) {
      _callback(_owner, k, base_index, count);
    }
  }
};
1773 
// LockedClassesDo closure that writes one HPROF_CLASS_WITH_INLINED_FIELDS
// sub-record for every loaded class that has inlined fields.
class InlinedFieldsDumper : public LockedClassesDo {
private:
  AbstractDumpWriter* _writer;

public:
  InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}

  void do_klass(Klass* k) {
    if (!k->is_instance_klass()) {
      return;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    // if (ik->has_inline_type_fields()) {
    //   return;
    // }

    // We can be at a point where java mirror does not exist yet.
    // So we need to check that the class is at least loaded, to avoid crash from a null mirror.
    if (!ik->is_loaded()) {
      return;
    }

    // 1st pass: count the inlined fields so we know whether (and how much) to write
    u2 inlined_count = 0;
    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          inlined_count++;
        }
      }
    }
    if (inlined_count != 0) {
      _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);

      // class ID
      _writer->write_classID(ik);
      // number of inlined fields
      _writer->write_u2(inlined_count);
      // 2nd pass: write one entry per inlined field; 'index' tracks the field's
      // position in the flattened (synthetic) field list
      u2 index = 0;
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
            // inlined field index
            _writer->write_u2(index);
            // synthetic field count
            u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
            _writer->write_u2(field_count);
            // original field name
            _writer->write_symbolID(fld.name());
            // inlined field class ID
            _writer->write_classID(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));

            // an inlined field occupies 'field_count' slots in the synthetic list
            index += field_count;
          } else {
            index++;
          }
        }
      }
    }
  }
};
1834 
1835 
1836 void InlinedObjects::init() {
1837   _instance = this;
1838 
1839   struct Closure : public SymbolClosure {
1840     uintx _min_id = max_uintx;
1841     uintx _max_id = 0;
1842     Closure() : _min_id(max_uintx), _max_id(0) {}
1843 
1844     void do_symbol(Symbol** p) {
1845       uintx val = reinterpret_cast<uintx>(*p);
1846       if (val < _min_id) {
1847         _min_id = val;
1848       }
1849       if (val > _max_id) {
1850         _max_id = val;
1851       }
1852     }
1853   } closure;
1854 
1855   SymbolTable::symbols_do(&closure);
1856 
1857   _min_string_id = closure._min_id;
1858   _max_string_id = closure._max_id;
1859 }
1860 
1861 void InlinedObjects::release() {
1862   _instance = nullptr;
1863 
1864   if (_inlined_field_map != nullptr) {
1865     delete _inlined_field_map;
1866     _inlined_field_map = nullptr;
1867   }
1868   if (_flat_arrays != nullptr) {
1869     delete _flat_arrays;
1870     _flat_arrays = nullptr;
1871   }
1872 }
1873 
1874 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1875   if (_this->_inlined_field_map == nullptr) {
1876     _this->_inlined_field_map = new (mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1877   }
1878   _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1879 
1880   // counters for dumping classes with inlined fields
1881   _this->_classes_count++;
1882   _this->_inlined_fields_count += count;
1883 }
1884 
1885 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1886   InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1887   ClassLoaderDataGraph::classes_do(&nameDumper);
1888 
1889   if (_inlined_field_map != nullptr) {
1890     // prepare the map for  get_base_index_for().
1891     _inlined_field_map->sort(ClassInlinedFields::compare);
1892   }
1893 }
1894 
1895 uintx InlinedObjects::get_base_index_for(Klass* k) {
1896   if (_inlined_field_map != nullptr) {
1897     bool found = false;
1898     int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
1899     if (found) {
1900         return _inlined_field_map->at(idx).base_index;
1901     }
1902   }
1903 
1904   // return max_uintx, so get_next_string_id returns 0.
1905   return max_uintx;
1906 }
1907 
1908 uintx InlinedObjects::get_next_string_id(uintx id) {
1909   if (++id == _min_string_id) {
1910     return _max_string_id + 1;
1911   }
1912   return id;
1913 }
1914 
1915 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1916   if (_classes_count != 0) {
1917     // Record for each class contains tag(u1), class ID and count(u2)
1918     // for each inlined field index(u2), synthetic fields count(u2), original field name and class ID
1919     int size = _classes_count * (1 + sizeof(address) + 2)
1920              + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1921     DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1922 
1923     InlinedFieldsDumper dumper(writer);
1924     ClassLoaderDataGraph::classes_do(&dumper);
1925   }
1926 }
1927 
1928 void InlinedObjects::add_flat_array(oop array) {
1929   if (_flat_arrays == nullptr) {
1930     _flat_arrays = new (mtServiceability) GrowableArray<oop>(100, mtServiceability);
1931   }
1932   _flat_arrays->append(array);
1933 }
1934 
1935 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1936   if (_flat_arrays != nullptr) {
1937     // For each flat array the record contains tag (u1), object ID and class ID.
1938     int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1939 
1940     DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1941     for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1942       flatArrayOop array = flatArrayOop(*it);
1943       FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1944       InlineKlass* element_klass = array_klass->element_klass();
1945       writer->write_u1(HPROF_FLAT_ARRAY);
1946       writer->write_objectID(array);
1947       writer->write_classID(element_klass);
1948     }
1949   }
1950 }
1951 
1952 
1953 // Support class used to generate HPROF_UTF8 records from the entries in the
1954 // SymbolTable.
1955 
// SymbolClosure passed to SymbolTable::symbols_do(); do_symbol() (defined
// below) writes one HPROF_UTF8 record per Symbol.
class SymbolTableDumper : public SymbolClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() const                { return _writer; }
 public:
  SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
  void do_symbol(Symbol** p);
};
1964 
1965 void SymbolTableDumper::do_symbol(Symbol** p) {
1966   ResourceMark rm;
1967   Symbol* sym = *p;
1968   int len = sym->utf8_length();
1969   if (len > 0) {
1970     char* s = sym->as_utf8();
1971     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1972     writer()->write_symbolID(sym);

2424       return;
2425     }
2426   }
2427 
2428   if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2429     return;
2430   }
2431 
2432   if (o->is_instance()) {
2433     // create a HPROF_GC_INSTANCE record for each object
2434     DumperSupport::dump_instance(writer(), o, &_class_cache);
2435     // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2436     // (mounted virtual threads are dumped with their carriers).
2437     if (java_lang_VirtualThread::is_instance(o)
2438         && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2439       _vthread_dumper->dump_vthread(o, writer());
2440     }
2441   } else if (o->is_objArray()) {
2442     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2443     DumperSupport::dump_object_array(writer(), objArrayOop(o));
2444   } else if (o->is_flatArray()) {
2445     DumperSupport::dump_flat_array(writer(), flatArrayOop(o), &_class_cache);
2446   } else if (o->is_typeArray()) {
2447     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2448     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2449   }
2450 }
2451 
2452 // The dumper controller for parallel heap dump
2453 class DumperController : public CHeapObj<mtInternal> {
2454  private:
2455    Monitor* _lock;
2456    Mutex* _global_writer_lock;
2457 
2458    const uint   _dumper_number;
2459    uint   _complete_number;
2460 
2461    bool   _started; // VM dumper started and acquired global writer lock
2462 
2463  public:
2464    DumperController(uint number) :
2465      // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,

2505      _complete_number++;
2506      // propagate local error to global if any
2507      if (local_writer->has_error()) {
2508        global_writer->set_error(local_writer->error());
2509      }
2510      ml.notify();
2511    }
2512 
   // Blocks until every dumper has reported completion (each completion
   // increments _complete_number and notifies the monitor).
   void wait_all_dumpers_complete() {
     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
     while (_complete_number != _dumper_number) {
        ml.wait();
     }
   }
2519 };
2520 
// DumpMerger merges separate dump files into a complete one
class DumpMerger : public StackObj {
private:
  DumpWriter* _writer;               // destination writer for the merged dump
  InlinedObjects*  _inlined_objects; // released once the merge succeeds
  const char* _path;                 // base path; also used in error logging
  bool _has_error;                   // sticky error flag (seeded from _writer)
  int _dump_seq;                     // segment sequence number; reset in merge_done()

private:
  void merge_file(const char* path);
  void merge_done();
  void set_error(const char* msg);

public:
  DumpMerger(const char* path, DumpWriter* writer, InlinedObjects* inlined_objects, int dump_seq) :
    _writer(writer),
    _inlined_objects(inlined_objects),
    _path(path),
    _has_error(_writer->has_error()),
    _dump_seq(dump_seq) {}

  void do_merge();

  // returns path for the parallel DumpWriter (resource allocated)
  static char* get_writer_path(const char* base_path, int seq);

};
2549 
2550 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2551   // approximate required buffer size
2552   size_t buf_size = strlen(base_path)
2553                     + 2                 // ".p"
2554                     + 10                // number (that's enough for 2^32 parallel dumpers)
2555                     + 1;                // '\0'
2556 
2557   char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2558   memset(path, 0, buf_size);
2559 
2560   os::snprintf(path, buf_size, "%s.p%d", base_path, seq);
2561 
2562   return path;
2563 }
2564 
2565 
2566 void DumpMerger::merge_done() {
2567   // Writes the HPROF_HEAP_DUMP_END record.
2568   if (!_has_error) {
2569     DumperSupport::end_of_dump(_writer);
2570     _inlined_objects->dump_flat_arrays(_writer);
2571     _writer->flush();
2572     _inlined_objects->release();
2573   }
2574   _dump_seq = 0; //reset
2575 }
2576 
2577 void DumpMerger::set_error(const char* msg) {
2578   assert(msg != nullptr, "sanity check");
2579   log_error(heapdump)("%s (file: %s)", msg, _path);
2580   _writer->set_error(msg);
2581   _has_error = true;
2582 }
2583 
2584 #ifdef LINUX
2585 // Merge segmented heap files via sendfile, it's more efficient than the
2586 // read+write combination, which would require transferring data to and from
2587 // user space.
2588 void DumpMerger::merge_file(const char* path) {
2589   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2590 
2591   int segment_fd = os::open(path, O_RDONLY, 0);
2592   if (segment_fd == -1) {

2687   }
2688 };
2689 
2690 // The VM operation that performs the heap dump
2691 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
 private:
  // Singleton dumper/writer shared through the static accessors while the
  // VM operation is in progress (installed by set_global_dumper()).
  static VM_HeapDumper*   _global_dumper;
  static DumpWriter*      _global_writer;
  DumpWriter*             _local_writer;
  // Thread and constructor of the triggering OutOfMemoryError, if the dump
  // was requested on OOME — presumably used to mark the failing allocation
  // site; confirm at the use sites (not visible here).
  JavaThread*             _oome_thread;
  Method*                 _oome_constructor;
  bool                    _gc_before_heap_dump;
  // Classes recorded while writing HPROF_LOAD_CLASS records; owned by this
  // dumper (deleted in the destructor).
  GrowableArray<Klass*>*  _klass_map;

  ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
  int                     _thread_dumpers_count;
  volatile int            _thread_serial_num;
  volatile int            _frame_serial_num;

  // Next dumper id to hand out; after the dump it equals the number of
  // dumpers/segments produced (see get_next_dumper_id() and dump_seq()).
  volatile int            _dump_seq;

  // Inlined object support.
  InlinedObjects          _inlined_objects;

  // parallel heap dump support
  uint                    _num_dumper_threads;
  DumperController*       _dumper_controller;
  ParallelObjectIterator* _poi;
  // Dumper id reserved for the VMDumper thread.
  static const int VMDumperId = 0;
  // The VM dumper dumps both heap and non-heap data; all other dumpers dump
  // heap-only data. The first dumper to obtain an id (see
  // get_next_dumper_id()) becomes the VM dumper.
  static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
  // Hands out dumper ids; the 1st dumper calling get_next_dumper_id becomes
  // the VM dumper (id 0 == VMDumperId). As a side effect _dump_seq counts the
  // dumpers, which dump_seq() later reports to the merger.
  int get_next_dumper_id() {
    // Atomic claim: returns the pre-increment value as this dumper's id.
    return Atomic::fetch_then_add(&_dump_seq, 1);
  }
2724 
2725   // accessors and setters
  // Return the installed global dumper/writer; must only be called while
  // they are set (asserted non-null).
  static VM_HeapDumper* dumper()         {  assert(_global_dumper != nullptr, "Error"); return _global_dumper; }
  static DumpWriter* writer()            {  assert(_global_writer != nullptr, "Error"); return _global_writer; }
2728 
2729   void set_global_dumper() {
2730     assert(_global_dumper == nullptr, "Error");

2791   }
2792 
2793   ~VM_HeapDumper() {
2794     if (_thread_dumpers != nullptr) {
2795       for (int i = 0; i < _thread_dumpers_count; i++) {
2796         delete _thread_dumpers[i];
2797       }
2798       FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2799     }
2800 
2801     if (_dumper_controller != nullptr) {
2802       delete _dumper_controller;
2803       _dumper_controller = nullptr;
2804     }
2805     delete _klass_map;
2806   }
  // Number of dumper ids handed out, i.e. the number of dump segments written
  // (passed to DumpMerger after the VM operation completes).
  int dump_seq()           { return _dump_seq; }
  // A dump is parallel when more than one dumper thread participates.
  bool is_parallel_dump()  { return _num_dumper_threads > 1; }
2809   void prepare_parallel_dump(WorkerThreads* workers);
2810 
  // Accessor for the inlined-object support data (outlives the VM operation;
  // the merger reads it when emitting flat-array records).
  InlinedObjects* inlined_objects() { return &_inlined_objects; }
2812 
  // VM operation identification.
  VMOp_Type type() const { return VMOp_HeapDumper; }
2814   virtual bool doit_prologue();
2815   void doit();
2816   void work(uint worker_id);
2817 
2818   // UnmountedVThreadDumper implementation
2819   void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2820 };
2821 
// Definitions of the global dumper/writer installed while the VM operation
// runs; null outside that window (enforced by the asserting accessors).
VM_HeapDumper* VM_HeapDumper::_global_dumper = nullptr;
DumpWriter*    VM_HeapDumper::_global_writer = nullptr;
2824 
// The heap dump VM operation is never skipped — it always proceeds once
// scheduled.
bool VM_HeapDumper::skip_operation() const {
  return false;
}
2828 
2829 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2830 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2831   writer->finish_dump_segment();
2832 

2967     _dumper_controller->lock_global_writer();
2968     _dumper_controller->signal_start();
2969   } else {
2970     _dumper_controller->wait_for_start_signal();
2971   }
2972 
2973   if (is_vm_dumper(dumper_id)) {
2974     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2975     // Write the file header - we always use 1.0.2
2976     const char* header = "JAVA PROFILE 1.0.2";
2977 
2978     // header is few bytes long - no chance to overflow int
2979     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2980     writer()->write_u4(oopSize);
2981     // timestamp is current time in ms
2982     writer()->write_u8(os::javaTimeMillis());
2983     // HPROF_UTF8 records
2984     SymbolTableDumper sym_dumper(writer());
2985     SymbolTable::symbols_do(&sym_dumper);
2986 
2987     // HPROF_UTF8 records for inlined field names.
2988     inlined_objects()->init();
2989     inlined_objects()->dump_inlined_field_names(writer());
2990 
2991     // HPROF_INLINED_FIELDS
2992     inlined_objects()->dump_classed_with_inlined_fields(writer());
2993 
2994     // write HPROF_LOAD_CLASS records
2995     {
2996       LockedClassesDo locked_load_classes(&do_load_class);
2997       ClassLoaderDataGraph::classes_do(&locked_load_classes);
2998     }
2999 
3000     // write HPROF_FRAME and HPROF_TRACE records
3001     // this must be called after _klass_map is built when iterating the classes above.
3002     dump_stack_traces(writer());
3003 
3004     // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
3005     _dumper_controller->unlock_global_writer();
3006   }
3007 
3008   // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
3009 
3010   ResourceMark rm;
3011   // share global compressor, local DumpWriter is not responsible for its life cycle
3012   DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
3013                             writer()->is_overwrite(), writer()->compressor());

3161         (error() != nullptr) ? error() : "reason unknown");
3162     }
3163     return -1;
3164   }
3165 
3166   // generate the segmented heap dump into separate files
3167   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
3168   VMThread::execute(&dumper);
3169 
3170   // record any error that the writer may have encountered
3171   set_error(writer.error());
3172 
3173   // Heap dump process is done in two phases
3174   //
3175   // Phase 1: Concurrent threads directly write heap data to multiple heap files.
3176   //          This is done by VM_HeapDumper, which is performed within safepoint.
3177   //
3178   // Phase 2: Merge multiple heap files into one complete heap dump file.
3179   //          This is done by DumpMerger, which is performed outside safepoint
3180 
3181   DumpMerger merger(path, &writer, dumper.inlined_objects(), dumper.dump_seq());
3182   Thread* current_thread = Thread::current();
3183   if (current_thread->is_AttachListener_thread()) {
3184     // perform heapdump file merge operation in the current thread prevents us
3185     // from occupying the VM Thread, which in turn affects the occurrence of
3186     // GC and other VM operations.
3187     merger.do_merge();
3188   } else {
3189     // otherwise, performs it by VM thread
3190     VM_HeapDumpMerge op(&merger);
3191     VMThread::execute(&op);
3192   }
3193   if (writer.error() != nullptr) {
3194     set_error(writer.error());
3195   }
3196 
3197   // emit JFR event
3198   if (error() == nullptr) {
3199     event.set_destination(path);
3200     event.set_gcBeforeDump(_gc_before_heap_dump);
3201     event.set_size(writer.bytes_written());
< prev index next >