< prev index next >

src/hotspot/share/services/heapDumper.cpp

Print this page

  25 
  26 #include "precompiled.hpp"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "classfile/vmClasses.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/gcVMOperations.hpp"
  35 #include "gc/shared/workerThread.hpp"
  36 #include "jfr/jfrEvents.hpp"
  37 #include "jvm.h"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/fieldStreams.inline.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/objArrayOop.inline.hpp"


  45 #include "oops/oop.inline.hpp"
  46 #include "oops/typeArrayOop.inline.hpp"
  47 #include "runtime/continuationWrapper.inline.hpp"

  48 #include "runtime/frame.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/javaThread.inline.hpp"
  52 #include "runtime/jniHandles.hpp"
  53 #include "runtime/os.hpp"
  54 #include "runtime/threads.hpp"
  55 #include "runtime/threadSMR.hpp"
  56 #include "runtime/vframe.hpp"
  57 #include "runtime/vmOperations.hpp"
  58 #include "runtime/vmThread.hpp"
  59 #include "runtime/timerTrace.hpp"
  60 #include "services/heapDumper.hpp"
  61 #include "services/heapDumperCompression.hpp"
  62 #include "services/threadService.hpp"
  63 #include "utilities/checkedCast.hpp"
  64 #include "utilities/macros.hpp"
  65 #include "utilities/ostream.hpp"
  66 #ifdef LINUX
  67 #include "os_linux.hpp"

 299  *                                     7:  double array
 300  *                                     8:  byte array
 301  *                                     9:  short array
 302  *                                     10: int array
 303  *                                     11: long array
 304  *                          [u1]*      elements
 305  *
 306  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 307  *
 308  *                u4        total number of samples
 309  *                u4        # of traces
 310  *               [u4        # of samples
 311  *                u4]*      stack trace serial number
 312  *
 313  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 314  *
 315  *                u4        0x00000001: alloc traces on/off
 316  *                          0x00000002: cpu sampling on/off
 317  *                u2        stack trace depth
 318  *






















 319  *
 320  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 321  * be generated as a sequence of heap dump segments. This sequence is
 322  * terminated by an end record. The additional tags allowed by format
 323  * "JAVA PROFILE 1.0.2" are:
 324  *
 325  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 326  *
 327  *               [heap dump sub-records]*
 328  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 329  *
 330  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 331  *
 332  */
 333 
 334 
// HPROF tags
//
// NOTE: the value spaces below overlap deliberately — top-level record
// tags, field-type tags and heap-dump sub-record tags appear in different
// contexts of the HPROF stream, so e.g. 0x01 means HPROF_UTF8 as a
// top-level record but HPROF_ARRAY_OBJECT as a field type.

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types (only valid when the header is "JAVA PROFILE 1.0.2")
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};
 383 
// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,        // serial number written into every dump sub-record
  INITIAL_CLASS_COUNT = 200  // initial sizing hint for the class list — presumably
                             // for _klass_map; confirm at the allocation site
};
 389 

































































 390 // Supports I/O operations for a dump
 391 // Base class for dump and parallel dump
 392 class AbstractDumpWriter : public CHeapObj<mtInternal> {
 393  protected:
 394   enum {
 395     io_buffer_max_size = 1*M,
 396     dump_segment_header_size = 9
 397   };
 398 
 399   char* _buffer;    // internal buffer
 400   size_t _size;
 401   size_t _pos;
 402 
 403   bool _in_dump_segment; // Are we currently in a dump segment?
 404   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 405   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 406   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 407 
 408   char* buffer() const                          { return _buffer; }
 409   size_t buffer_size() const                    { return _size; }

 722   }
 723 }
 724 
 725 class DumperClassCacheTable;
 726 class DumperClassCacheTableEntry;
 727 
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // returns the size of the instance of the given class
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given object
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  // Returns o unchanged, or nullptr if o's klass has no java mirror yet;
  // callers use the nullptr result to skip such objects in the dump.
  static oop mask_dormant_archived_object(oop o) {
    if (o != nullptr && o->klass()->java_mirror() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      return nullptr;
    } else {
      return o;
    }
  }
};
 791 
// Hash table of klasses to the klass metadata. This should greatly improve the
// hash dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  GrowableArray<char> _sigs_start;  // first signature char per instance field
  GrowableArray<int> _offsets;      // field offset per instance field (parallel to _sigs_start)
  u4 _instance_size;                // total serialized size of the field values, in bytes
  int _entries;                     // number of cached instance fields

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};
 812 
 813 class DumperClassCacheTable {
 814 private:
 815   // ResourceHashtable SIZE is specified at compile time so we
 816   // use 1031 which is the first prime after 1024.
 817   static constexpr size_t TABLE_SIZE = 1031;
 818 
 819   // Maintain the cache for N classes. This limits memory footprint
 820   // impact, regardless of how many classes we have in the dump.
 821   // This also improves look up performance by keeping the statically
 822   // sized table from overloading.
 823   static constexpr int CACHE_TOP = 256;
 824 
 825   typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
 826                             TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
 827   PtrTable* _ptrs;
 828 

 837       bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
 838         delete entry;
 839         return true;
 840       }
 841     } cleanup;
 842     table->unlink(&cleanup);
 843   }
 844 
 845 public:
 846   DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
 847     if (_last_ik == ik) {
 848       return _last_entry;
 849     }
 850 
 851     DumperClassCacheTableEntry* entry;
 852     DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
 853     if (from_cache == nullptr) {
 854       entry = new DumperClassCacheTableEntry();
 855       for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
 856         if (!fld.access_flags().is_static()) {
 857           Symbol* sig = fld.signature();
 858           entry->_sigs_start.push(sig->char_at(0));









 859           entry->_offsets.push(fld.offset());
 860           entry->_entries++;
 861           entry->_instance_size += DumperSupport::sig2size(sig);
 862         }
 863       }
 864 
 865       if (_ptrs->number_of_entries() >= CACHE_TOP) {
 866         // We do not track the individual hit rates for table entries.
 867         // Purge the entire table, and let the cache catch up with new
 868         // distribution.
 869         unlink_all(_ptrs);
 870       }
 871 
 872       _ptrs->put(ik, entry);
 873     } else {
 874       entry = *from_cache;
 875     }
 876 
 877     // Remember for single-slot cache.
 878     _last_ik = ik;
 879     _last_entry = entry;
 880 
 881     return entry;

 951 }
 952 
 953 // dump a jfloat
 954 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
 955   if (g_isnan(f)) {
 956     writer->write_u4(0x7fc00000); // collapsing NaNs
 957   } else {
 958     writer->write_u4(bit_cast<u4>(f));
 959   }
 960 }
 961 
 962 // dump a jdouble
 963 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
 964   if (g_isnan(d)) {
 965     writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
 966   } else {
 967     writer->write_u8(bit_cast<u8>(d));
 968   }
 969 }
 970 

 971 // dumps the raw value of the given field
 972 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
 973   switch (type) {
 974     case JVM_SIGNATURE_CLASS :
 975     case JVM_SIGNATURE_ARRAY : {
 976       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
 977       if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
 978         ResourceMark rm;
 979         log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
 980                              p2i(o), o->klass()->external_name(),
 981                              p2i(obj), obj->klass()->external_name());
 982       }
 983       o = mask_dormant_archived_object(o);
 984       assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
 985       writer->write_objectID(o);
 986       break;
 987     }
 988     case JVM_SIGNATURE_BYTE : {
 989       jbyte b = obj->byte_field(offset);
 990       writer->write_u1(b);

1015       writer->write_u4(i);
1016       break;
1017     }
1018     case JVM_SIGNATURE_LONG : {
1019       jlong l = obj->long_field(offset);
1020       writer->write_u8(l);
1021       break;
1022     }
1023     case JVM_SIGNATURE_BOOLEAN : {
1024       jboolean b = obj->bool_field(offset);
1025       writer->write_u1(b);
1026       break;
1027     }
1028     default : {
1029       ShouldNotReachHere();
1030       break;
1031     }
1032   }
1033 }
1034 
1035 // returns the size of the instance of the given class
1036 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1037   if (class_cache_entry != nullptr) {
1038     return class_cache_entry->instance_size();
1039   } else {
1040     u4 size = 0;
1041     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1042       if (!fld.access_flags().is_static()) {
1043         size += sig2size(fld.signature());




1044       }
1045     }
1046     return size;
1047   }
1048 }
1049 
1050 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1051   field_count = 0;
1052   u4 size = 0;
1053 
1054   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1055     if (fldc.access_flags().is_static()) {


1056       field_count++;
1057       size += sig2size(fldc.signature());
1058     }
1059   }
1060 
1061   // Add in resolved_references which is referenced by the cpCache
1062   // The resolved_references is an array per InstanceKlass holding the
1063   // strings and other oops resolved from the constant pool.
1064   oop resolved_references = ik->constants()->resolved_references_or_null();
1065   if (resolved_references != nullptr) {
1066     field_count++;
1067     size += sizeof(address);
1068 
1069     // Add in the resolved_references of the used previous versions of the class
1070     // in the case of RedefineClasses
1071     InstanceKlass* prev = ik->previous_versions();
1072     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1073       field_count++;
1074       size += sizeof(address);
1075       prev = prev->previous_versions();
1076     }
1077   }
1078 
1079   // We write the value itself plus a name and a one byte type tag per field.
1080   return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1081 }
1082 
1083 // dumps static fields of the given class
1084 void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
1085   InstanceKlass* ik = InstanceKlass::cast(k);
1086 
1087   // dump the field descriptors and raw values
1088   for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1089     if (fld.access_flags().is_static()) {


1090       Symbol* sig = fld.signature();
1091 
1092       writer->write_symbolID(fld.name());   // name
1093       writer->write_u1(sig2tag(sig));       // type
1094 
1095       // value
1096       dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1097     }
1098   }
1099 
1100   // Add resolved_references for each class that has them
1101   oop resolved_references = ik->constants()->resolved_references_or_null();
1102   if (resolved_references != nullptr) {
1103     writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1104     writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1105     writer->write_objectID(resolved_references);
1106 
1107     // Also write any previous versions
1108     InstanceKlass* prev = ik->previous_versions();
1109     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1110       writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1111       writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1112       writer->write_objectID(prev->constants()->resolved_references());
1113       prev = prev->previous_versions();
1114     }
1115   }
1116 }
1117 
1118 // dump the raw values of the instance fields of the given object
1119 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {


1120   assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1121   for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1122     dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));







1123   }
1124 }
1125 
// returns the number of non-static fields declared directly by the given
// class (the original header comment was a copy-paste of the descriptor
// dumper's; this function only counts, it does not dump)
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
  u2 field_count = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (!fldc.access_flags().is_static()) field_count++;
  }

  return field_count;
}
1136 
1137 // dumps the definition of the instance fields for a given class
1138 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
1139   InstanceKlass* ik = InstanceKlass::cast(k);






1140 
1141   // dump the field descriptors
1142   for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1143     if (!fld.access_flags().is_static()) {
1144       Symbol* sig = fld.signature();






















1145 
1146       writer->write_symbolID(fld.name());   // name
1147       writer->write_u1(sig2tag(sig));       // type

1148     }
1149   }
1150 }
1151 
1152 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1153 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1154   InstanceKlass* ik = InstanceKlass::cast(o->klass());
1155 
1156   DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1157 
1158   u4 is = instance_size(ik, cache_entry);
1159   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1160 
1161   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1162   writer->write_objectID(o);
1163   writer->write_u4(STACK_TRACE_ID);
1164 
1165   // class ID
1166   writer->write_classID(ik);
1167 
1168   // number of bytes that follow
1169   writer->write_u4(is);
1170 
1171   // field values
1172   dump_instance_fields(writer, o, cache_entry);
1173 
1174   writer->end_sub_record();
1175 }
1176 
1177 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1178 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1179   InstanceKlass* ik = InstanceKlass::cast(k);
1180 
1181   // We can safepoint and do a heap dump at a point where we have a Klass,
1182   // but no java mirror class has been setup for it. So we need to check
1183   // that the class is at least loaded, to avoid crash from a null mirror.
1184   if (!ik->is_loaded()) {
1185     return;
1186   }
1187 
1188   u2 static_fields_count = 0;
1189   u4 static_size = get_static_fields_size(ik, static_fields_count);
1190   u2 instance_fields_count = get_instance_fields_count(ik);
1191   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1192   u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);

1197   writer->write_classID(ik);
1198   writer->write_u4(STACK_TRACE_ID);
1199 
1200   // super class ID
1201   InstanceKlass* java_super = ik->java_super();
1202   if (java_super == nullptr) {
1203     writer->write_objectID(oop(nullptr));
1204   } else {
1205     writer->write_classID(java_super);
1206   }
1207 
1208   writer->write_objectID(ik->class_loader());
1209   writer->write_objectID(ik->signers());
1210   writer->write_objectID(ik->protection_domain());
1211 
1212   // reserved
1213   writer->write_objectID(oop(nullptr));
1214   writer->write_objectID(oop(nullptr));
1215 
1216   // instance size
1217   writer->write_u4(DumperSupport::instance_size(ik));
1218 
1219   // size of constant pool - ignored by HAT 1.1
1220   writer->write_u2(0);
1221 
1222   // static fields
1223   writer->write_u2(static_fields_count);
1224   dump_static_fields(writer, ik);
1225 
1226   // description of instance fields
1227   writer->write_u2(instance_fields_count);
1228   dump_instance_field_descriptors(writer, ik);
1229 
1230   writer->end_sub_record();
1231 }
1232 
1233 // creates HPROF_GC_CLASS_DUMP record for the given array class
1234 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1235   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1236   if (k->is_objArray_klass()) {
1237     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();

1251   assert(java_super != nullptr, "checking");
1252   writer->write_classID(java_super);
1253 
1254   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1255   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1256   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1257 
1258   writer->write_objectID(oop(nullptr));    // reserved
1259   writer->write_objectID(oop(nullptr));
1260   writer->write_u4(0);             // instance size
1261   writer->write_u2(0);             // constant pool
1262   writer->write_u2(0);             // static fields
1263   writer->write_u2(0);             // instance fields
1264 
1265   writer->end_sub_record();
1266 
1267 }
1268 
1269 // Hprof uses an u4 as record length field,
1270 // which means we need to truncate arrays that are too long.
1271 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1272   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1273   assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
1274 
1275   int length = array->length();
1276 
1277   int type_size;
1278   if (type == T_OBJECT) {
1279     type_size = sizeof(address);
1280   } else {
1281     type_size = type2aelembytes(type);
1282   }
1283 
1284   size_t length_in_bytes = (size_t)length * type_size;
1285   uint max_bytes = max_juint - header_size;
1286 
1287   if (length_in_bytes > max_bytes) {
1288     length = max_bytes / type_size;
1289     length_in_bytes = (size_t)length * type_size;
1290 

1291     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1292             type2name_tab[type], array->length(), length);
1293   }
1294   return length;
1295 }
1296 
















1297 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1298 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1299   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1300   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1301   int length = calculate_array_max_length(writer, array, header_size);
1302   u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1303 
1304   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1305   writer->write_objectID(array);
1306   writer->write_u4(STACK_TRACE_ID);
1307   writer->write_u4(length);
1308 
1309   // array class ID
1310   writer->write_classID(array->klass());
1311 
1312   // [id]* elements
1313   for (int index = 0; index < length; index++) {
1314     oop o = array->obj_at(index);
1315     if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
1316       ResourceMark rm;
1317       log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
1318                            p2i(o), o->klass()->external_name(),
1319                            p2i(array), array->klass()->external_name());
1320     }
1321     o = mask_dormant_archived_object(o);
1322     writer->write_objectID(o);
1323   }
1324 
1325   writer->end_sub_record();
1326 }
1327 









































// Writes the elements of a primitive array: reads each element through the
// typed accessor (Type##_at) and emits it via the writer's write_##Size
// method, casting to the HPROF element width. Expects a local named
// 'writer' to be in scope at the expansion site.
#define WRITE_ARRAY(Array, Type, Size, Length) \
  for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1330 
1331 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1332 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1333   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1334   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1335   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1336 
1337   int length = calculate_array_max_length(writer, array, header_size);
1338   int type_size = type2aelembytes(type);
1339   u4 length_in_bytes = (u4)length * type_size;
1340   u4 size = header_size + length_in_bytes;
1341 
1342   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1343   writer->write_objectID(array);
1344   writer->write_u4(STACK_TRACE_ID);
1345   writer->write_u4(length);
1346   writer->write_u1(type2tag(type));
1347 

1429                                      int bci) {
1430   int line_number;
1431   if (m->is_native()) {
1432     line_number = -3;  // native frame
1433   } else {
1434     line_number = m->line_number_from_bci(bci);
1435   }
1436 
1437   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1438   writer->write_id(frame_serial_num);               // frame serial number
1439   writer->write_symbolID(m->name());                // method's name
1440   writer->write_symbolID(m->signature());           // method's signature
1441 
1442   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1443   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1444   writer->write_u4(class_serial_num);               // class serial number
1445   writer->write_u4((u4) line_number);               // line number
1446 }
1447 
1448 








































































































































































































































































1449 // Support class used to generate HPROF_UTF8 records from the entries in the
1450 // SymbolTable.
1451 
1452 class SymbolTableDumper : public SymbolClosure {
1453  private:
1454   AbstractDumpWriter* _writer;
1455   AbstractDumpWriter* writer() const                { return _writer; }
1456  public:
1457   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
1458   void do_symbol(Symbol** p);
1459 };
1460 
1461 void SymbolTableDumper::do_symbol(Symbol** p) {
1462   ResourceMark rm;
1463   Symbol* sym = *p;
1464   int len = sym->utf8_length();
1465   if (len > 0) {
1466     char* s = sym->as_utf8();
1467     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1468     writer()->write_symbolID(sym);

1879 
1880 void HeapObjectDumper::do_object(oop o) {
1881   // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
1882   if (o->klass() == vmClasses::Class_klass()) {
1883     if (!java_lang_Class::is_primitive(o)) {
1884       return;
1885     }
1886   }
1887 
1888   if (DumperSupport::mask_dormant_archived_object(o) == nullptr) {
1889     log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
1890     return;
1891   }
1892 
1893   if (o->is_instance()) {
1894     // create a HPROF_GC_INSTANCE record for each object
1895     DumperSupport::dump_instance(writer(), o, &_class_cache);
1896   } else if (o->is_objArray()) {
1897     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
1898     DumperSupport::dump_object_array(writer(), objArrayOop(o));


1899   } else if (o->is_typeArray()) {
1900     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
1901     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
1902   }
1903 }
1904 
1905 // The dumper controller for parallel heap dump
1906 class DumperController : public CHeapObj<mtInternal> {
1907  private:
1908    Monitor* _lock;
1909    const uint   _dumper_number;
1910    uint   _complete_number;
1911 
1912  public:
1913    DumperController(uint number) :
1914      _lock(new (std::nothrow) PaddedMonitor(Mutex::safepoint, "DumperController_lock")),
1915      _dumper_number(number),
1916      _complete_number(0) { }
1917 
1918    ~DumperController() { delete _lock; }

1922      _complete_number++;
1923      // propagate local error to global if any
1924      if (local_writer->has_error()) {
1925        global_writer->set_error(local_writer->error());
1926      }
1927      ml.notify();
1928    }
1929 
1930    void wait_all_dumpers_complete() {
1931      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
1932      while (_complete_number != _dumper_number) {
1933         ml.wait();
1934      }
1935    }
1936 };
1937 
// DumpMerger merges separate dump files into a complete one
class DumpMerger : public StackObj {
private:
  DumpWriter* _writer;  // destination writer for the merged dump
  const char* _path;    // path used in error reporting (see set_error)
  bool _has_error;      // sticky error flag; seeded from the writer's state
  int _dump_seq;        // segment sequence count — presumably the number of
                        // segment files to merge; reset in merge_done()

private:
  // merges one segment file into the destination writer
  void merge_file(char* path);
  // finishes the merge: writes HPROF_HEAP_DUMP_END and flushes
  void merge_done();
  // logs msg, propagates it to the writer and sets _has_error
  void set_error(const char* msg);

public:
  DumpMerger(const char* path, DumpWriter* writer, int dump_seq) :
    _writer(writer),
    _path(path),
    _has_error(_writer->has_error()),
    _dump_seq(dump_seq) {}

  void do_merge();
};
1960 
// Finalizes the merged dump: unless an error occurred, writes the
// HPROF_HEAP_DUMP_END record and flushes the writer.
void DumpMerger::merge_done() {
  // Writes the HPROF_HEAP_DUMP_END record.
  if (!_has_error) {
    DumperSupport::end_of_dump(_writer);
    _writer->flush();
  }
  _dump_seq = 0; //reset
}
1969 
1970 void DumpMerger::set_error(const char* msg) {
1971   assert(msg != nullptr, "sanity check");
1972   log_error(heapdump)("%s (file: %s)", msg, _path);
1973   _writer->set_error(msg);
1974   _has_error = true;
1975 }
1976 
1977 #ifdef LINUX
1978 // Merge segmented heap files via sendfile, it's more efficient than the
1979 // read+write combination, which would require transferring data to and from
1980 // user space.
1981 void DumpMerger::merge_file(char* path) {
1982   assert(!SafepointSynchronize::is_at_safepoint(), "merging happens outside safepoint");
1983   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
1984 
1985   int segment_fd = os::open(path, O_RDONLY, 0);

2084   }
2085 };
2086 
2087 // The VM operation that performs the heap dump
2088 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
2089  private:
2090   static VM_HeapDumper*   _global_dumper;
2091   static DumpWriter*      _global_writer;
2092   DumpWriter*             _local_writer;
2093   JavaThread*             _oome_thread;
2094   Method*                 _oome_constructor;
2095   bool                    _gc_before_heap_dump;
2096   GrowableArray<Klass*>*  _klass_map;
2097 
2098   ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
2099   int                     _thread_dumpers_count;
2100   volatile int            _thread_serial_num;
2101   volatile int            _frame_serial_num;
2102 
2103   volatile int            _dump_seq;




2104   // parallel heap dump support
2105   uint                    _num_dumper_threads;
2106   DumperController*       _dumper_controller;
2107   ParallelObjectIterator* _poi;
2108   // worker id of VMDumper thread.
2109   static const size_t VMDumperWorkerId = 0;
2110   // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2111   static bool is_vm_dumper(uint worker_id) { return worker_id == VMDumperWorkerId; }
2112 
  // accessors and setters
  // The global dumper/writer are only valid while a dump operation is active;
  // the asserts enforce that they have been installed first.
  static VM_HeapDumper* dumper()         {  assert(_global_dumper != nullptr, "Error"); return _global_dumper; }
  static DumpWriter* writer()            {  assert(_global_writer != nullptr, "Error"); return _global_writer; }

  // Installs this operation as the process-wide dumper; only one may be active.
  void set_global_dumper() {
    assert(_global_dumper == nullptr, "Error");
    _global_dumper = this;
  }
2121   void set_global_writer() {
2122     assert(_global_writer == nullptr, "Error");
2123     _global_writer = _local_writer;

2185   }
2186 
  ~VM_HeapDumper() {
    // Release the per-thread dumpers (platform, carrier and mounted virtual
    // threads) and the C-heap array that holds them.
    if (_thread_dumpers != nullptr) {
      for (int i = 0; i < _thread_dumpers_count; i++) {
        delete _thread_dumpers[i];
      }
      FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
    }

    // Parallel-dump support object; may be null (see _num_dumper_threads).
    if (_dumper_controller != nullptr) {
      delete _dumper_controller;
      _dumper_controller = nullptr;
    }
    delete _klass_map;
  }
  // Sequence number of generated dump segments (consumed by DumpMerger).
  int dump_seq()           { return _dump_seq; }
  // A dump is parallel when more than one dumper thread participates.
  bool is_parallel_dump()  { return _num_dumper_threads > 1; }
  bool can_parallel_dump(WorkerThreads* workers);
2204 


2205   VMOp_Type type() const { return VMOp_HeapDumper; }
2206   virtual bool doit_prologue();
2207   void doit();
2208   void work(uint worker_id);
2209 };
2210 
2211 VM_HeapDumper* VM_HeapDumper::_global_dumper = nullptr;
2212 DumpWriter*    VM_HeapDumper::_global_writer = nullptr;
2213 
// The heap dump VM operation is never skipped.
bool VM_HeapDumper::skip_operation() const {
  return false;
}
2217 
2218 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2219 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2220   writer->finish_dump_segment();
2221 
2222   writer->write_u1(HPROF_HEAP_DUMP_END);
2223   writer->write_u4(0);
2224   writer->write_u4(0);

2382   DumpWriter* local_writer = new DumpWriter(path, writer()->is_overwrite(), compressor);
2383   return local_writer;
2384 }
2385 
2386 void VM_HeapDumper::work(uint worker_id) {
2387   // VM Dumper works on all non-heap data dumping and part of heap iteration.
2388   if (is_vm_dumper(worker_id)) {
2389     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2390     // Write the file header - we always use 1.0.2
2391     const char* header = "JAVA PROFILE 1.0.2";
2392 
2393     // header is few bytes long - no chance to overflow int
2394     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2395     writer()->write_u4(oopSize);
2396     // timestamp is current time in ms
2397     writer()->write_u8(os::javaTimeMillis());
2398     // HPROF_UTF8 records
2399     SymbolTableDumper sym_dumper(writer());
2400     SymbolTable::symbols_do(&sym_dumper);
2401 







2402     // write HPROF_LOAD_CLASS records
2403     {
2404       LockedClassesDo locked_load_classes(&do_load_class);
2405       ClassLoaderDataGraph::classes_do(&locked_load_classes);
2406     }
2407 
2408     // write HPROF_FRAME and HPROF_TRACE records
2409     // this must be called after _klass_map is built when iterating the classes above.
2410     dump_stack_traces();
2411 
2412     // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2413 
2414     // Writes HPROF_GC_CLASS_DUMP records
2415     {
2416       LockedClassesDo locked_dump_class(&do_class_dump);
2417       ClassLoaderDataGraph::classes_do(&locked_dump_class);
2418     }
2419 
2420     // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
2421     dump_threads();

2433     ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
2434   }
2435 
2436   // Heap iteration.
2437   // writes HPROF_GC_INSTANCE_DUMP records.
2438   // After each sub-record is written check_segment_length will be invoked
2439   // to check if the current segment exceeds a threshold. If so, a new
2440   // segment is started.
2441   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2442   // of the heap dump.
2443   if (!is_parallel_dump()) {
2444     assert(is_vm_dumper(worker_id), "must be");
2445     // == Serial dump
2446     ResourceMark rm;
2447     TraceTime timer("Dump heap objects", TRACETIME_LOG(Info, heapdump));
2448     HeapObjectDumper obj_dumper(writer());
2449     Universe::heap()->object_iterate(&obj_dumper);
2450     writer()->finish_dump_segment();
2451     // Writes the HPROF_HEAP_DUMP_END record because merge does not happen in serial dump
2452     DumperSupport::end_of_dump(writer());

2453     writer()->flush();

2454   } else {
2455     // == Parallel dump
2456     ResourceMark rm;
2457     TraceTime timer("Dump heap objects in parallel", TRACETIME_LOG(Info, heapdump));
2458     DumpWriter* local_writer = is_vm_dumper(worker_id) ? writer() : create_local_writer();
2459     if (!local_writer->has_error()) {
2460       HeapObjectDumper obj_dumper(local_writer);
2461       _poi->object_iterate(&obj_dumper, worker_id);
2462       local_writer->finish_dump_segment();
2463       local_writer->flush();
2464     }
2465     if (is_vm_dumper(worker_id)) {
2466       _dumper_controller->wait_all_dumpers_complete();
2467     } else {
2468       _dumper_controller->dumper_complete(local_writer, writer());
2469       delete local_writer;
2470       return;
2471     }
2472   }
2473   // At this point, all fragments of the heapdump have been written to separate files.

2552     return -1;
2553   }
2554 
2555   // generate the segmented heap dump into separate files
2556   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
2557   VMThread::execute(&dumper);
2558 
2559   // record any error that the writer may have encountered
2560   set_error(writer.error());
2561 
2562   // For serial dump, once VM_HeapDumper completes, the whole heap dump process
2563   // is done, no further phases needed. For parallel dump, the whole heap dump
2564   // process is done in two phases
2565   //
2566   // Phase 1: Concurrent threads directly write heap data to multiple heap files.
2567   //          This is done by VM_HeapDumper, which is performed within safepoint.
2568   //
2569   // Phase 2: Merge multiple heap files into one complete heap dump file.
2570   //          This is done by DumpMerger, which is performed outside safepoint
2571   if (dumper.is_parallel_dump()) {
2572     DumpMerger merger(path, &writer, dumper.dump_seq());
2573     Thread* current_thread = Thread::current();
2574     if (current_thread->is_AttachListener_thread()) {
2575       // perform heapdump file merge operation in the current thread prevents us
2576       // from occupying the VM Thread, which in turn affects the occurrence of
2577       // GC and other VM operations.
2578       merger.do_merge();
2579     } else {
2580       // otherwise, performs it by VM thread
2581       VM_HeapDumpMerge op(&merger);
2582       VMThread::execute(&op);
2583     }
2584     set_error(writer.error());
2585   }
2586 
2587   // emit JFR event
2588   if (error() == nullptr) {
2589     event.set_destination(path);
2590     event.set_gcBeforeDump(_gc_before_heap_dump);
2591     event.set_size(writer.bytes_written());
2592     event.set_onOutOfMemoryError(_oome);

  25 
  26 #include "precompiled.hpp"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "classfile/vmClasses.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/gcVMOperations.hpp"
  35 #include "gc/shared/workerThread.hpp"
  36 #include "jfr/jfrEvents.hpp"
  37 #include "jvm.h"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/fieldStreams.inline.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/objArrayOop.inline.hpp"
  45 #include "oops/flatArrayKlass.hpp"
  46 #include "oops/flatArrayOop.inline.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "oops/typeArrayOop.inline.hpp"
  49 #include "runtime/continuationWrapper.inline.hpp"
  50 #include "runtime/fieldDescriptor.inline.hpp"
  51 #include "runtime/frame.inline.hpp"
  52 #include "runtime/handles.inline.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/javaThread.inline.hpp"
  55 #include "runtime/jniHandles.hpp"
  56 #include "runtime/os.hpp"
  57 #include "runtime/threads.hpp"
  58 #include "runtime/threadSMR.hpp"
  59 #include "runtime/vframe.hpp"
  60 #include "runtime/vmOperations.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "runtime/timerTrace.hpp"
  63 #include "services/heapDumper.hpp"
  64 #include "services/heapDumperCompression.hpp"
  65 #include "services/threadService.hpp"
  66 #include "utilities/checkedCast.hpp"
  67 #include "utilities/macros.hpp"
  68 #include "utilities/ostream.hpp"
  69 #ifdef LINUX
  70 #include "os_linux.hpp"

 302  *                                     7:  double array
 303  *                                     8:  byte array
 304  *                                     9:  short array
 305  *                                     10: int array
 306  *                                     11: long array
 307  *                          [u1]*      elements
 308  *
 309  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 310  *
 311  *                u4        total number of samples
 312  *                u4        # of traces
 313  *               [u4        # of samples
 314  *                u4]*      stack trace serial number
 315  *
 316  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 317  *
 318  *                u4        0x00000001: alloc traces on/off
 319  *                          0x00000002: cpu sampling on/off
 320  *                u2        stack trace depth
 321  *
 322  * HPROF_FLAT_ARRAYS        list of flat arrays
 323  *
 324  *               [flat array sub-records]*
 325  *
 326  *               HPROF_FLAT_ARRAY      flat array
 327  *
 328  *                          id         array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
 329  *                          id         element class ID (dumped by HPROF_GC_CLASS_DUMP)
 330  *
 * HPROF_INLINED_FIELDS     describes inlined fields
 332  *
 333  *               [class with inlined fields sub-records]*
 334  *
 335  *               HPROF_CLASS_WITH_INLINED_FIELDS
 336  *
 337  *                          id         class ID (dumped as HPROF_GC_CLASS_DUMP)
 338  *
 339  *                          u2         number of instance inlined fields (not including super)
 340  *                          [u2,       inlined field index,
 341  *                           u2,       synthetic field count,
 342  *                           id,       original field name,
 343  *                           id]*      inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
 344  *
 345  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 346  * be generated as a sequence of heap dump segments. This sequence is
 347  * terminated by an end record. The additional tags allowed by format
 348  * "JAVA PROFILE 1.0.2" are:
 349  *
 350  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 351  *
 352  *               [heap dump sub-records]*
 353  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 354  *
 355  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 356  *
 357  */
 358 
 359 
 360 // HPROF tags
 361 
enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // inlined object support
  HPROF_FLAT_ARRAYS             = 0x12,
  HPROF_INLINED_FIELDS          = 0x13,
  // inlined object subrecords
  // (these appear only nested inside HPROF_FLAT_ARRAYS / HPROF_INLINED_FIELDS
  //  records, so their values may legally overlap the top-level tag values)
  HPROF_FLAT_ARRAY                  = 0x01,
  HPROF_CLASS_WITH_INLINED_FIELDS   = 0x01,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};
 415 
// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200   // presumably initial capacity of class bookkeeping — confirm at use sites
};
 421 
 422 
 423 class AbstractDumpWriter;
 424 
// Bookkeeping for inlined (flattened) objects and flat arrays encountered
// during the dump; written out via the HPROF_FLAT_ARRAYS /
// HPROF_INLINED_FIELDS records described in the file header comment.
class InlinedObjects {

  // Associates a class with the base index of its inlined field names.
  struct ClassInlinedFields {
    const Klass *klass;
    uintx base_index;   // base index of the inlined field names (1st field has index base_index+1).
    ClassInlinedFields(const Klass *klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}

    // For GrowableArray::find_sorted().
    // NOTE(review): pointer difference narrowed to int — ordering is only
    // consistent, not meaningful; confirm the narrowing cannot misorder
    // entries on 64-bit heaps.
    static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
      return a.klass - b.klass;
    }
    // For GrowableArray::sort().
    static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
      return compare(*a, *b);
    }
  };

  // Range of synthetic string IDs — presumably for inlined field names; see
  // get_next_string_id()/dump_inlined_field_names() — TODO confirm.
  uintx _min_string_id;
  uintx _max_string_id;

  GrowableArray<ClassInlinedFields> *_inlined_field_map;

  // counters for classes with inlined fields and for the fields
  int _classes_count;
  int _inlined_fields_count;

  // Process-wide singleton, accessed via get_instance().
  static InlinedObjects *_instance;

  static void inlined_field_names_callback(InlinedObjects* _this, const Klass *klass, uintx base_index, int count);

  GrowableArray<oop> *_flat_arrays;

public:
  InlinedObjects()
    : _min_string_id(0), _max_string_id(0),
    _inlined_field_map(nullptr),
    _classes_count(0), _inlined_fields_count(0),
    _flat_arrays(nullptr) {
  }

  static InlinedObjects* get_instance() {
    return _instance;
  }

  // Singleton lifecycle.
  void init();
  void release();

  void dump_inlined_field_names(AbstractDumpWriter *writer);

  uintx get_base_index_for(Klass* k);
  uintx get_next_string_id(uintx id);

  void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);

  // Records a flat array so it can later be emitted via dump_flat_arrays().
  void add_flat_array(oop array);
  void dump_flat_arrays(AbstractDumpWriter* writer);

};
 483 
 484 InlinedObjects *InlinedObjects::_instance = nullptr;
 485 
 486 
 487 // Supports I/O operations for a dump
 488 // Base class for dump and parallel dump
 489 class AbstractDumpWriter : public CHeapObj<mtInternal> {
 490  protected:
 491   enum {
 492     io_buffer_max_size = 1*M,
 493     dump_segment_header_size = 9
 494   };
 495 
 496   char* _buffer;    // internal buffer
 497   size_t _size;
 498   size_t _pos;
 499 
 500   bool _in_dump_segment; // Are we currently in a dump segment?
 501   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 502   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 503   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 504 
 505   char* buffer() const                          { return _buffer; }
 506   size_t buffer_size() const                    { return _size; }

 819   }
 820 }
 821 
 822 class DumperClassCacheTable;
 823 class DumperClassCacheTableEntry;
 824 
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // calculates the total size of all fields of the given class.
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given identity or inlined object;
  // for identity objects offset is 0 and 'klass' is o->klass(),
  // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);
  // dump the raw values of the instance fields of the given inlined object;
  // dump_instance_fields wrapper for inlined objects
  static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);

  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k, uintx *inlined_fields_index = nullptr);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
  static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
  // extended version to dump flat arrays as primitive arrays;
  // type_size specifies size of the inlined objects.
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  // Returns o, or nullptr when o's java mirror has not been loaded
  // (i.e. o might be a dormant archived object that must not be dumped).
  static oop mask_dormant_archived_object(oop o) {
    if (o != nullptr && o->klass()->java_mirror() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      return nullptr;
    } else {
      return o;
    }
  }

  // helper methods for inlined fields.
  static bool is_inlined_field(const fieldDescriptor& fld) {
    return fld.is_flat();
  }
  static InlineKlass* get_inlined_field_klass(const fieldDescriptor& fld) {
    assert(is_inlined_field(fld), "must be inlined field");
    InstanceKlass* holder_klass = fld.field_holder();
    return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
  }
};
 909 
// Hash table of klasses to the klass metadata. This should greatly improve the
// hash dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  // Parallel arrays, one slot per instance field:
  GrowableArray<char> _sigs_start;             // first signature char; 'Q' marks an inlined (flattened) field
  GrowableArray<int> _offsets;                 // field offset within the holder object
  GrowableArray<InlineKlass*> _inline_klasses; // inline klass for inlined fields, null otherwise
  u4 _instance_size;                           // cumulative dumped size of all instance fields
  int _entries;                                // number of fields recorded

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  void push_sig_start_inlined() { _sigs_start.push('Q'); }
  bool is_inlined(int field_idx){ return _sigs_start.at(field_idx) == 'Q'; }
  InlineKlass* inline_klass(int field_idx) { assert(is_inlined(field_idx), "Not inlined"); return _inline_klasses.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};
 934 
 935 class DumperClassCacheTable {
 936 private:
 937   // ResourceHashtable SIZE is specified at compile time so we
 938   // use 1031 which is the first prime after 1024.
 939   static constexpr size_t TABLE_SIZE = 1031;
 940 
 941   // Maintain the cache for N classes. This limits memory footprint
 942   // impact, regardless of how many classes we have in the dump.
 943   // This also improves look up performance by keeping the statically
 944   // sized table from overloading.
 945   static constexpr int CACHE_TOP = 256;
 946 
 947   typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
 948                             TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
 949   PtrTable* _ptrs;
 950 

 959       bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
 960         delete entry;
 961         return true;
 962       }
 963     } cleanup;
 964     table->unlink(&cleanup);
 965   }
 966 
 967 public:
 968   DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
 969     if (_last_ik == ik) {
 970       return _last_entry;
 971     }
 972 
 973     DumperClassCacheTableEntry* entry;
 974     DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
 975     if (from_cache == nullptr) {
 976       entry = new DumperClassCacheTableEntry();
 977       for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
 978         if (!fld.access_flags().is_static()) {
 979           InlineKlass* inlineKlass = nullptr;
 980           if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
 981             inlineKlass = DumperSupport::get_inlined_field_klass(fld.field_descriptor());
 982             entry->push_sig_start_inlined();
 983             entry->_instance_size += DumperSupport::instance_size(inlineKlass);
 984           } else {
 985             Symbol* sig = fld.signature();
 986             entry->_sigs_start.push(sig->char_at(0));
 987             entry->_instance_size += DumperSupport::sig2size(sig);
 988           }
 989           entry->_inline_klasses.push(inlineKlass);
 990           entry->_offsets.push(fld.offset());
 991           entry->_entries++;

 992         }
 993       }
 994 
 995       if (_ptrs->number_of_entries() >= CACHE_TOP) {
 996         // We do not track the individual hit rates for table entries.
 997         // Purge the entire table, and let the cache catch up with new
 998         // distribution.
 999         unlink_all(_ptrs);
1000       }
1001 
1002       _ptrs->put(ik, entry);
1003     } else {
1004       entry = *from_cache;
1005     }
1006 
1007     // Remember for single-slot cache.
1008     _last_ik = ik;
1009     _last_entry = entry;
1010 
1011     return entry;

1081 }
1082 
1083 // dump a jfloat
1084 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
1085   if (g_isnan(f)) {
1086     writer->write_u4(0x7fc00000); // collapsing NaNs
1087   } else {
1088     writer->write_u4(bit_cast<u4>(f));
1089   }
1090 }
1091 
1092 // dump a jdouble
1093 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
1094   if (g_isnan(d)) {
1095     writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
1096   } else {
1097     writer->write_u8(bit_cast<u8>(d));
1098   }
1099 }
1100 
1101 
1102 // dumps the raw value of the given field
1103 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
1104   switch (type) {
1105     case JVM_SIGNATURE_CLASS :
1106     case JVM_SIGNATURE_ARRAY : {
1107       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
1108       if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
1109         ResourceMark rm;
1110         log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
1111                              p2i(o), o->klass()->external_name(),
1112                              p2i(obj), obj->klass()->external_name());
1113       }
1114       o = mask_dormant_archived_object(o);
1115       assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
1116       writer->write_objectID(o);
1117       break;
1118     }
1119     case JVM_SIGNATURE_BYTE : {
1120       jbyte b = obj->byte_field(offset);
1121       writer->write_u1(b);

1146       writer->write_u4(i);
1147       break;
1148     }
1149     case JVM_SIGNATURE_LONG : {
1150       jlong l = obj->long_field(offset);
1151       writer->write_u8(l);
1152       break;
1153     }
1154     case JVM_SIGNATURE_BOOLEAN : {
1155       jboolean b = obj->bool_field(offset);
1156       writer->write_u1(b);
1157       break;
1158     }
1159     default : {
1160       ShouldNotReachHere();
1161       break;
1162     }
1163   }
1164 }
1165 
1166 // calculates the total size of the all fields of the given class.
1167 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1168   if (class_cache_entry != nullptr) {
1169     return class_cache_entry->instance_size();
1170   } else {
1171     u4 size = 0;
1172     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1173       if (!fld.access_flags().is_static()) {
1174         if (is_inlined_field(fld.field_descriptor())) {
1175           size += instance_size(get_inlined_field_klass(fld.field_descriptor()));
1176         } else {
1177           size += sig2size(fld.signature());
1178         }
1179       }
1180     }
1181     return size;
1182   }
1183 }
1184 
1185 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1186   field_count = 0;
1187   u4 size = 0;
1188 
1189   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1190     if (fldc.access_flags().is_static()) {
1191       assert(!is_inlined_field(fldc.field_descriptor()), "static fields cannot be inlined");
1192 
1193       field_count++;
1194       size += sig2size(fldc.signature());
1195     }
1196   }
1197 
1198   // Add in resolved_references which is referenced by the cpCache
1199   // The resolved_references is an array per InstanceKlass holding the
1200   // strings and other oops resolved from the constant pool.
1201   oop resolved_references = ik->constants()->resolved_references_or_null();
1202   if (resolved_references != nullptr) {
1203     field_count++;
1204     size += sizeof(address);
1205 
1206     // Add in the resolved_references of the used previous versions of the class
1207     // in the case of RedefineClasses
1208     InstanceKlass* prev = ik->previous_versions();
1209     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1210       field_count++;
1211       size += sizeof(address);
1212       prev = prev->previous_versions();
1213     }
1214   }
1215 
1216   // We write the value itself plus a name and a one byte type tag per field.
1217   return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1218 }
1219 
// dumps static fields of the given class
// NOTE: the fields written here must match, in number and order, what
// get_static_fields_size() accounted for (including the synthetic
// resolved_references entries), or the record length will be wrong.
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!is_inlined_field(fld.field_descriptor()), "static fields cannot be inlined");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value (read from the class's java mirror at the field's offset)
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions (from RedefineClasses)
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }
}
1256 
1257 // dump the raw values of the instance fields of the given identity or inlined object;
1258 // for identity objects offset is 0 and 'klass' is o->klass(),
1259 // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class.
1260 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
1261   assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1262   for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1263     if (class_cache_entry->is_inlined(idx)) {
1264       InlineKlass* field_klass = class_cache_entry->inline_klass(idx);
1265       int fields_offset = offset + (class_cache_entry->offset(idx) - field_klass->first_field_offset());
1266       DumperClassCacheTableEntry* inline_class_cache_entry = class_cache->lookup_or_create(field_klass);
1267       dump_inlined_object_fields(writer, o, fields_offset, class_cache, inline_class_cache_entry);
1268     } else {
1269       dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
1270     }
1271   }
1272 }
1273 
// Dumps the raw field values of an inlined object located at 'offset'
// within the holder 'o'.
void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
  // the object is inlined, so all its fields are stored without headers.
  dump_instance_fields(writer, o, offset, class_cache, class_cache_entry);
}
1278 
1279 // gets the count of the instance fields for a given class
1280 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1281   u2 field_count = 0;
1282 
1283   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1284     if (!fldc.access_flags().is_static()) {
1285       if (is_inlined_field(fldc.field_descriptor())) {
1286         // add "synthetic" fields for inlined fields.
1287         field_count += get_instance_fields_count(get_inlined_field_klass(fldc.field_descriptor()));
1288       } else {
1289         field_count++;
1290       }
1291     }
1292   }
1293 
1294   return field_count;
1295 }
1296 
// dumps the definition of the instance fields for a given class
// inlined_fields_id is not-nullptr for inlined fields (to get synthetic field name IDs
// by using InlinedObjects::get_next_string_id()).
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
  // inlined_fields_id != nullptr means ik is a class of inlined field.
  // Inlined field id pointer for this class; lazily initialized
  // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
  uintx *this_klass_inlined_fields_id = inlined_fields_id;
  uintx inlined_id = 0;

  // dump the field descriptors
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      if (is_inlined_field(fld.field_descriptor())) {
        // dump "synthetic" fields for inlined fields.
        if (this_klass_inlined_fields_id == nullptr) {
          // Lazily look up the base string ID registered for this class by
          // InlinedObjects::dump_inlined_field_names().
          inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
          this_klass_inlined_fields_id = &inlined_id;
        }
        // Recurse: the inlined class's fields are emitted in place, consuming
        // consecutive synthetic name IDs through this_klass_inlined_fields_id.
        dump_instance_field_descriptors(writer, get_inlined_field_klass(fld.field_descriptor()), this_klass_inlined_fields_id);
      } else {
        Symbol* sig = fld.signature();
        Symbol* name = nullptr;
        // Use inlined_fields_id provided by caller.
        if (inlined_fields_id != nullptr) {
          uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);

          // name_id == 0 is returned on error. use original field signature.
          if (name_id != 0) {
            *inlined_fields_id = name_id;
            // Synthetic string IDs stand in for Symbol* values; write_symbolID
            // emits the raw pointer value, so the cast is only an ID carrier.
            name = reinterpret_cast<Symbol*>(name_id);
          }
        }
        if (name == nullptr) {
          name = fld.name();
        }

        writer->write_symbolID(name);         // name
        writer->write_u1(sig2tag(sig));       // type
      }
    }
  }
}
1340 
// creates HPROF_GC_INSTANCE_DUMP record for the given object
void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());

  // Cached per-class field layout; avoids re-walking the field streams
  // for every instance of the same class.
  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  // payload: raw bytes of all instance field values
  u4 is = instance_size(ik, cache_entry);
  // record: tag(u1) + objectID + stack-trace serial(u4) + classID + payload length(u4) + payload
  u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;

  writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
  writer->write_objectID(o);
  writer->write_u4(STACK_TRACE_ID);

  // class ID
  writer->write_classID(ik);

  // number of bytes that follow
  writer->write_u4(is);

  // field values
  dump_instance_fields(writer, o, 0, class_cache, cache_entry);

  writer->end_sub_record();
}
1365 
1366 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1367 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1368   InstanceKlass* ik = InstanceKlass::cast(k);
1369 
1370   // We can safepoint and do a heap dump at a point where we have a Klass,
1371   // but no java mirror class has been setup for it. So we need to check
1372   // that the class is at least loaded, to avoid crash from a null mirror.
1373   if (!ik->is_loaded()) {
1374     return;
1375   }
1376 
1377   u2 static_fields_count = 0;
1378   u4 static_size = get_static_fields_size(ik, static_fields_count);
1379   u2 instance_fields_count = get_instance_fields_count(ik);
1380   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1381   u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);

1386   writer->write_classID(ik);
1387   writer->write_u4(STACK_TRACE_ID);
1388 
1389   // super class ID
1390   InstanceKlass* java_super = ik->java_super();
1391   if (java_super == nullptr) {
1392     writer->write_objectID(oop(nullptr));
1393   } else {
1394     writer->write_classID(java_super);
1395   }
1396 
1397   writer->write_objectID(ik->class_loader());
1398   writer->write_objectID(ik->signers());
1399   writer->write_objectID(ik->protection_domain());
1400 
1401   // reserved
1402   writer->write_objectID(oop(nullptr));
1403   writer->write_objectID(oop(nullptr));
1404 
1405   // instance size
1406   writer->write_u4(HeapWordSize * ik->size_helper());
1407 
1408   // size of constant pool - ignored by HAT 1.1
1409   writer->write_u2(0);
1410 
1411   // static fields
1412   writer->write_u2(static_fields_count);
1413   dump_static_fields(writer, ik);
1414 
1415   // description of instance fields
1416   writer->write_u2(instance_fields_count);
1417   dump_instance_field_descriptors(writer, ik);
1418 
1419   writer->end_sub_record();
1420 }
1421 
1422 // creates HPROF_GC_CLASS_DUMP record for the given array class
1423 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1424   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1425   if (k->is_objArray_klass()) {
1426     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();

1440   assert(java_super != nullptr, "checking");
1441   writer->write_classID(java_super);
1442 
1443   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1444   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1445   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1446 
1447   writer->write_objectID(oop(nullptr));    // reserved
1448   writer->write_objectID(oop(nullptr));
1449   writer->write_u4(0);             // instance size
1450   writer->write_u2(0);             // constant pool
1451   writer->write_u2(0);             // static fields
1452   writer->write_u2(0);             // instance fields
1453 
1454   writer->end_sub_record();
1455 
1456 }
1457 
// Hprof uses an u4 as record length field,
// which means we need to truncate arrays that are too long.
// Returns the number of elements that can actually be dumped.
int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {
  int length = array->length();

  // Payload size if the full array were written.
  size_t length_in_bytes = (size_t)length * type_size;
  // Largest payload that still fits in a u4 record length alongside the header.
  uint max_bytes = max_juint - header_size;

  if (length_in_bytes > max_bytes) {
    // Truncate to the largest element count whose payload fits.
    length = max_bytes / type_size;
    length_in_bytes = (size_t)length * type_size;

    BasicType type = ArrayKlass::cast(array->klass())->element_type();
    warning("cannot dump array of type %s[] with length %d; truncating to length %d",
            type2name_tab[type], array->length(), length);
  }
  return length;
}
1476 
1477 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1478   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1479   assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_PRIMITIVE_OBJECT, "invalid array element type");
1480   int type_size;
1481   if (type == T_OBJECT) {
1482     type_size = sizeof(address);
1483   } else if (type == T_PRIMITIVE_OBJECT) {
1484       // TODO: FIXME
1485       fatal("Not supported yet"); // FIXME: JDK-8325678
1486   } else {
1487     type_size = type2aelembytes(type);
1488   }
1489 
1490   return calculate_array_max_length(writer, array, type_size, header_size);
1491 }
1492 
// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
  // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
  short header_size = 1 + 2 * 4 + 2 * sizeof(address);
  // may be less than array->length() so the record length fits in a u4
  int length = calculate_array_max_length(writer, array, header_size);
  u4 size = checked_cast<u4>(header_size + length * sizeof(address));

  writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
  writer->write_objectID(array);
  writer->write_u4(STACK_TRACE_ID);
  writer->write_u4(length);

  // array class ID
  writer->write_classID(array->klass());

  // [id]* elements
  for (int index = 0; index < length; index++) {
    oop o = array->obj_at(index);
    // log before masking: dormant archived elements are written as null below
    if (o != nullptr && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == nullptr) {
      ResourceMark rm;
      log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                           p2i(o), o->klass()->external_name(),
                           p2i(array), array->klass()->external_name());
    }
    o = mask_dormant_archived_object(o);
    writer->write_objectID(o);
  }

  writer->end_sub_record();
}
1523 
1524 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1525 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache) {
1526   FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1527   InlineKlass* element_klass = array_klass->element_klass();
1528   int element_size = instance_size(element_klass);
1529   /*                          id         array object ID
1530    *                          u4         stack trace serial number
1531    *                          u4         number of elements
1532    *                          u1         element type
1533    */
1534   short header_size = 1 + sizeof(address) + 2 * 4 + 1;
1535 
1536   // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
1537   BasicType type = T_BYTE;
1538   int type_size = type2aelembytes(type);
1539   int length = calculate_array_max_length(writer, array, element_size, header_size);
1540   u4 length_in_bytes = (u4)(length * element_size);
1541   u4 size = header_size + length_in_bytes;
1542 
1543   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1544   writer->write_objectID(array);
1545   writer->write_u4(STACK_TRACE_ID);
1546   // TODO: round up array length for T_SHORT/T_INT/T_LONG
1547   writer->write_u4(length * element_size);
1548   writer->write_u1(type2tag(type));
1549 
1550   for (int index = 0; index < length; index++) {
1551     // need offset in the holder to read inlined object. calculate it from flatArrayOop::value_at_addr()
1552     int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper())
1553                   - cast_from_oop<address>(array));
1554     DumperClassCacheTableEntry* class_cache_entry = class_cache->lookup_or_create(element_klass);
1555     dump_inlined_object_fields(writer, array, offset, class_cache, class_cache_entry);
1556   }
1557 
1558   // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1559 
1560   InlinedObjects::get_instance()->add_flat_array(array);
1561 
1562   writer->end_sub_record();
1563 }
1564 
// Writes 'Length' elements of 'Array' by reading each with the typed
// accessor 'Type##_at' and emitting it via the writer's 'Size'-typed write;
// expects a local 'writer' in the expansion context.
#define WRITE_ARRAY(Array, Type, Size, Length) \
  for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1567 
1568 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1569 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1570   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1571   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1572   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1573 
1574   int length = calculate_array_max_length(writer, array, header_size);
1575   int type_size = type2aelembytes(type);
1576   u4 length_in_bytes = (u4)length * type_size;
1577   u4 size = header_size + length_in_bytes;
1578 
1579   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1580   writer->write_objectID(array);
1581   writer->write_u4(STACK_TRACE_ID);
1582   writer->write_u4(length);
1583   writer->write_u1(type2tag(type));
1584 

1666                                      int bci) {
1667   int line_number;
1668   if (m->is_native()) {
1669     line_number = -3;  // native frame
1670   } else {
1671     line_number = m->line_number_from_bci(bci);
1672   }
1673 
1674   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1675   writer->write_id(frame_serial_num);               // frame serial number
1676   writer->write_symbolID(m->name());                // method's name
1677   writer->write_symbolID(m->signature());           // method's signature
1678 
1679   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1680   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1681   writer->write_u4(class_serial_num);               // class serial number
1682   writer->write_u4((u4) line_number);               // line number
1683 }
1684 
1685 
// Emits HPROF_UTF8 records for the synthetic dotted names of fields reachable
// through inlined fields (e.g. inlined field "p" of a class with fields "x"
// and "y" produces the strings "p.x" and "p.y"). String IDs are taken from
// InlinedObjects::get_next_string_id(), which avoids the range of real
// Symbol* addresses, so the fake IDs cannot collide with real symbols.
class InlinedFieldNameDumper : public LockedClassesDo {
public:
  // Invoked once per class that has inlined fields, with the first synthetic
  // string ID used for that class ('base_index') and the number of inlined
  // fields ('count').
  typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);

private:
  AbstractDumpWriter* _writer;
  InlinedObjects *_owner;
  Callback       _callback;
  uintx _index;   // last synthetic string ID handed out (0 before the first)

  // Recursively emits a HPROF_UTF8 record for every terminal (non-inlined)
  // field reachable from 'field_name'/'klass'. 'super_names' carries the chain
  // of enclosing inlined field names used as the dotted prefix.
  void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
    super_names->push(field_name);
    for (HierarchicalFieldStream<JavaFieldStream> fld(klass); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
        } else {
          // get next string ID.
          uintx next_index = _owner->get_next_string_id(_index);
          if (next_index == 0) {
            // something went wrong (overflow?)
            // stop generation; the rest of inlined objects will have original field names.
            return;
          }
          _index = next_index;

          // Calculate length.
          int len = fld.name()->utf8_length();
          for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
            len += (*it)->utf8_length() + 1;    // +1 for ".".
          }

          DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
          // The synthetic ID is written where a Symbol* would normally go.
          _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
          // Write the string value.
          // 1) super_names.
          for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
            _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
            _writer->write_u1('.');
          }
          // 2) field name.
          _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
        }
      }
    }
    super_names->pop();
  }

  // Entry point for one top-level inlined field.
  void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
    GrowableArray<Symbol*> super_names(4, mtServiceability);
    dump_inlined_field_names(&super_names, field_name, field_klass);
  }

public:
  InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
    : _writer(writer), _owner(owner), _callback(callback), _index(0)  {
  }

  void do_klass(Klass* k) {
    if (!k->is_instance_klass()) {
      return;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    // if (ik->has_inline_type_fields()) {
    //   return;
    // }

    uintx base_index = _index;
    int count = 0;

    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
          count++;
        }
      }
    }

    if (count != 0) {
      _callback(_owner, k, base_index, count);
    }
  }
};
1770 
// Emits a HPROF_CLASS_WITH_INLINED_FIELDS entry for every loaded instance
// class that has at least one inlined (flattened) instance field.
class InlinedFieldsDumper : public LockedClassesDo {
private:
  AbstractDumpWriter* _writer;

public:
  InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}

  void do_klass(Klass* k) {
    if (!k->is_instance_klass()) {
      return;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    // if (ik->has_inline_type_fields()) {
    //   return;
    // }

    // We can be at a point where java mirror does not exist yet.
    // So we need to check that the class is at least loaded, to avoid crash from a null mirror.
    if (!ik->is_loaded()) {
      return;
    }

    // First pass: count the inlined fields so the count can be written
    // before the per-field data.
    u2 inlined_count = 0;
    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          inlined_count++;
        }
      }
    }
    if (inlined_count != 0) {
      _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);

      // class ID
      _writer->write_classID(ik);
      // number of inlined fields
      _writer->write_u2(inlined_count);
      // Second pass: 'index' tracks the field's position in the flattened
      // ("synthetic") field list; inlined fields advance it by the number of
      // synthetic fields they expand to, plain fields by one.
      u2 index = 0;
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
            // inlined field index
            _writer->write_u2(index);
            // synthetic field count
            u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
            _writer->write_u2(field_count);
            // original field name
            _writer->write_symbolID(fld.name());
            // inlined field class ID
            _writer->write_classID(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));

            index += field_count;
          } else {
            index++;
          }
        }
      }
    }
  }
};
1831 
1832 
1833 void InlinedObjects::init() {
1834   _instance = this;
1835 
1836   struct Closure : public SymbolClosure {
1837     uintx _min_id = max_uintx;
1838     uintx _max_id = 0;
1839     Closure() : _min_id(max_uintx), _max_id(0) {}
1840 
1841     void do_symbol(Symbol** p) {
1842       uintx val = reinterpret_cast<uintx>(*p);
1843       if (val < _min_id) {
1844         _min_id = val;
1845       }
1846       if (val > _max_id) {
1847         _max_id = val;
1848       }
1849     }
1850   } closure;
1851 
1852   SymbolTable::symbols_do(&closure);
1853 
1854   _min_string_id = closure._min_id;
1855   _max_string_id = closure._max_id;
1856 }
1857 
1858 void InlinedObjects::release() {
1859   _instance = nullptr;
1860 
1861   if (_inlined_field_map != nullptr) {
1862     delete _inlined_field_map;
1863     _inlined_field_map = nullptr;
1864   }
1865   if (_flat_arrays != nullptr) {
1866     delete _flat_arrays;
1867     _flat_arrays = nullptr;
1868   }
1869 }
1870 
1871 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1872   if (_this->_inlined_field_map == nullptr) {
1873     _this->_inlined_field_map = new (mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1874   }
1875   _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1876 
1877   // counters for dumping classes with inlined fields
1878   _this->_classes_count++;
1879   _this->_inlined_fields_count += count;
1880 }
1881 
1882 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1883   InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1884   ClassLoaderDataGraph::classes_do(&nameDumper);
1885 
1886   if (_inlined_field_map != nullptr) {
1887     // prepare the map for  get_base_index_for().
1888     _inlined_field_map->sort(ClassInlinedFields::compare);
1889   }
1890 }
1891 
1892 uintx InlinedObjects::get_base_index_for(Klass* k) {
1893   if (_inlined_field_map != nullptr) {
1894     bool found = false;
1895     int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
1896     if (found) {
1897         return _inlined_field_map->at(idx).base_index;
1898     }
1899   }
1900 
1901   // return max_uintx, so get_next_string_id returns 0.
1902   return max_uintx;
1903 }
1904 
1905 uintx InlinedObjects::get_next_string_id(uintx id) {
1906   if (++id == _min_string_id) {
1907     return _max_string_id + 1;
1908   }
1909   return id;
1910 }
1911 
1912 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1913   if (_classes_count != 0) {
1914     // Record for each class contains tag(u1), class ID and count(u2)
1915     // for each inlined field index(u2), synthetic fields count(u2), original field name and class ID
1916     int size = _classes_count * (1 + sizeof(address) + 2)
1917              + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1918     DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1919 
1920     InlinedFieldsDumper dumper(writer);
1921     ClassLoaderDataGraph::classes_do(&dumper);
1922   }
1923 }
1924 
1925 void InlinedObjects::add_flat_array(oop array) {
1926   if (_flat_arrays == nullptr) {
1927     _flat_arrays = new (mtServiceability) GrowableArray<oop>(100, mtServiceability);
1928   }
1929   _flat_arrays->append(array);
1930 }
1931 
1932 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1933   if (_flat_arrays != nullptr) {
1934     // For each flat array the record contains tag (u1), object ID and class ID.
1935     int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1936 
1937     DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1938     for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1939       flatArrayOop array = flatArrayOop(*it);
1940       FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1941       InlineKlass* element_klass = array_klass->element_klass();
1942       writer->write_u1(HPROF_FLAT_ARRAY);
1943       writer->write_objectID(array);
1944       writer->write_classID(element_klass);
1945     }
1946   }
1947 }
1948 
1949 
1950 // Support class used to generate HPROF_UTF8 records from the entries in the
1951 // SymbolTable.
1952 
1953 class SymbolTableDumper : public SymbolClosure {
1954  private:
1955   AbstractDumpWriter* _writer;
1956   AbstractDumpWriter* writer() const                { return _writer; }
1957  public:
1958   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
1959   void do_symbol(Symbol** p);
1960 };
1961 
1962 void SymbolTableDumper::do_symbol(Symbol** p) {
1963   ResourceMark rm;
1964   Symbol* sym = *p;
1965   int len = sym->utf8_length();
1966   if (len > 0) {
1967     char* s = sym->as_utf8();
1968     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1969     writer()->write_symbolID(sym);

2380 
// Dispatches each heap object to the matching HPROF record emitter.
void HeapObjectDumper::do_object(oop o) {
  // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
  if (o->klass() == vmClasses::Class_klass()) {
    if (!java_lang_Class::is_primitive(o)) {
      return;
    }
  }

  if (DumperSupport::mask_dormant_archived_object(o) == nullptr) {
    log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
    return;
  }

  if (o->is_instance()) {
    // create a HPROF_GC_INSTANCE record for each object
    DumperSupport::dump_instance(writer(), o, &_class_cache);
  } else if (o->is_objArray()) {
    // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
    DumperSupport::dump_object_array(writer(), objArrayOop(o));
  } else if (o->is_flatArray()) {
    // flat arrays are emitted as HPROF_GC_PRIM_ARRAY_DUMP records
    DumperSupport::dump_flat_array(writer(), flatArrayOop(o), &_class_cache);
  } else if (o->is_typeArray()) {
    // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
    DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
  }
}
2407 
2408 // The dumper controller for parallel heap dump
2409 class DumperController : public CHeapObj<mtInternal> {
2410  private:
2411    Monitor* _lock;
2412    const uint   _dumper_number;
2413    uint   _complete_number;
2414 
2415  public:
2416    DumperController(uint number) :
2417      _lock(new (std::nothrow) PaddedMonitor(Mutex::safepoint, "DumperController_lock")),
2418      _dumper_number(number),
2419      _complete_number(0) { }
2420 
2421    ~DumperController() { delete _lock; }

2425      _complete_number++;
2426      // propagate local error to global if any
2427      if (local_writer->has_error()) {
2428        global_writer->set_error(local_writer->error());
2429      }
2430      ml.notify();
2431    }
2432 
   // Blocks the caller until every dumper thread has reported completion
   // (i.e. _complete_number reaches _dumper_number).
   void wait_all_dumpers_complete() {
     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
     while (_complete_number != _dumper_number) {
        ml.wait();
     }
   }
2439 };
2440 
// DumpMerger merges separate dump files into a complete one
class DumpMerger : public StackObj {
private:
  DumpWriter* _writer;               // destination for the merged dump
  InlinedObjects*  _inlined_objects; // flat-array metadata appended when the merge finishes
  const char* _path;                 // dump file path, reported in error messages
  bool _has_error;                   // sticky error flag, seeded from the writer's state
  int _dump_seq;                     // segment sequence number; reset by merge_done()

private:
  void merge_file(char* path);
  void merge_done();
  void set_error(const char* msg);

public:
  DumpMerger(const char* path, DumpWriter* writer, InlinedObjects* inlined_objects, int dump_seq) :
    _writer(writer),
    _inlined_objects(inlined_objects),
    _path(path),
    _has_error(_writer->has_error()),
    _dump_seq(dump_seq) {}

  void do_merge();
};
2465 
2466 void DumpMerger::merge_done() {
2467   // Writes the HPROF_HEAP_DUMP_END record.
2468   if (!_has_error) {
2469     DumperSupport::end_of_dump(_writer);
2470     _inlined_objects->dump_flat_arrays(_writer);
2471     _writer->flush();
2472     _inlined_objects->release();
2473   }
2474   _dump_seq = 0; //reset
2475 }
2476 
2477 void DumpMerger::set_error(const char* msg) {
2478   assert(msg != nullptr, "sanity check");
2479   log_error(heapdump)("%s (file: %s)", msg, _path);
2480   _writer->set_error(msg);
2481   _has_error = true;
2482 }
2483 
2484 #ifdef LINUX
2485 // Merge segmented heap files via sendfile, it's more efficient than the
2486 // read+write combination, which would require transferring data to and from
2487 // user space.
2488 void DumpMerger::merge_file(char* path) {
2489   assert(!SafepointSynchronize::is_at_safepoint(), "merging happens outside safepoint");
2490   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2491 
2492   int segment_fd = os::open(path, O_RDONLY, 0);

2591   }
2592 };
2593 
2594 // The VM operation that performs the heap dump
2595 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
2596  private:
2597   static VM_HeapDumper*   _global_dumper;
2598   static DumpWriter*      _global_writer;
2599   DumpWriter*             _local_writer;
2600   JavaThread*             _oome_thread;
2601   Method*                 _oome_constructor;
2602   bool                    _gc_before_heap_dump;
2603   GrowableArray<Klass*>*  _klass_map;
2604 
2605   ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
2606   int                     _thread_dumpers_count;
2607   volatile int            _thread_serial_num;
2608   volatile int            _frame_serial_num;
2609 
2610   volatile int            _dump_seq;
2611 
2612   // Inlined object support.
2613   InlinedObjects          _inlined_objects;
2614 
2615   // parallel heap dump support
2616   uint                    _num_dumper_threads;
2617   DumperController*       _dumper_controller;
2618   ParallelObjectIterator* _poi;
2619   // worker id of VMDumper thread.
2620   static const size_t VMDumperWorkerId = 0;
2621   // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2622   static bool is_vm_dumper(uint worker_id) { return worker_id == VMDumperWorkerId; }
2623 
2624   // accessors and setters
2625   static VM_HeapDumper* dumper()         {  assert(_global_dumper != nullptr, "Error"); return _global_dumper; }
2626   static DumpWriter* writer()            {  assert(_global_writer != nullptr, "Error"); return _global_writer; }
2627 
2628   void set_global_dumper() {
2629     assert(_global_dumper == nullptr, "Error");
2630     _global_dumper = this;
2631   }
2632   void set_global_writer() {
2633     assert(_global_writer == nullptr, "Error");
2634     _global_writer = _local_writer;

2696   }
2697 
2698   ~VM_HeapDumper() {
2699     if (_thread_dumpers != nullptr) {
2700       for (int i = 0; i < _thread_dumpers_count; i++) {
2701         delete _thread_dumpers[i];
2702       }
2703       FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2704     }
2705 
2706     if (_dumper_controller != nullptr) {
2707       delete _dumper_controller;
2708       _dumper_controller = nullptr;
2709     }
2710     delete _klass_map;
2711   }
  // Segment sequence counter reached during the dump; handed to DumpMerger
  // so it knows how many segment files to merge.
  int dump_seq()           { return _dump_seq; }
  // A dump is parallel when more than one dumper thread participates.
  bool is_parallel_dump()  { return _num_dumper_threads > 1; }
  bool can_parallel_dump(WorkerThreads* workers);

  InlinedObjects* inlined_objects() { return &_inlined_objects; }

  VMOp_Type type() const { return VMOp_HeapDumper; }
  virtual bool doit_prologue();
  void doit();
  void work(uint worker_id);
2722 };
2723 
// Global dumper/writer for the single in-flight heap dump operation; the
// setters assert these are null, so only one dump can be active at a time.
VM_HeapDumper* VM_HeapDumper::_global_dumper = nullptr;
DumpWriter*    VM_HeapDumper::_global_writer = nullptr;
2726 
// A heap dump is never skipped once scheduled.
bool VM_HeapDumper::skip_operation() const {
  return false;
}
2730 
2731 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2732 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2733   writer->finish_dump_segment();
2734 
2735   writer->write_u1(HPROF_HEAP_DUMP_END);
2736   writer->write_u4(0);
2737   writer->write_u4(0);

2895   DumpWriter* local_writer = new DumpWriter(path, writer()->is_overwrite(), compressor);
2896   return local_writer;
2897 }
2898 
2899 void VM_HeapDumper::work(uint worker_id) {
2900   // VM Dumper works on all non-heap data dumping and part of heap iteration.
2901   if (is_vm_dumper(worker_id)) {
2902     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2903     // Write the file header - we always use 1.0.2
2904     const char* header = "JAVA PROFILE 1.0.2";
2905 
2906     // header is few bytes long - no chance to overflow int
2907     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2908     writer()->write_u4(oopSize);
2909     // timestamp is current time in ms
2910     writer()->write_u8(os::javaTimeMillis());
2911     // HPROF_UTF8 records
2912     SymbolTableDumper sym_dumper(writer());
2913     SymbolTable::symbols_do(&sym_dumper);
2914 
2915     // HPROF_UTF8 records for inlined field names.
2916     inlined_objects()->init();
2917     inlined_objects()->dump_inlined_field_names(writer());
2918 
2919     // HPROF_INLINED_FIELDS
2920     inlined_objects()->dump_classed_with_inlined_fields(writer());
2921 
2922     // write HPROF_LOAD_CLASS records
2923     {
2924       LockedClassesDo locked_load_classes(&do_load_class);
2925       ClassLoaderDataGraph::classes_do(&locked_load_classes);
2926     }
2927 
2928     // write HPROF_FRAME and HPROF_TRACE records
2929     // this must be called after _klass_map is built when iterating the classes above.
2930     dump_stack_traces();
2931 
2932     // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2933 
2934     // Writes HPROF_GC_CLASS_DUMP records
2935     {
2936       LockedClassesDo locked_dump_class(&do_class_dump);
2937       ClassLoaderDataGraph::classes_do(&locked_dump_class);
2938     }
2939 
2940     // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
2941     dump_threads();

2953     ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
2954   }
2955 
2956   // Heap iteration.
2957   // writes HPROF_GC_INSTANCE_DUMP records.
2958   // After each sub-record is written check_segment_length will be invoked
2959   // to check if the current segment exceeds a threshold. If so, a new
2960   // segment is started.
2961   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2962   // of the heap dump.
2963   if (!is_parallel_dump()) {
2964     assert(is_vm_dumper(worker_id), "must be");
2965     // == Serial dump
2966     ResourceMark rm;
2967     TraceTime timer("Dump heap objects", TRACETIME_LOG(Info, heapdump));
2968     HeapObjectDumper obj_dumper(writer());
2969     Universe::heap()->object_iterate(&obj_dumper);
2970     writer()->finish_dump_segment();
2971     // Writes the HPROF_HEAP_DUMP_END record because merge does not happen in serial dump
2972     DumperSupport::end_of_dump(writer());
2973     inlined_objects()->dump_flat_arrays(writer());
2974     writer()->flush();
2975     inlined_objects()->release();
2976   } else {
2977     // == Parallel dump
2978     ResourceMark rm;
2979     TraceTime timer("Dump heap objects in parallel", TRACETIME_LOG(Info, heapdump));
2980     DumpWriter* local_writer = is_vm_dumper(worker_id) ? writer() : create_local_writer();
2981     if (!local_writer->has_error()) {
2982       HeapObjectDumper obj_dumper(local_writer);
2983       _poi->object_iterate(&obj_dumper, worker_id);
2984       local_writer->finish_dump_segment();
2985       local_writer->flush();
2986     }
2987     if (is_vm_dumper(worker_id)) {
2988       _dumper_controller->wait_all_dumpers_complete();
2989     } else {
2990       _dumper_controller->dumper_complete(local_writer, writer());
2991       delete local_writer;
2992       return;
2993     }
2994   }
2995   // At this point, all fragments of the heapdump have been written to separate files.

3074     return -1;
3075   }
3076 
3077   // generate the segmented heap dump into separate files
3078   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
3079   VMThread::execute(&dumper);
3080 
3081   // record any error that the writer may have encountered
3082   set_error(writer.error());
3083 
3084   // For serial dump, once VM_HeapDumper completes, the whole heap dump process
3085   // is done, no further phases needed. For parallel dump, the whole heap dump
3086   // process is done in two phases
3087   //
3088   // Phase 1: Concurrent threads directly write heap data to multiple heap files.
3089   //          This is done by VM_HeapDumper, which is performed within safepoint.
3090   //
3091   // Phase 2: Merge multiple heap files into one complete heap dump file.
3092   //          This is done by DumpMerger, which is performed outside safepoint
3093   if (dumper.is_parallel_dump()) {
3094     DumpMerger merger(path, &writer, dumper.inlined_objects(), dumper.dump_seq());
3095     Thread* current_thread = Thread::current();
3096     if (current_thread->is_AttachListener_thread()) {
3097       // perform heapdump file merge operation in the current thread prevents us
3098       // from occupying the VM Thread, which in turn affects the occurrence of
3099       // GC and other VM operations.
3100       merger.do_merge();
3101     } else {
3102       // otherwise, performs it by VM thread
3103       VM_HeapDumpMerge op(&merger);
3104       VMThread::execute(&op);
3105     }
3106     set_error(writer.error());
3107   }
3108 
3109   // emit JFR event
3110   if (error() == nullptr) {
3111     event.set_destination(path);
3112     event.set_gcBeforeDump(_gc_before_heap_dump);
3113     event.set_size(writer.bytes_written());
3114     event.set_onOutOfMemoryError(_oome);
< prev index next >