< prev index next >

src/hotspot/share/services/heapDumper.cpp

Print this page

  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "classfile/classLoaderData.inline.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "gc/shared/gcLocker.hpp"
  33 #include "gc/shared/gcVMOperations.hpp"
  34 #include "gc/shared/workerThread.hpp"
  35 #include "jfr/jfrEvents.hpp"
  36 #include "jvm.h"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/fieldStreams.inline.hpp"


  41 #include "oops/klass.inline.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "oops/objArrayOop.inline.hpp"
  44 #include "oops/oop.inline.hpp"
  45 #include "oops/typeArrayOop.inline.hpp"
  46 #include "runtime/arguments.hpp"
  47 #include "runtime/continuationWrapper.inline.hpp"

  48 #include "runtime/frame.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/javaThread.inline.hpp"
  52 #include "runtime/jniHandles.hpp"
  53 #include "runtime/os.hpp"
  54 #include "runtime/threads.hpp"
  55 #include "runtime/threadSMR.hpp"
  56 #include "runtime/timerTrace.hpp"
  57 #include "runtime/vframe.hpp"
  58 #include "runtime/vmOperations.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "services/heapDumper.hpp"
  61 #include "services/heapDumperCompression.hpp"
  62 #include "services/threadService.hpp"
  63 #include "utilities/checkedCast.hpp"
  64 #include "utilities/macros.hpp"
  65 #include "utilities/ostream.hpp"
  66 #ifdef LINUX
  67 #include "os_linux.hpp"

 299  *                                     7:  double array
 300  *                                     8:  byte array
 301  *                                     9:  short array
 302  *                                     10: int array
 303  *                                     11: long array
 304  *                          [u1]*      elements
 305  *
 306  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 307  *
 308  *                u4        total number of samples
 309  *                u4        # of traces
 310  *               [u4        # of samples
 311  *                u4]*      stack trace serial number
 312  *
 313  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 314  *
 315  *                u4        0x00000001: alloc traces on/off
 316  *                          0x00000002: cpu sampling on/off
 317  *                u2        stack trace depth
 318  *






















 319  *
 320  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 321  * be generated as a sequence of heap dump segments. This sequence is
 322  * terminated by an end record. The additional tags allowed by format
 323  * "JAVA PROFILE 1.0.2" are:
 324  *
 325  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 326  *
 327  *               [heap dump sub-records]*
 328  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 329  *
 330  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 331  *
 332  */
 333 
 334 
// HPROF tags
//
// Note: the numeric values below live in three independent namespaces of the
// HPROF binary format (top-level records, field types, heap-dump sub-records),
// so the same value may legitimately appear more than once in this enum
// (e.g. 0x01 is both HPROF_UTF8 and HPROF_ARRAY_OBJECT).

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};
 383 
// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,        // serial number referenced by all dump records
  INITIAL_CLASS_COUNT = 200  // presumably an initial sizing hint for class lists — confirm at use sites
};
 389 

































































 390 // Supports I/O operations for a dump
 391 // Base class for dump and parallel dump
 392 class AbstractDumpWriter : public CHeapObj<mtInternal> {
 393  protected:
 394   enum {
 395     io_buffer_max_size = 1*M,
 396     dump_segment_header_size = 9
 397   };
 398 
 399   char* _buffer;    // internal buffer
 400   size_t _size;
 401   size_t _pos;
 402 
 403   bool _in_dump_segment; // Are we currently in a dump segment?
 404   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 405   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 406   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 407 
 408   char* buffer() const                          { return _buffer; }
 409   size_t buffer_size() const                    { return _size; }

 726   }
 727 }
 728 
 729 class DumperClassCacheTable;
 730 class DumperClassCacheTableEntry;
 731 
// Support class with a collection of functions used when dumping the heap.
// All members are static (AllStatic); the class only namespaces the helpers.
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // returns the size of the instance of the given class
  // (uses the cache entry's precomputed size when one is supplied)
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given object
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  // Returns o unchanged, or nullptr (after logging) when o's klass has no
  // java mirror loaded — i.e. o might be a dormant archived object.
  // ref_obj is the referencing object (may be nullptr); used for logging only.
  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  // Logs (Trace level, tags "aot, heap") that a dormant archived object was
  // skipped, including the referencing object when one is known.
  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, aot, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name(),
                  p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name());
      }
    }
  }
};
 810 
 811 // Hash table of klasses to the klass metadata. This should greatly improve the
 812 // hash dumping performance. This hash table is supposed to be used by a single
 813 // thread only.
 814 //
 815 class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
 816   friend class DumperClassCacheTable;
 817 private:
 818   GrowableArray<char> _sigs_start;
 819   GrowableArray<int> _offsets;

 820   u4 _instance_size;
 821   int _entries;
 822 
 823 public:
 824   DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};
 825 
 826   int field_count()             { return _entries; }
 827   char sig_start(int field_idx) { return _sigs_start.at(field_idx); }



 828   int offset(int field_idx)     { return _offsets.at(field_idx); }
 829   u4 instance_size()            { return _instance_size; }
 830 };
 831 
 832 class DumperClassCacheTable {
 833 private:
 834   // HashTable SIZE is specified at compile time so we
 835   // use 1031 which is the first prime after 1024.
 836   static constexpr size_t TABLE_SIZE = 1031;
 837 
 838   // Maintain the cache for N classes. This limits memory footprint
 839   // impact, regardless of how many classes we have in the dump.
 840   // This also improves look up performance by keeping the statically
 841   // sized table from overloading.
 842   static constexpr int CACHE_TOP = 256;
 843 
 844   typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
 845                             TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
 846   PtrTable* _ptrs;
 847 

 856       bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
 857         delete entry;
 858         return true;
 859       }
 860     } cleanup;
 861     table->unlink(&cleanup);
 862   }
 863 
 864 public:
 865   DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
 866     if (_last_ik == ik) {
 867       return _last_entry;
 868     }
 869 
 870     DumperClassCacheTableEntry* entry;
 871     DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
 872     if (from_cache == nullptr) {
 873       entry = new DumperClassCacheTableEntry();
 874       for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
 875         if (!fld.access_flags().is_static()) {
 876           Symbol* sig = fld.signature();
 877           entry->_sigs_start.push(sig->char_at(0));









 878           entry->_offsets.push(fld.offset());
 879           entry->_entries++;
 880           entry->_instance_size += DumperSupport::sig2size(sig);
 881         }
 882       }
 883 
 884       if (_ptrs->number_of_entries() >= CACHE_TOP) {
 885         // We do not track the individual hit rates for table entries.
 886         // Purge the entire table, and let the cache catch up with new
 887         // distribution.
 888         unlink_all(_ptrs);
 889       }
 890 
 891       _ptrs->put(ik, entry);
 892     } else {
 893       entry = *from_cache;
 894     }
 895 
 896     // Remember for single-slot cache.
 897     _last_ik = ik;
 898     _last_entry = entry;
 899 
 900     return entry;

 970 }
 971 
 972 // dump a jfloat
 973 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
 974   if (g_isnan(f)) {
 975     writer->write_u4(0x7fc00000); // collapsing NaNs
 976   } else {
 977     writer->write_u4(bit_cast<u4>(f));
 978   }
 979 }
 980 
 981 // dump a jdouble
 982 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
 983   if (g_isnan(d)) {
 984     writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
 985   } else {
 986     writer->write_u8(bit_cast<u8>(d));
 987   }
 988 }
 989 

 990 // dumps the raw value of the given field
 991 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
 992   switch (type) {
 993     case JVM_SIGNATURE_CLASS :
 994     case JVM_SIGNATURE_ARRAY : {
 995       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
 996       o = mask_dormant_archived_object(o, obj);
 997       assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
 998       writer->write_objectID(o);
 999       break;
1000     }
1001     case JVM_SIGNATURE_BYTE : {
1002       jbyte b = obj->byte_field(offset);
1003       writer->write_u1(b);
1004       break;
1005     }
1006     case JVM_SIGNATURE_CHAR : {
1007       jchar c = obj->char_field(offset);
1008       writer->write_u2(c);
1009       break;

1028       writer->write_u4(i);
1029       break;
1030     }
1031     case JVM_SIGNATURE_LONG : {
1032       jlong l = obj->long_field(offset);
1033       writer->write_u8(l);
1034       break;
1035     }
1036     case JVM_SIGNATURE_BOOLEAN : {
1037       jboolean b = obj->bool_field(offset);
1038       writer->write_u1(b);
1039       break;
1040     }
1041     default : {
1042       ShouldNotReachHere();
1043       break;
1044     }
1045   }
1046 }
1047 
1048 // returns the size of the instance of the given class
1049 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1050   if (class_cache_entry != nullptr) {
1051     return class_cache_entry->instance_size();
1052   } else {
1053     u4 size = 0;
1054     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1055       if (!fld.access_flags().is_static()) {
1056         size += sig2size(fld.signature());




1057       }
1058     }
1059     return size;
1060   }
1061 }
1062 
1063 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1064   field_count = 0;
1065   u4 size = 0;
1066 
1067   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1068     if (fldc.access_flags().is_static()) {


1069       field_count++;
1070       size += sig2size(fldc.signature());
1071     }
1072   }
1073 
1074   // Add in resolved_references which is referenced by the cpCache
1075   // The resolved_references is an array per InstanceKlass holding the
1076   // strings and other oops resolved from the constant pool.
1077   oop resolved_references = ik->constants()->resolved_references_or_null();
1078   if (resolved_references != nullptr) {
1079     field_count++;
1080     size += sizeof(address);
1081 
1082     // Add in the resolved_references of the used previous versions of the class
1083     // in the case of RedefineClasses
1084     InstanceKlass* prev = ik->previous_versions();
1085     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1086       field_count++;
1087       size += sizeof(address);
1088       prev = prev->previous_versions();

1091 
1092   // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1093   // arrays.
1094   oop init_lock = ik->init_lock();
1095   if (init_lock != nullptr) {
1096     field_count++;
1097     size += sizeof(address);
1098   }
1099 
1100   // We write the value itself plus a name and a one byte type tag per field.
1101   return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1102 }
1103 
// dumps static fields of the given class
// NOTE: the set and order of entries written here must stay in sync with the
// counting done by get_static_fields_size (real statics, then
// resolved_references incl. previous versions, then the init lock).
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value (read from the class's java mirror)
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    // synthetic field so the resolved_references array is reachable in the dump
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions (RedefineClasses keeps old versions alive)
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}
1146 
1147 // dump the raw values of the instance fields of the given object
1148 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {


1149   assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1150   for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1151     dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));







1152   }
1153 }
1154 
1155 // dumps the definition of the instance fields for a given class





1156 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1157   u2 field_count = 0;
1158 
1159   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1160     if (!fldc.access_flags().is_static()) field_count++;







1161   }
1162 
1163   return field_count;
1164 }
1165 
1166 // dumps the definition of the instance fields for a given class
1167 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
1168   InstanceKlass* ik = InstanceKlass::cast(k);






1169 
1170   // dump the field descriptors
1171   for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1172     if (!fld.access_flags().is_static()) {
1173       Symbol* sig = fld.signature();






















1174 
1175       writer->write_symbolID(fld.name());   // name
1176       writer->write_u1(sig2tag(sig));       // type

1177     }
1178   }
1179 }
1180 
1181 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1182 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1183   InstanceKlass* ik = InstanceKlass::cast(o->klass());
1184 
1185   DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1186 
1187   u4 is = instance_size(ik, cache_entry);
1188   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1189 
1190   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1191   writer->write_objectID(o);
1192   writer->write_u4(STACK_TRACE_ID);
1193 
1194   // class ID
1195   writer->write_classID(ik);
1196 
1197   // number of bytes that follow
1198   writer->write_u4(is);
1199 
1200   // field values
1201   dump_instance_fields(writer, o, cache_entry);
1202 
1203   writer->end_sub_record();
1204 }
1205 
1206 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1207 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik) {
1208   // We can safepoint and do a heap dump at a point where we have a Klass,
1209   // but no java mirror class has been setup for it. So we need to check
1210   // that the class is at least loaded, to avoid crash from a null mirror.
1211   if (!ik->is_loaded()) {
1212     return;
1213   }
1214 
1215   u2 static_fields_count = 0;
1216   u4 static_size = get_static_fields_size(ik, static_fields_count);
1217   u2 instance_fields_count = get_instance_fields_count(ik);
1218   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1219   u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1220 
1221   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);

1224   writer->write_classID(ik);
1225   writer->write_u4(STACK_TRACE_ID);
1226 
1227   // super class ID
1228   InstanceKlass* super = ik->super();
1229   if (super == nullptr) {
1230     writer->write_objectID(oop(nullptr));
1231   } else {
1232     writer->write_classID(super);
1233   }
1234 
1235   writer->write_objectID(ik->class_loader());
1236   writer->write_objectID(ik->signers());
1237   writer->write_objectID(ik->protection_domain());
1238 
1239   // reserved
1240   writer->write_objectID(oop(nullptr));
1241   writer->write_objectID(oop(nullptr));
1242 
1243   // instance size
1244   writer->write_u4(DumperSupport::instance_size(ik));
1245 
1246   // size of constant pool - ignored by HAT 1.1
1247   writer->write_u2(0);
1248 
1249   // static fields
1250   writer->write_u2(static_fields_count);
1251   dump_static_fields(writer, ik);
1252 
1253   // description of instance fields
1254   writer->write_u2(instance_fields_count);
1255   dump_instance_field_descriptors(writer, ik);
1256 
1257   writer->end_sub_record();
1258 }
1259 
1260 // creates HPROF_GC_CLASS_DUMP record for the given array class
1261 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1262   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1263   if (k->is_objArray_klass()) {
1264     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();

1278   assert(java_super != nullptr, "checking");
1279   writer->write_classID(java_super);
1280 
1281   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1282   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1283   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1284 
1285   writer->write_objectID(oop(nullptr));    // reserved
1286   writer->write_objectID(oop(nullptr));
1287   writer->write_u4(0);             // instance size
1288   writer->write_u2(0);             // constant pool
1289   writer->write_u2(0);             // static fields
1290   writer->write_u2(0);             // instance fields
1291 
1292   writer->end_sub_record();
1293 
1294 }
1295 
// Hprof uses an u4 as record length field,
// which means we need to truncate arrays that are too long.
// Returns the number of elements that may be written; logs a warning when
// the array had to be truncated.
int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
  BasicType type = ArrayKlass::cast(array->klass())->element_type();
  assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");

  int length = array->length();

  // Per-element size in the dump: object elements are written as IDs
  // (sizeof(address)); primitives use their natural element size.
  int type_size;
  if (type == T_OBJECT) {
    type_size = sizeof(address);
  } else {
    type_size = type2aelembytes(type);
  }

  // size_t arithmetic so length * type_size cannot overflow a 32-bit int.
  size_t length_in_bytes = (size_t)length * type_size;
  // Largest payload that still fits in the u4 record length next to the header.
  uint max_bytes = max_juint - header_size;

  if (length_in_bytes > max_bytes) {
    // Keep as many whole elements as fit into max_bytes.
    length = max_bytes / type_size;
    length_in_bytes = (size_t)length * type_size;

    warning("cannot dump array of type %s[] with length %d; truncating to length %d",
            type2name_tab[type], array->length(), length);
  }
  return length;
}
1323 
















1324 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1325 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1326   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1327   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1328   int length = calculate_array_max_length(writer, array, header_size);
1329   u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1330 
1331   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1332   writer->write_objectID(array);
1333   writer->write_u4(STACK_TRACE_ID);
1334   writer->write_u4(length);
1335 
1336   // array class ID
1337   writer->write_classID(array->klass());
1338 
1339   // [id]* elements
1340   for (int index = 0; index < length; index++) {
1341     oop o = array->obj_at(index);
1342     o = mask_dormant_archived_object(o, array);
1343     writer->write_objectID(o);
1344   }
1345 
1346   writer->end_sub_record();
1347 }
1348 









































// Writes all Length elements of typeArrayOop Array via the surrounding
// function's 'writer', converting each Type element to the integral
// on-disk type Size (write_u1/u2/u4/u8) before writing.
#define WRITE_ARRAY(Array, Type, Size, Length) \
  for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1351 
1352 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1353 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1354   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1355   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1356   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1357 
1358   int length = calculate_array_max_length(writer, array, header_size);
1359   int type_size = type2aelembytes(type);
1360   u4 length_in_bytes = (u4)length * type_size;
1361   u4 size = header_size + length_in_bytes;
1362 
1363   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1364   writer->write_objectID(array);
1365   writer->write_u4(STACK_TRACE_ID);
1366   writer->write_u4(length);
1367   writer->write_u1(type2tag(type));
1368 

1450                                      int bci) {
1451   int line_number;
1452   if (m->is_native()) {
1453     line_number = -3;  // native frame
1454   } else {
1455     line_number = m->line_number_from_bci(bci);
1456   }
1457 
1458   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1459   writer->write_id(frame_serial_num);               // frame serial number
1460   writer->write_symbolID(m->name());                // method's name
1461   writer->write_symbolID(m->signature());           // method's signature
1462 
1463   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1464   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1465   writer->write_u4(class_serial_num);               // class serial number
1466   writer->write_u4((u4) line_number);               // line number
1467 }
1468 
1469 








































































































































































































































































1470 // Support class used to generate HPROF_UTF8 records from the entries in the
1471 // SymbolTable.
1472 
1473 class SymbolTableDumper : public SymbolClosure {
1474  private:
1475   AbstractDumpWriter* _writer;
1476   AbstractDumpWriter* writer() const                { return _writer; }
1477  public:
1478   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
1479   void do_symbol(Symbol** p);
1480 };
1481 
1482 void SymbolTableDumper::do_symbol(Symbol** p) {
1483   ResourceMark rm;
1484   Symbol* sym = *p;
1485   int len = sym->utf8_length();
1486   if (len > 0) {
1487     char* s = sym->as_utf8();
1488     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1489     writer()->write_symbolID(sym);

1982       return;
1983     }
1984   }
1985 
1986   if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
1987     return;
1988   }
1989 
1990   if (o->is_instance()) {
1991     // create a HPROF_GC_INSTANCE record for each object
1992     DumperSupport::dump_instance(writer(), o, &_class_cache);
1993     // If we encounter an unmounted virtual thread it needs to be dumped explicitly
1994     // (mounted virtual threads are dumped with their carriers).
1995     if (java_lang_VirtualThread::is_instance(o)
1996         && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
1997       _vthread_dumper->dump_vthread(o, writer());
1998     }
1999   } else if (o->is_objArray()) {
2000     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2001     DumperSupport::dump_object_array(writer(), objArrayOop(o));


2002   } else if (o->is_typeArray()) {
2003     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2004     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2005   }
2006 }
2007 
2008 // The dumper controller for parallel heap dump
2009 class DumperController : public CHeapObj<mtInternal> {
2010  private:
2011    Monitor* _lock;
2012    Mutex* _global_writer_lock;
2013 
2014    const uint   _dumper_number;
2015    uint   _complete_number;
2016 
2017    bool   _started; // VM dumper started and acquired global writer lock
2018 
2019  public:
2020    DumperController(uint number) :
2021      // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,

2061      _complete_number++;
2062      // propagate local error to global if any
2063      if (local_writer->has_error()) {
2064        global_writer->set_error(local_writer->error());
2065      }
2066      ml.notify();
2067    }
2068 
   // Blocks until every dumper thread has reported completion,
   // i.e. until _complete_number has reached _dumper_number.
   // Uses _lock with no safepoint check (callers run inside the safepoint).
   void wait_all_dumpers_complete() {
     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
     while (_complete_number != _dumper_number) {
        ml.wait();
     }
   }
2075 };
2076 
2077 // DumpMerger merges separate dump files into a complete one
class DumpMerger : public StackObj {
private:
  DumpWriter* _writer;   // destination writer for the merged dump file
  const char* _path;     // path of the final dump file, used in error messages
  bool _has_error;       // set once the writer reports an error; stops finishing the dump
  int _dump_seq;         // dump sequence number; reset to 0 by merge_done()

private:
  // merges the segment file at 'path' into _writer
  void merge_file(const char* path);
  // finishes the merged dump (writes HPROF_HEAP_DUMP_END unless an error occurred)
  void merge_done();
  // records 'msg' on the writer and remembers locally that merging failed
  void set_error(const char* msg);

public:
  DumpMerger(const char* path, DumpWriter* writer, int dump_seq) :
    _writer(writer),
    _path(path),
    _has_error(_writer->has_error()),   // inherit any error already hit by the writer
    _dump_seq(dump_seq) {}

  void do_merge();

  // returns path for the parallel DumpWriter (resource allocated)
  static char* get_writer_path(const char* base_path, int seq);

};
2103 
2104 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2105   // approximate required buffer size
2106   size_t buf_size = strlen(base_path)
2107                     + 2                 // ".p"
2108                     + 10                // number (that's enough for 2^32 parallel dumpers)
2109                     + 1;                // '\0'
2110 
2111   char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2112   memset(path, 0, buf_size);
2113 
2114   os::snprintf_checked(path, buf_size, "%s.p%d", base_path, seq);
2115 
2116   return path;
2117 }
2118 
2119 
void DumpMerger::merge_done() {
  // Writes the HPROF_HEAP_DUMP_END record and flushes, but only if no error
  // was recorded — on error the partial dump is left as-is.
  if (!_has_error) {
    DumperSupport::end_of_dump(_writer);
    _writer->flush();
  }
  _dump_seq = 0; // reset the sequence number now that merging is finished
}
2128 
void DumpMerger::set_error(const char* msg) {
  assert(msg != nullptr, "sanity check");
  // Log with the destination path for context, then record the error on the
  // writer and remember it locally so merge_done() skips finishing the dump.
  log_error(heapdump)("%s (file: %s)", msg, _path);
  _writer->set_error(msg);
  _has_error = true;
}
2135 
2136 #ifdef LINUX
2137 // Merge segmented heap files via sendfile, it's more efficient than the
2138 // read+write combination, which would require transferring data to and from
2139 // user space.
2140 void DumpMerger::merge_file(const char* path) {
2141   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2142 
2143   int segment_fd = os::open(path, O_RDONLY, 0);
2144   if (segment_fd == -1) {

2224   // restore compressor for further use
2225   _writer->set_compressor(saved_compressor);
2226   merge_done();
2227 }
2228 
2229 // The VM operation that performs the heap dump
2230 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2231  private:
2232   DumpWriter*             _writer;
2233   JavaThread*             _oome_thread;
2234   Method*                 _oome_constructor;
2235   bool                    _gc_before_heap_dump;
2236   GrowableArray<Klass*>*  _klass_map;
2237 
2238   ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
2239   int                     _thread_dumpers_count;
2240   volatile int            _thread_serial_num;
2241   volatile int            _frame_serial_num;
2242 
2243   volatile int            _dump_seq;




2244   // parallel heap dump support
2245   uint                    _num_dumper_threads;
2246   DumperController*       _dumper_controller;
2247   ParallelObjectIterator* _poi;
2248 
2249   // Dumper id of VMDumper thread.
2250   static const int VMDumperId = 0;
2251   // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2252   static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2253   // the 1st dumper calling get_next_dumper_id becomes VM dumper
2254   int get_next_dumper_id() {
2255     return AtomicAccess::fetch_then_add(&_dump_seq, 1);
2256   }
2257 
2258   DumpWriter* writer() const { return _writer; }
2259 
2260   bool skip_operation() const;
2261 
2262   // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2263   void dump_threads(AbstractDumpWriter* writer);

2304   }
2305 
  ~VM_HeapDumper() {
    // Release the per-thread dumpers (platform, carrier and mounted virtual
    // threads) and the C-heap array that holds them.
    if (_thread_dumpers != nullptr) {
      for (int i = 0; i < _thread_dumpers_count; i++) {
        delete _thread_dumpers[i];
      }
      FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
    }

    if (_dumper_controller != nullptr) {
      delete _dumper_controller;
      _dumper_controller = nullptr;
    }
    delete _klass_map;
  }
2320   int dump_seq()           { return _dump_seq; }
2321   bool is_parallel_dump()  { return _num_dumper_threads > 1; }
2322   void prepare_parallel_dump(WorkerThreads* workers);
2323 


2324   VMOp_Type type() const { return VMOp_HeapDumper; }
2325   virtual bool doit_prologue();
2326   void doit();
2327   void work(uint worker_id);
2328 
2329   // UnmountedVThreadDumper implementation
2330   void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2331 };
2332 
// The heap dump operation is never skipped; always execute the VM operation.
bool VM_HeapDumper::skip_operation() const {
  return false;
}
2336 
2337 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2338 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2339   writer->finish_dump_segment();
2340 
2341   writer->write_u1(HPROF_HEAP_DUMP_END);
2342   writer->write_u4(0);
2343   writer->write_u4(0);

2440     _dumper_controller->lock_global_writer();
2441     _dumper_controller->signal_start();
2442   } else {
2443     _dumper_controller->wait_for_start_signal();
2444   }
2445 
2446   if (is_vm_dumper(dumper_id)) {
2447     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2448     // Write the file header - we always use 1.0.2
2449     const char* header = "JAVA PROFILE 1.0.2";
2450 
2451     // header is few bytes long - no chance to overflow int
2452     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2453     writer()->write_u4(oopSize);
2454     // timestamp is current time in ms
2455     writer()->write_u8(os::javaTimeMillis());
2456     // HPROF_UTF8 records
2457     SymbolTableDumper sym_dumper(writer());
2458     SymbolTable::symbols_do(&sym_dumper);
2459 







2460     // write HPROF_LOAD_CLASS records
2461     {
2462       LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
2463       ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
2464     }
2465 
2466     // write HPROF_FRAME and HPROF_TRACE records
2467     // this must be called after _klass_map is built when iterating the classes above.
2468     dump_stack_traces(writer());
2469 
2470     // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2471     _dumper_controller->unlock_global_writer();
2472   }
2473 
2474   // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2475 
2476   ResourceMark rm;
2477   // share global compressor, local DumpWriter is not responsible for its life cycle
2478   DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
2479                             writer()->is_overwrite(), writer()->compressor());

2642         (error() != nullptr) ? error() : "reason unknown");
2643     }
2644     return -1;
2645   }
2646 
2647   // generate the segmented heap dump into separate files
2648   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
2649   VMThread::execute(&dumper);
2650 
2651   // record any error that the writer may have encountered
2652   set_error(writer.error());
2653 
2654   // Heap dump process is done in two phases
2655   //
2656   // Phase 1: Concurrent threads directly write heap data to multiple heap files.
2657   //          This is done by VM_HeapDumper, which is performed within safepoint.
2658   //
2659   // Phase 2: Merge multiple heap files into one complete heap dump file.
2660   //          This is done by DumpMerger, which is performed outside safepoint
2661 
2662   DumpMerger merger(path, &writer, dumper.dump_seq());
2663   // Perform heapdump file merge operation in the current thread prevents us
2664   // from occupying the VM Thread, which in turn affects the occurrence of
2665   // GC and other VM operations.
2666   merger.do_merge();
2667   if (writer.error() != nullptr) {
2668     set_error(writer.error());
2669   }
2670 
2671   // emit JFR event
2672   if (error() == nullptr) {
2673     event.set_destination(path);
2674     event.set_gcBeforeDump(_gc_before_heap_dump);
2675     event.set_size(writer.bytes_written());
2676     event.set_onOutOfMemoryError(_oome);
2677     event.set_overwrite(overwrite);
2678     event.set_compression(compression);
2679     event.commit();
2680   } else {
2681     log_debug(aot, heap)("Error %s while dumping heap", error());
2682   }

  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "classfile/classLoaderData.inline.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "gc/shared/gcLocker.hpp"
  33 #include "gc/shared/gcVMOperations.hpp"
  34 #include "gc/shared/workerThread.hpp"
  35 #include "jfr/jfrEvents.hpp"
  36 #include "jvm.h"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/fieldStreams.inline.hpp"
  41 #include "oops/flatArrayKlass.hpp"
  42 #include "oops/flatArrayOop.inline.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "oops/objArrayKlass.hpp"
  45 #include "oops/objArrayOop.inline.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "oops/typeArrayOop.inline.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/continuationWrapper.inline.hpp"
  50 #include "runtime/fieldDescriptor.inline.hpp"
  51 #include "runtime/frame.inline.hpp"
  52 #include "runtime/handles.inline.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/javaThread.inline.hpp"
  55 #include "runtime/jniHandles.hpp"
  56 #include "runtime/os.hpp"
  57 #include "runtime/threads.hpp"
  58 #include "runtime/threadSMR.hpp"
  59 #include "runtime/timerTrace.hpp"
  60 #include "runtime/vframe.hpp"
  61 #include "runtime/vmOperations.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "services/heapDumper.hpp"
  64 #include "services/heapDumperCompression.hpp"
  65 #include "services/threadService.hpp"
  66 #include "utilities/checkedCast.hpp"
  67 #include "utilities/macros.hpp"
  68 #include "utilities/ostream.hpp"
  69 #ifdef LINUX
  70 #include "os_linux.hpp"

 302  *                                     7:  double array
 303  *                                     8:  byte array
 304  *                                     9:  short array
 305  *                                     10: int array
 306  *                                     11: long array
 307  *                          [u1]*      elements
 308  *
 309  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 310  *
 311  *                u4        total number of samples
 312  *                u4        # of traces
 313  *               [u4        # of samples
 314  *                u4]*      stack trace serial number
 315  *
 316  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 317  *
 318  *                u4        0x00000001: alloc traces on/off
 319  *                          0x00000002: cpu sampling on/off
 320  *                u2        stack trace depth
 321  *
 322  * HPROF_FLAT_ARRAYS        list of flat arrays
 323  *
 324  *               [flat array sub-records]*
 325  *
 326  *               HPROF_FLAT_ARRAY      flat array
 327  *
 328  *                          id         array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
 329  *                          id         element class ID (dumped by HPROF_GC_CLASS_DUMP)
 330  *
 331  * HPROF_INLINED_FIELDS     describes inlined fields
 332  *
 333  *               [class with inlined fields sub-records]*
 334  *
 335  *               HPROF_CLASS_WITH_INLINED_FIELDS
 336  *
 337  *                          id         class ID (dumped as HPROF_GC_CLASS_DUMP)
 338  *
 339  *                          u2         number of instance inlined fields (not including super)
 340  *                          [u2,       inlined field index,
 341  *                           u2,       synthetic field count,
 342  *                           id,       original field name,
 343  *                           id]*      inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
 344  *
 345  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 346  * be generated as a sequence of heap dump segments. This sequence is
 347  * terminated by an end record. The additional tags allowed by format
 348  * "JAVA PROFILE 1.0.2" are:
 349  *
 350  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 351  *
 352  *               [heap dump sub-records]*
 353  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 354  *
 355  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 356  *
 357  */
 358 
 359 
 360 // HPROF tags
 361 
// NOTE: values are reused across the groups below (e.g. 0x01 appears three
// times) because each group is interpreted in a different record context.
enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // inlined object support
  HPROF_FLAT_ARRAYS             = 0x12,
  HPROF_INLINED_FIELDS          = 0x13,
  // inlined object subrecords
  HPROF_FLAT_ARRAY                  = 0x01,
  HPROF_CLASS_WITH_INLINED_FIELDS   = 0x01,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};
 415 
 416 // Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,         // serial number used for the dummy HPROF_TRACE record
  INITIAL_CLASS_COUNT = 200   // initial capacity hint for class bookkeeping; confirm at use sites
};
 421 
 422 
 423 class AbstractDumpWriter;
 424 
// Tracks inlined (flattened) fields and flat arrays discovered during the dump
// so that the HPROF_INLINED_FIELDS / HPROF_FLAT_ARRAYS records can be emitted.
class InlinedObjects {

  // Maps a klass to the base index of its synthetic inlined-field names.
  struct ClassInlinedFields {
    const Klass *klass;
    uintx base_index;   // base index of the inlined field names (1st field has index base_index+1).
    ClassInlinedFields(const Klass *klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}

    // For GrowableArray::find_sorted().
    static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
      return a.klass - b.klass;
    }
    // For GrowableArray::sort().
    static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
      return compare(*a, *b);
    }
  };

  // Range of string IDs reserved for synthetic inlined-field names.
  uintx _min_string_id;
  uintx _max_string_id;

  GrowableArray<ClassInlinedFields> *_inlined_field_map;

  // counters for classes with inlined fields and for the fields
  int _classes_count;
  int _inlined_fields_count;

  static InlinedObjects *_instance;

  static void inlined_field_names_callback(InlinedObjects* _this, const Klass *klass, uintx base_index, int count);

  // Flat arrays registered via add_flat_array(), dumped by dump_flat_arrays().
  GrowableArray<oop> *_flat_arrays;

public:
  InlinedObjects()
    : _min_string_id(0), _max_string_id(0),
    _inlined_field_map(nullptr),
    _classes_count(0), _inlined_fields_count(0),
    _flat_arrays(nullptr) {
  }

  // Returns the process-wide singleton (see _instance).
  static InlinedObjects* get_instance() {
    return _instance;
  }

  void init();
  void release();

  void dump_inlined_field_names(AbstractDumpWriter *writer);

  uintx get_base_index_for(Klass* k);
  uintx get_next_string_id(uintx id);

  void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);

  void add_flat_array(oop array);
  void dump_flat_arrays(AbstractDumpWriter* writer);

};
 483 
InlinedObjects *InlinedObjects::_instance = nullptr; // singleton returned by get_instance()
 485 
 486 
 487 // Supports I/O operations for a dump
 488 // Base class for dump and parallel dump
 489 class AbstractDumpWriter : public CHeapObj<mtInternal> {
 490  protected:
 491   enum {
 492     io_buffer_max_size = 1*M,
 493     dump_segment_header_size = 9
 494   };
 495 
 496   char* _buffer;    // internal buffer
 497   size_t _size;
 498   size_t _pos;
 499 
 500   bool _in_dump_segment; // Are we currently in a dump segment?
 501   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 502   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 503   DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
 504 
 505   char* buffer() const                          { return _buffer; }
 506   size_t buffer_size() const                    { return _size; }

 823   }
 824 }
 825 
 826 class DumperClassCacheTable;
 827 class DumperClassCacheTableEntry;
 828 
 829 // Support class with a collection of functions used when dumping the heap
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // calculates the total size of the all fields of the given class.
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given identity or inlined object;
  // for identity objects offset is 0 and 'klass' is o->klass(),
  // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);
  // dump the raw values of the instance fields of the given inlined object;
  // dump_instance_fields wrapper for inlined objects
  static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);

  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k, uintx *inlined_fields_index = nullptr);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
  static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
  // extended version to dump flat arrays as primitive arrays;
  // type_size specifies size of the inlined objects.
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  // Returns 'o' unchanged, or nullptr if 'o' is a dormant archived object
  // (its java mirror is not loaded); 'ref_obj' is only used for logging.
  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  // helper methods for inlined fields.
  static bool is_inlined_field(const fieldDescriptor& fld) {
    return fld.is_flat();
  }
  static InlineKlass* get_inlined_field_klass(const fieldDescriptor& fld) {
    assert(is_inlined_field(fld), "must be inlined field");
    InstanceKlass* holder_klass = fld.field_holder();
    return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
  }

  // Trace-logs a skipped dormant archived object, including the referencing
  // object when one is known.
  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, aot, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name(),
                  p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                  p2i(o), o->klass()->external_name());
      }
    }
  }
};
 928 
 929 // Hash table of klasses to the klass metadata. This should greatly improve the
 930 // hash dumping performance. This hash table is supposed to be used by a single
 931 // thread only.
 932 //
// Per-class cached field metadata: signature start chars, offsets, inline
// klasses and the cumulative instance size, recorded once per class.
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  GrowableArray<char> _sigs_start;              // first signature char per field ('Q' marks an inlined field)
  GrowableArray<int> _offsets;                  // field offset per field
  GrowableArray<InlineKlass*> _inline_klasses;  // InlineKlass per field (nullptr for non-inlined fields)
  u4 _instance_size;                            // cumulative serialized size of all instance fields
  int _entries;                                 // number of fields recorded

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  void push_sig_start_inlined() { _sigs_start.push('Q'); }
  bool is_inlined(int field_idx){ return _sigs_start.at(field_idx) == 'Q'; }
  InlineKlass* inline_klass(int field_idx) { assert(is_inlined(field_idx), "Not inlined"); return _inline_klasses.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};
 953 
 954 class DumperClassCacheTable {
 955 private:
 956   // HashTable SIZE is specified at compile time so we
 957   // use 1031 which is the first prime after 1024.
 958   static constexpr size_t TABLE_SIZE = 1031;
 959 
 960   // Maintain the cache for N classes. This limits memory footprint
 961   // impact, regardless of how many classes we have in the dump.
 962   // This also improves look up performance by keeping the statically
 963   // sized table from overloading.
 964   static constexpr int CACHE_TOP = 256;
 965 
 966   typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
 967                             TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
 968   PtrTable* _ptrs;
 969 

 978       bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
 979         delete entry;
 980         return true;
 981       }
 982     } cleanup;
 983     table->unlink(&cleanup);
 984   }
 985 
 986 public:
 987   DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
 988     if (_last_ik == ik) {
 989       return _last_entry;
 990     }
 991 
 992     DumperClassCacheTableEntry* entry;
 993     DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
 994     if (from_cache == nullptr) {
 995       entry = new DumperClassCacheTableEntry();
 996       for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
 997         if (!fld.access_flags().is_static()) {
 998           InlineKlass* inlineKlass = nullptr;
 999           if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1000             inlineKlass = DumperSupport::get_inlined_field_klass(fld.field_descriptor());
1001             entry->push_sig_start_inlined();
1002             entry->_instance_size += DumperSupport::instance_size(inlineKlass);
1003           } else {
1004             Symbol* sig = fld.signature();
1005             entry->_sigs_start.push(sig->char_at(0));
1006             entry->_instance_size += DumperSupport::sig2size(sig);
1007           }
1008           entry->_inline_klasses.push(inlineKlass);
1009           entry->_offsets.push(fld.offset());
1010           entry->_entries++;

1011         }
1012       }
1013 
1014       if (_ptrs->number_of_entries() >= CACHE_TOP) {
1015         // We do not track the individual hit rates for table entries.
1016         // Purge the entire table, and let the cache catch up with new
1017         // distribution.
1018         unlink_all(_ptrs);
1019       }
1020 
1021       _ptrs->put(ik, entry);
1022     } else {
1023       entry = *from_cache;
1024     }
1025 
1026     // Remember for single-slot cache.
1027     _last_ik = ik;
1028     _last_entry = entry;
1029 
1030     return entry;

1100 }
1101 
1102 // dump a jfloat
1103 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
1104   if (g_isnan(f)) {
1105     writer->write_u4(0x7fc00000); // collapsing NaNs
1106   } else {
1107     writer->write_u4(bit_cast<u4>(f));
1108   }
1109 }
1110 
1111 // dump a jdouble
1112 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
1113   if (g_isnan(d)) {
1114     writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
1115   } else {
1116     writer->write_u8(bit_cast<u8>(d));
1117   }
1118 }
1119 
1120 
1121 // dumps the raw value of the given field
1122 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
1123   switch (type) {
1124     case JVM_SIGNATURE_CLASS :
1125     case JVM_SIGNATURE_ARRAY : {
1126       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
1127       o = mask_dormant_archived_object(o, obj);
1128       assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
1129       writer->write_objectID(o);
1130       break;
1131     }
1132     case JVM_SIGNATURE_BYTE : {
1133       jbyte b = obj->byte_field(offset);
1134       writer->write_u1(b);
1135       break;
1136     }
1137     case JVM_SIGNATURE_CHAR : {
1138       jchar c = obj->char_field(offset);
1139       writer->write_u2(c);
1140       break;

1159       writer->write_u4(i);
1160       break;
1161     }
1162     case JVM_SIGNATURE_LONG : {
1163       jlong l = obj->long_field(offset);
1164       writer->write_u8(l);
1165       break;
1166     }
1167     case JVM_SIGNATURE_BOOLEAN : {
1168       jboolean b = obj->bool_field(offset);
1169       writer->write_u1(b);
1170       break;
1171     }
1172     default : {
1173       ShouldNotReachHere();
1174       break;
1175     }
1176   }
1177 }
1178 
1179 // calculates the total size of the all fields of the given class.
1180 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1181   if (class_cache_entry != nullptr) {
1182     return class_cache_entry->instance_size();
1183   } else {
1184     u4 size = 0;
1185     for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1186       if (!fld.access_flags().is_static()) {
1187         if (is_inlined_field(fld.field_descriptor())) {
1188           size += instance_size(get_inlined_field_klass(fld.field_descriptor()));
1189         } else {
1190           size += sig2size(fld.signature());
1191         }
1192       }
1193     }
1194     return size;
1195   }
1196 }
1197 
1198 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1199   field_count = 0;
1200   u4 size = 0;
1201 
1202   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1203     if (fldc.access_flags().is_static()) {
1204       assert(!is_inlined_field(fldc.field_descriptor()), "static fields cannot be inlined");
1205 
1206       field_count++;
1207       size += sig2size(fldc.signature());
1208     }
1209   }
1210 
1211   // Add in resolved_references which is referenced by the cpCache
1212   // The resolved_references is an array per InstanceKlass holding the
1213   // strings and other oops resolved from the constant pool.
1214   oop resolved_references = ik->constants()->resolved_references_or_null();
1215   if (resolved_references != nullptr) {
1216     field_count++;
1217     size += sizeof(address);
1218 
1219     // Add in the resolved_references of the used previous versions of the class
1220     // in the case of RedefineClasses
1221     InstanceKlass* prev = ik->previous_versions();
1222     while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1223       field_count++;
1224       size += sizeof(address);
1225       prev = prev->previous_versions();

1228 
1229   // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1230   // arrays.
1231   oop init_lock = ik->init_lock();
1232   if (init_lock != nullptr) {
1233     field_count++;
1234     size += sizeof(address);
1235   }
1236 
1237   // We write the value itself plus a name and a one byte type tag per field.
1238   return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1239 }
1240 
// dumps static fields of the given class
// Writes, for each static field: name (symbol ID), type tag (u1) and raw value.
// Also appends the resolved_references pseudo-fields and the init_lock pseudo-field;
// the set of entries written here must stay in sync with get_static_fields_size().
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!is_inlined_field(fld.field_descriptor()), "static fields cannot be inlined");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value, read from the class mirror at the field's offset
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions (kept alive by RedefineClasses)
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}
1285 
1286 // dump the raw values of the instance fields of the given identity or inlined object;
1287 // for identity objects offset is 0 and 'klass' is o->klass(),
1288 // for inlined objects offset is the offset in the holder object, 'klass' is inlined object class.
1289 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
1290   assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1291   for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1292     if (class_cache_entry->is_inlined(idx)) {
1293       InlineKlass* field_klass = class_cache_entry->inline_klass(idx);
1294       int fields_offset = offset + (class_cache_entry->offset(idx) - field_klass->payload_offset());
1295       DumperClassCacheTableEntry* inline_class_cache_entry = class_cache->lookup_or_create(field_klass);
1296       dump_inlined_object_fields(writer, o, fields_offset, class_cache, inline_class_cache_entry);
1297     } else {
1298       dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
1299     }
1300   }
1301 }
1302 
1303 void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
1304   // the object is inlined, so all its fields are stored without headers.
1305   dump_instance_fields(writer, o, offset, class_cache, class_cache_entry);
1306 }
1307 
1308 // gets the count of the instance fields for a given class
1309 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1310   u2 field_count = 0;
1311 
1312   for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1313     if (!fldc.access_flags().is_static()) {
1314       if (is_inlined_field(fldc.field_descriptor())) {
1315         // add "synthetic" fields for inlined fields.
1316         field_count += get_instance_fields_count(get_inlined_field_klass(fldc.field_descriptor()));
1317       } else {
1318         field_count++;
1319       }
1320     }
1321   }
1322 
1323   return field_count;
1324 }
1325 
// dumps the definition of the instance fields for a given class
// inlined_fields_id is not-nullptr for inlined fields (to get synthetic field name IDs
// by using InlinedObjects::get_next_string_id()).
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
  // inlined_fields_id != nullptr means ik is a class of inlined field.
  // Inlined field id pointer for this class; lazily initialized
  // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
  uintx *this_klass_inlined_fields_id = inlined_fields_id;
  uintx inlined_id = 0;

  // dump the field descriptors
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      if (is_inlined_field(fld.field_descriptor())) {
        // dump "synthetic" fields for inlined fields.
        if (this_klass_inlined_fields_id == nullptr) {
          // first inlined field of a top-level class: start from the base string
          // id registered for this class (max_uintx if none was registered).
          inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
          this_klass_inlined_fields_id = &inlined_id;
        }
        // recursion shares the id pointer so nested calls consume consecutive ids
        dump_instance_field_descriptors(writer, get_inlined_field_klass(fld.field_descriptor()), this_klass_inlined_fields_id);
      } else {
        Symbol* sig = fld.signature();
        Symbol* name = nullptr;
        // Use inlined_fields_id provided by caller.
        if (inlined_fields_id != nullptr) {
          uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);

          // name_id == 0 is returned on error. use original field signature.
          if (name_id != 0) {
            *inlined_fields_id = name_id;
            // synthetic ids are written as if they were Symbol addresses
            name = reinterpret_cast<Symbol*>(name_id);
          }
        }
        if (name == nullptr) {
          name = fld.name();
        }

        writer->write_symbolID(name);         // name
        writer->write_u1(sig2tag(sig));       // type
      }
    }
  }
}
1369 
// creates HPROF_GC_INSTANCE_DUMP record for the given object
void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());

  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  // payload size of the dumped field values
  u4 is = instance_size(ik, cache_entry);
  // record size: tag(u1) + objectID + stack trace serial(u4) + classID + length(u4) + fields
  u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;

  writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
  writer->write_objectID(o);
  writer->write_u4(STACK_TRACE_ID);

  // class ID
  writer->write_classID(ik);

  // number of bytes that follow
  writer->write_u4(is);

  // field values
  dump_instance_fields(writer, o, 0, class_cache, cache_entry);

  writer->end_sub_record();
}
1394 
1395 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1396 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik) {
1397   // We can safepoint and do a heap dump at a point where we have a Klass,
1398   // but no java mirror class has been setup for it. So we need to check
1399   // that the class is at least loaded, to avoid crash from a null mirror.
1400   if (!ik->is_loaded()) {
1401     return;
1402   }
1403 
1404   u2 static_fields_count = 0;
1405   u4 static_size = get_static_fields_size(ik, static_fields_count);
1406   u2 instance_fields_count = get_instance_fields_count(ik);
1407   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1408   u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1409 
1410   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);

1413   writer->write_classID(ik);
1414   writer->write_u4(STACK_TRACE_ID);
1415 
1416   // super class ID
1417   InstanceKlass* super = ik->super();
1418   if (super == nullptr) {
1419     writer->write_objectID(oop(nullptr));
1420   } else {
1421     writer->write_classID(super);
1422   }
1423 
1424   writer->write_objectID(ik->class_loader());
1425   writer->write_objectID(ik->signers());
1426   writer->write_objectID(ik->protection_domain());
1427 
1428   // reserved
1429   writer->write_objectID(oop(nullptr));
1430   writer->write_objectID(oop(nullptr));
1431 
1432   // instance size
1433   writer->write_u4(HeapWordSize * ik->size_helper());
1434 
1435   // size of constant pool - ignored by HAT 1.1
1436   writer->write_u2(0);
1437 
1438   // static fields
1439   writer->write_u2(static_fields_count);
1440   dump_static_fields(writer, ik);
1441 
1442   // description of instance fields
1443   writer->write_u2(instance_fields_count);
1444   dump_instance_field_descriptors(writer, ik);
1445 
1446   writer->end_sub_record();
1447 }
1448 
1449 // creates HPROF_GC_CLASS_DUMP record for the given array class
1450 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1451   InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1452   if (k->is_objArray_klass()) {
1453     Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();

1467   assert(java_super != nullptr, "checking");
1468   writer->write_classID(java_super);
1469 
1470   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1471   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1472   writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1473 
1474   writer->write_objectID(oop(nullptr));    // reserved
1475   writer->write_objectID(oop(nullptr));
1476   writer->write_u4(0);             // instance size
1477   writer->write_u2(0);             // constant pool
1478   writer->write_u2(0);             // static fields
1479   writer->write_u2(0);             // instance fields
1480 
1481   writer->end_sub_record();
1482 
1483 }
1484 
1485 // Hprof uses an u4 as record length field,
1486 // which means we need to truncate arrays that are too long.
1487 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {



1488   int length = array->length();
1489 







1490   size_t length_in_bytes = (size_t)length * type_size;
1491   uint max_bytes = max_juint - header_size;
1492 
1493   if (length_in_bytes > max_bytes) {
1494     length = max_bytes / type_size;
1495     length_in_bytes = (size_t)length * type_size;
1496 
1497     BasicType type = ArrayKlass::cast(array->klass())->element_type();
1498     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1499             type2name_tab[type], array->length(), length);
1500   }
1501   return length;
1502 }
1503 
1504 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1505   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1506   assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type");
1507   int type_size;
1508   if (type == T_OBJECT) {
1509     type_size = sizeof(address);
1510   } else if (type == T_FLAT_ELEMENT) {
1511       // TODO: FIXME
1512       fatal("Not supported yet"); // FIXME: JDK-8325678
1513   } else {
1514     type_size = type2aelembytes(type);
1515   }
1516 
1517   return calculate_array_max_length(writer, array, type_size, header_size);
1518 }
1519 
1520 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1521 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1522   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1523   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1524   int length = calculate_array_max_length(writer, array, header_size);
1525   u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1526 
1527   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1528   writer->write_objectID(array);
1529   writer->write_u4(STACK_TRACE_ID);
1530   writer->write_u4(length);
1531 
1532   // array class ID
1533   writer->write_classID(array->klass());
1534 
1535   // [id]* elements
1536   for (int index = 0; index < length; index++) {
1537     oop o = array->obj_at(index);
1538     o = mask_dormant_archived_object(o, array);
1539     writer->write_objectID(o);
1540   }
1541 
1542   writer->end_sub_record();
1543 }
1544 
1545 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1546 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache) {
1547   FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1548   InlineKlass* element_klass = array_klass->element_klass();
1549   int element_size = instance_size(element_klass);
1550   /*                          id         array object ID
1551    *                          u4         stack trace serial number
1552    *                          u4         number of elements
1553    *                          u1         element type
1554    */
1555   short header_size = 1 + sizeof(address) + 2 * 4 + 1;
1556 
1557   // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
1558   BasicType type = T_BYTE;
1559   int type_size = type2aelembytes(type);
1560   int length = calculate_array_max_length(writer, array, element_size, header_size);
1561   u4 length_in_bytes = (u4)(length * element_size);
1562   u4 size = header_size + length_in_bytes;
1563 
1564   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1565   writer->write_objectID(array);
1566   writer->write_u4(STACK_TRACE_ID);
1567   // TODO: round up array length for T_SHORT/T_INT/T_LONG
1568   writer->write_u4(length * element_size);
1569   writer->write_u1(type2tag(type));
1570 
1571   for (int index = 0; index < length; index++) {
1572     // need offset in the holder to read inlined object. calculate it from flatArrayOop::value_at_addr()
1573     int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper())
1574                   - cast_from_oop<address>(array));
1575     DumperClassCacheTableEntry* class_cache_entry = class_cache->lookup_or_create(element_klass);
1576     dump_inlined_object_fields(writer, array, offset, class_cache, class_cache_entry);
1577   }
1578 
1579   // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1580 
1581   InlinedObjects::get_instance()->add_flat_array(array);
1582 
1583   writer->end_sub_record();
1584 }
1585 
1586 #define WRITE_ARRAY(Array, Type, Size, Length) \
1587   for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1588 
1589 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1590 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1591   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1592   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1593   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1594 
1595   int length = calculate_array_max_length(writer, array, header_size);
1596   int type_size = type2aelembytes(type);
1597   u4 length_in_bytes = (u4)length * type_size;
1598   u4 size = header_size + length_in_bytes;
1599 
1600   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1601   writer->write_objectID(array);
1602   writer->write_u4(STACK_TRACE_ID);
1603   writer->write_u4(length);
1604   writer->write_u1(type2tag(type));
1605 

1687                                      int bci) {
1688   int line_number;
1689   if (m->is_native()) {
1690     line_number = -3;  // native frame
1691   } else {
1692     line_number = m->line_number_from_bci(bci);
1693   }
1694 
1695   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1696   writer->write_id(frame_serial_num);               // frame serial number
1697   writer->write_symbolID(m->name());                // method's name
1698   writer->write_symbolID(m->signature());           // method's signature
1699 
1700   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1701   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1702   writer->write_u4(class_serial_num);               // class serial number
1703   writer->write_u4((u4) line_number);               // line number
1704 }
1705 
1706 
// Generates HPROF_UTF8 records for the synthetic, dotted names of inlined
// fields ("outer.inner.leaf", built from the chain of field names) and
// reports, via the callback, the base string id and inlined-field count for
// every class that has inlined fields.
class InlinedFieldNameDumper : public LockedClassesDo {
public:
  typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);

private:
  AbstractDumpWriter* _writer;
  InlinedObjects *_owner;
  Callback       _callback;
  uintx _index;  // last synthetic string id handed out (0 before the first one)

  // Recursively emits a dotted name for every terminal (non-inlined) field
  // reachable from an inlined field; super_names holds the chain of enclosing
  // field names and is restored (push/pop) around each level.
  void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
    super_names->push(field_name);
    for (HierarchicalFieldStream<JavaFieldStream> fld(klass); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
        } else {
          // get next string ID.
          uintx next_index = _owner->get_next_string_id(_index);
          if (next_index == 0) {
            // something went wrong (overflow?)
            // stop generation; the rest of inlined objects will have original field names.
            return;
          }
          _index = next_index;

          // Calculate length.
          int len = fld.name()->utf8_length();
          for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
            len += (*it)->utf8_length() + 1;    // +1 for ".".
          }

          DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
          // the synthetic id is written where a Symbol address would normally go
          _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
          // Write the string value.
          // 1) super_names.
          for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
            _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
            _writer->write_u1('.');
          }
          // 2) field name.
          _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
        }
      }
    }
    super_names->pop();
  }

  // entry point for one top-level inlined field
  void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
    GrowableArray<Symbol*> super_names(4, mtServiceability);
    dump_inlined_field_names(&super_names, field_name, field_klass);
  }

public:
  InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
    : _writer(writer), _owner(owner), _callback(callback), _index(0)  {
  }

  void do_klass(Klass* k) {
    if (!k->is_instance_klass()) {
      return;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    // if (ik->has_inline_type_fields()) {
    //   return;
    // }

    // _index before this class is processed becomes its base id
    uintx base_index = _index;
    int count = 0;

    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
          count++;
        }
      }
    }

    // only classes that actually have inlined fields are reported
    if (count != 0) {
      _callback(_owner, k, base_index, count);
    }
  }
};
1791 
// Writes the per-class payload of the HPROF_INLINED_FIELDS record: for every
// loaded class with inlined fields, a HPROF_CLASS_WITH_INLINED_FIELDS entry
// with the class ID and, for each inlined field, its slot index, synthetic
// field count, original name and the inlined field's class ID.
class InlinedFieldsDumper : public LockedClassesDo {
private:
  AbstractDumpWriter* _writer;

public:
  InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}

  void do_klass(Klass* k) {
    if (!k->is_instance_klass()) {
      return;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    // if (ik->has_inline_type_fields()) {
    //   return;
    // }

    // We can be at a point where java mirror does not exist yet.
    // So we need to check that the class is at least loaded, to avoid crash from a null mirror.
    if (!ik->is_loaded()) {
      return;
    }

    // first pass: count the inlined fields (the count is written up front)
    u2 inlined_count = 0;
    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
          inlined_count++;
        }
      }
    }
    if (inlined_count != 0) {
      _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);

      // class ID
      _writer->write_classID(ik);
      // number of inlined fields
      _writer->write_u2(inlined_count);
      // second pass: describe each inlined field; 'index' tracks the dumped
      // field slot — an inlined field occupies as many slots as it has
      // synthetic fields, a plain field occupies one
      u2 index = 0;
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
            // inlined field index
            _writer->write_u2(index);
            // synthetic field count
            u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
            _writer->write_u2(field_count);
            // original field name
            _writer->write_symbolID(fld.name());
            // inlined field class ID
            _writer->write_classID(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));

            index += field_count;
          } else {
            index++;
          }
        }
      }
    }
  }
};
1852 
1853 
1854 void InlinedObjects::init() {
1855   _instance = this;
1856 
1857   struct Closure : public SymbolClosure {
1858     uintx _min_id = max_uintx;
1859     uintx _max_id = 0;
1860     Closure() : _min_id(max_uintx), _max_id(0) {}
1861 
1862     void do_symbol(Symbol** p) {
1863       uintx val = reinterpret_cast<uintx>(*p);
1864       if (val < _min_id) {
1865         _min_id = val;
1866       }
1867       if (val > _max_id) {
1868         _max_id = val;
1869       }
1870     }
1871   } closure;
1872 
1873   SymbolTable::symbols_do(&closure);
1874 
1875   _min_string_id = closure._min_id;
1876   _max_string_id = closure._max_id;
1877 }
1878 
1879 void InlinedObjects::release() {
1880   _instance = nullptr;
1881 
1882   if (_inlined_field_map != nullptr) {
1883     delete _inlined_field_map;
1884     _inlined_field_map = nullptr;
1885   }
1886   if (_flat_arrays != nullptr) {
1887     delete _flat_arrays;
1888     _flat_arrays = nullptr;
1889   }
1890 }
1891 
1892 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1893   if (_this->_inlined_field_map == nullptr) {
1894     _this->_inlined_field_map = new (mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1895   }
1896   _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1897 
1898   // counters for dumping classes with inlined fields
1899   _this->_classes_count++;
1900   _this->_inlined_fields_count += count;
1901 }
1902 
1903 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1904   InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1905   ClassLoaderDataGraph::classes_do(&nameDumper);
1906 
1907   if (_inlined_field_map != nullptr) {
1908     // prepare the map for  get_base_index_for().
1909     _inlined_field_map->sort(ClassInlinedFields::compare);
1910   }
1911 }
1912 
1913 uintx InlinedObjects::get_base_index_for(Klass* k) {
1914   if (_inlined_field_map != nullptr) {
1915     bool found = false;
1916     int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
1917     if (found) {
1918         return _inlined_field_map->at(idx).base_index;
1919     }
1920   }
1921 
1922   // return max_uintx, so get_next_string_id returns 0.
1923   return max_uintx;
1924 }
1925 
1926 uintx InlinedObjects::get_next_string_id(uintx id) {
1927   if (++id == _min_string_id) {
1928     return _max_string_id + 1;
1929   }
1930   return id;
1931 }
1932 
1933 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1934   if (_classes_count != 0) {
1935     // Record for each class contains tag(u1), class ID and count(u2)
1936     // for each inlined field index(u2), synthetic fields count(u2), original field name and class ID
1937     int size = _classes_count * (1 + sizeof(address) + 2)
1938              + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1939     DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1940 
1941     InlinedFieldsDumper dumper(writer);
1942     ClassLoaderDataGraph::classes_do(&dumper);
1943   }
1944 }
1945 
// Remembers a dumped flat array so dump_flat_arrays() can later emit its
// HPROF_FLAT_ARRAY entry.
void InlinedObjects::add_flat_array(oop array) {
  if (_flat_arrays == nullptr) {
    // lazily allocated; only needed when the heap contains flat arrays
    _flat_arrays = new (mtServiceability) GrowableArray<oop>(100, mtServiceability);
  }
  _flat_arrays->append(array);
}
1952 
1953 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1954   if (_flat_arrays != nullptr) {
1955     // For each flat array the record contains tag (u1), object ID and class ID.
1956     int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1957 
1958     DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1959     for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1960       flatArrayOop array = flatArrayOop(*it);
1961       FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1962       InlineKlass* element_klass = array_klass->element_klass();
1963       writer->write_u1(HPROF_FLAT_ARRAY);
1964       writer->write_objectID(array);
1965       writer->write_classID(element_klass);
1966     }
1967   }
1968 }
1969 
1970 
1971 // Support class used to generate HPROF_UTF8 records from the entries in the
1972 // SymbolTable.
1973 
1974 class SymbolTableDumper : public SymbolClosure {
1975  private:
1976   AbstractDumpWriter* _writer;
1977   AbstractDumpWriter* writer() const                { return _writer; }
1978  public:
1979   SymbolTableDumper(AbstractDumpWriter* writer)     { _writer = writer; }
1980   void do_symbol(Symbol** p);
1981 };
1982 
1983 void SymbolTableDumper::do_symbol(Symbol** p) {
1984   ResourceMark rm;
1985   Symbol* sym = *p;
1986   int len = sym->utf8_length();
1987   if (len > 0) {
1988     char* s = sym->as_utf8();
1989     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1990     writer()->write_symbolID(sym);

2483       return;
2484     }
2485   }
2486 
2487   if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2488     return;
2489   }
2490 
2491   if (o->is_instance()) {
2492     // create a HPROF_GC_INSTANCE record for each object
2493     DumperSupport::dump_instance(writer(), o, &_class_cache);
2494     // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2495     // (mounted virtual threads are dumped with their carriers).
2496     if (java_lang_VirtualThread::is_instance(o)
2497         && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2498       _vthread_dumper->dump_vthread(o, writer());
2499     }
2500   } else if (o->is_objArray()) {
2501     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2502     DumperSupport::dump_object_array(writer(), objArrayOop(o));
2503   } else if (o->is_flatArray()) {
2504     DumperSupport::dump_flat_array(writer(), flatArrayOop(o), &_class_cache);
2505   } else if (o->is_typeArray()) {
2506     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2507     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2508   }
2509 }
2510 
2511 // The dumper controller for parallel heap dump
2512 class DumperController : public CHeapObj<mtInternal> {
2513  private:
2514    Monitor* _lock;
2515    Mutex* _global_writer_lock;
2516 
2517    const uint   _dumper_number;
2518    uint   _complete_number;
2519 
2520    bool   _started; // VM dumper started and acquired global writer lock
2521 
2522  public:
2523    DumperController(uint number) :
2524      // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,

2564      _complete_number++;
2565      // propagate local error to global if any
2566      if (local_writer->has_error()) {
2567        global_writer->set_error(local_writer->error());
2568      }
2569      ml.notify();
2570    }
2571 
2572    void wait_all_dumpers_complete() {
2573      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2574      while (_complete_number != _dumper_number) {
2575         ml.wait();
2576      }
2577    }
2578 };
2579 
// DumpMerger merges separate dump files into a complete one
class DumpMerger : public StackObj {
private:
  DumpWriter* _writer;             // destination writer for the merged dump
  InlinedObjects*  _inlined_objects;
  const char* _path;               // dump file path, used in error messages
  bool _has_error;                 // seeded from the writer; set by set_error()
  int _dump_seq;                   // segment file counter; reset by merge_done()

private:
  void merge_file(const char* path);
  void merge_done();
  void set_error(const char* msg);

public:
  DumpMerger(const char* path, DumpWriter* writer, InlinedObjects* inlined_objects, int dump_seq) :
    _writer(writer),
    _inlined_objects(inlined_objects),
    _path(path),
    _has_error(_writer->has_error()),
    _dump_seq(dump_seq) {}

  void do_merge();

  // returns path for the parallel DumpWriter (resource allocated)
  static char* get_writer_path(const char* base_path, int seq);

};
2608 
2609 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2610   // approximate required buffer size
2611   size_t buf_size = strlen(base_path)
2612                     + 2                 // ".p"
2613                     + 10                // number (that's enough for 2^32 parallel dumpers)
2614                     + 1;                // '\0'
2615 
2616   char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2617   memset(path, 0, buf_size);
2618 
2619   os::snprintf_checked(path, buf_size, "%s.p%d", base_path, seq);
2620 
2621   return path;
2622 }
2623 
2624 
// Finalizes the merged dump: writes the HPROF_HEAP_DUMP_END record, appends
// the inlined-object metadata (flat array records), flushes the writer and
// releases the InlinedObjects side tables. Skipped entirely on earlier error.
void DumpMerger::merge_done() {
  // Writes the HPROF_HEAP_DUMP_END record.
  if (!_has_error) {
    DumperSupport::end_of_dump(_writer);
    // flat array records go after the heap dump end record
    _inlined_objects->dump_flat_arrays(_writer);
    _writer->flush();
    _inlined_objects->release();
  }
  _dump_seq = 0; //reset
}
2635 
// Records a merge failure: logs it with the merged-file path for context,
// propagates the message to the shared writer (so the caller's error()
// check sees it), and sets the sticky _has_error flag so that subsequent
// merge steps (including merge_done's trailer) are skipped.
void DumpMerger::set_error(const char* msg) {
  assert(msg != nullptr, "sanity check");
  log_error(heapdump)("%s (file: %s)", msg, _path);
  _writer->set_error(msg);
  _has_error = true;
}
2642 
2643 #ifdef LINUX
2644 // Merge segmented heap files via sendfile, it's more efficient than the
2645 // read+write combination, which would require transferring data to and from
2646 // user space.
2647 void DumpMerger::merge_file(const char* path) {
2648   TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2649 
2650   int segment_fd = os::open(path, O_RDONLY, 0);
2651   if (segment_fd == -1) {

2731   // restore compressor for further use
2732   _writer->set_compressor(saved_compressor);
2733   merge_done();
2734 }
2735 
2736 // The VM operation that performs the heap dump
// The VM operation that performs the heap dump
class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
 private:
  DumpWriter*             _writer;              // global writer for the (non-segment) dump data
  JavaThread*             _oome_thread;         // presumably the thread that hit OOME, else null — TODO confirm (ctor not in view)
  Method*                 _oome_constructor;    // presumably the OutOfMemoryError ctor used to identify the OOME frame — TODO confirm
  bool                    _gc_before_heap_dump; // request a full GC before dumping
  GrowableArray<Klass*>*  _klass_map;           // klasses recorded while writing HPROF_LOAD_CLASS; freed in the destructor

  ThreadDumper**          _thread_dumpers; // platform, carrier and mounted virtual threads
  int                     _thread_dumpers_count;
  volatile int            _thread_serial_num;   // serial number generators for HPROF thread
  volatile int            _frame_serial_num;    // and stack-frame records

  volatile int            _dump_seq;            // also serves as the dumper-id allocator (see get_next_dumper_id)

  // Inlined object support.
  InlinedObjects          _inlined_objects;

  // parallel heap dump support
  uint                    _num_dumper_threads;
  DumperController*       _dumper_controller;
  ParallelObjectIterator* _poi;

  // Dumper id of VMDumper thread.
  static const int VMDumperId = 0;
  // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
  static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
  // the 1st dumper calling get_next_dumper_id becomes VM dumper
  int get_next_dumper_id() {
    // atomic post-increment: hands out 0, 1, 2, ... to concurrent callers
    return AtomicAccess::fetch_then_add(&_dump_seq, 1);
  }

  // Accessor for the shared global writer.
  DumpWriter* writer() const { return _writer; }

  bool skip_operation() const;

  // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
  void dump_threads(AbstractDumpWriter* writer);

2815   }
2816 
  // Releases per-thread dumpers, the dumper controller and the klass map.
  // (_thread_dumpers is a C-heap array of heap-allocated ThreadDumper*.)
  ~VM_HeapDumper() {
    if (_thread_dumpers != nullptr) {
      for (int i = 0; i < _thread_dumpers_count; i++) {
        delete _thread_dumpers[i];
      }
      FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
    }

    if (_dumper_controller != nullptr) {
      delete _dumper_controller;
      _dumper_controller = nullptr;
    }
    delete _klass_map;
  }
  // After the operation, _dump_seq is the number of segment files written
  // (each dumper id that was handed out produced one segment).
  int dump_seq()           { return _dump_seq; }
  bool is_parallel_dump()  { return _num_dumper_threads > 1; }
  void prepare_parallel_dump(WorkerThreads* workers);

  InlinedObjects* inlined_objects() { return &_inlined_objects; }

  // VM_Operation / WorkerTask interface
  VMOp_Type type() const { return VMOp_HeapDumper; }
  virtual bool doit_prologue();
  void doit();
  void work(uint worker_id);

  // UnmountedVThreadDumper implementation
  void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
};
2845 
// A heap dump operation is never skipped once scheduled.
bool VM_HeapDumper::skip_operation() const {
  return false;
}
2849 
2850 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2851 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2852   writer->finish_dump_segment();
2853 
2854   writer->write_u1(HPROF_HEAP_DUMP_END);
2855   writer->write_u4(0);
2856   writer->write_u4(0);

2953     _dumper_controller->lock_global_writer();
2954     _dumper_controller->signal_start();
2955   } else {
2956     _dumper_controller->wait_for_start_signal();
2957   }
2958 
2959   if (is_vm_dumper(dumper_id)) {
2960     TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2961     // Write the file header - we always use 1.0.2
2962     const char* header = "JAVA PROFILE 1.0.2";
2963 
2964     // header is few bytes long - no chance to overflow int
2965     writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2966     writer()->write_u4(oopSize);
2967     // timestamp is current time in ms
2968     writer()->write_u8(os::javaTimeMillis());
2969     // HPROF_UTF8 records
2970     SymbolTableDumper sym_dumper(writer());
2971     SymbolTable::symbols_do(&sym_dumper);
2972 
2973     // HPROF_UTF8 records for inlined field names.
2974     inlined_objects()->init();
2975     inlined_objects()->dump_inlined_field_names(writer());
2976 
2977     // HPROF_INLINED_FIELDS
2978     inlined_objects()->dump_classed_with_inlined_fields(writer());
2979 
2980     // write HPROF_LOAD_CLASS records
2981     {
2982       LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
2983       ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
2984     }
2985 
2986     // write HPROF_FRAME and HPROF_TRACE records
2987     // this must be called after _klass_map is built when iterating the classes above.
2988     dump_stack_traces(writer());
2989 
2990     // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2991     _dumper_controller->unlock_global_writer();
2992   }
2993 
2994   // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2995 
2996   ResourceMark rm;
2997   // share global compressor, local DumpWriter is not responsible for its life cycle
2998   DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
2999                             writer()->is_overwrite(), writer()->compressor());

3162         (error() != nullptr) ? error() : "reason unknown");
3163     }
3164     return -1;
3165   }
3166 
3167   // generate the segmented heap dump into separate files
3168   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
3169   VMThread::execute(&dumper);
3170 
3171   // record any error that the writer may have encountered
3172   set_error(writer.error());
3173 
3174   // Heap dump process is done in two phases
3175   //
3176   // Phase 1: Concurrent threads directly write heap data to multiple heap files.
3177   //          This is done by VM_HeapDumper, which is performed within safepoint.
3178   //
3179   // Phase 2: Merge multiple heap files into one complete heap dump file.
3180   //          This is done by DumpMerger, which is performed outside safepoint
3181 
3182   DumpMerger merger(path, &writer, dumper.inlined_objects(), dumper.dump_seq());
3183   // Perform heapdump file merge operation in the current thread prevents us
3184   // from occupying the VM Thread, which in turn affects the occurrence of
3185   // GC and other VM operations.
3186   merger.do_merge();
3187   if (writer.error() != nullptr) {
3188     set_error(writer.error());
3189   }
3190 
3191   // emit JFR event
3192   if (error() == nullptr) {
3193     event.set_destination(path);
3194     event.set_gcBeforeDump(_gc_before_heap_dump);
3195     event.set_size(writer.bytes_written());
3196     event.set_onOutOfMemoryError(_oome);
3197     event.set_overwrite(overwrite);
3198     event.set_compression(compression);
3199     event.commit();
3200   } else {
3201     log_debug(aot, heap)("Error %s while dumping heap", error());
3202   }
< prev index next >