25
26 #include "precompiled.hpp"
27 #include "classfile/classLoaderData.inline.hpp"
28 #include "classfile/classLoaderDataGraph.hpp"
29 #include "classfile/javaClasses.inline.hpp"
30 #include "classfile/symbolTable.hpp"
31 #include "classfile/vmClasses.hpp"
32 #include "classfile/vmSymbols.hpp"
33 #include "gc/shared/gcLocker.hpp"
34 #include "gc/shared/gcVMOperations.hpp"
35 #include "gc/shared/workerThread.hpp"
36 #include "jfr/jfrEvents.hpp"
37 #include "jvm.h"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/resourceArea.hpp"
40 #include "memory/universe.hpp"
41 #include "oops/fieldStreams.inline.hpp"
42 #include "oops/klass.inline.hpp"
43 #include "oops/objArrayKlass.hpp"
44 #include "oops/objArrayOop.inline.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "oops/typeArrayOop.inline.hpp"
47 #include "runtime/continuationWrapper.inline.hpp"
48 #include "runtime/frame.inline.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/javaCalls.hpp"
51 #include "runtime/javaThread.inline.hpp"
52 #include "runtime/jniHandles.hpp"
53 #include "runtime/os.hpp"
54 #include "runtime/threads.hpp"
55 #include "runtime/threadSMR.hpp"
56 #include "runtime/timerTrace.hpp"
57 #include "runtime/vframe.hpp"
58 #include "runtime/vmOperations.hpp"
59 #include "runtime/vmThread.hpp"
60 #include "services/heapDumper.hpp"
61 #include "services/heapDumperCompression.hpp"
62 #include "services/threadService.hpp"
63 #include "utilities/checkedCast.hpp"
64 #include "utilities/macros.hpp"
65 #include "utilities/ostream.hpp"
66 #ifdef LINUX
67 #include "os_linux.hpp"
299 * 7: double array
300 * 8: byte array
301 * 9: short array
302 * 10: int array
303 * 11: long array
304 * [u1]* elements
305 *
306 * HPROF_CPU_SAMPLES a set of sample traces of running threads
307 *
308 * u4 total number of samples
309 * u4 # of traces
310 * [u4 # of samples
311 * u4]* stack trace serial number
312 *
313 * HPROF_CONTROL_SETTINGS the settings of on/off switches
314 *
315 * u4 0x00000001: alloc traces on/off
316 * 0x00000002: cpu sampling on/off
317 * u2 stack trace depth
318 *
319 *
320 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
321 * be generated as a sequence of heap dump segments. This sequence is
322 * terminated by an end record. The additional tags allowed by format
323 * "JAVA PROFILE 1.0.2" are:
324 *
325 * HPROF_HEAP_DUMP_SEGMENT denote a heap dump segment
326 *
327 * [heap dump sub-records]*
328 * The same sub-record types allowed by HPROF_HEAP_DUMP
329 *
330 * HPROF_HEAP_DUMP_END denotes the end of a heap dump
331 *
332 */
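// Every top-level record after the file header shares one framing: a one-byte
// tag, a u4 timestamp delta, and a u4 body length. A minimal sketch of what
// DumperSupport::write_header (declared below) presumably emits, matching the
// "JAVA PROFILE 1.0.2" framing described above:
//
//   writer->write_u1(tag);  // e.g. HPROF_UTF8
//   writer->write_u4(0);    // microseconds since the header timestamp
//   writer->write_u4(len);  // number of bytes that follow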
333
334
335 // HPROF tags
336
337 enum hprofTag : u1 {
338 // top-level records
339 HPROF_UTF8 = 0x01,
340 HPROF_LOAD_CLASS = 0x02,
341 HPROF_UNLOAD_CLASS = 0x03,
342 HPROF_FRAME = 0x04,
343 HPROF_TRACE = 0x05,
344 HPROF_ALLOC_SITES = 0x06,
345 HPROF_HEAP_SUMMARY = 0x07,
346 HPROF_START_THREAD = 0x0A,
347 HPROF_END_THREAD = 0x0B,
348 HPROF_HEAP_DUMP = 0x0C,
349 HPROF_CPU_SAMPLES = 0x0D,
350 HPROF_CONTROL_SETTINGS = 0x0E,
351
352 // 1.0.2 record types
353 HPROF_HEAP_DUMP_SEGMENT = 0x1C,
354 HPROF_HEAP_DUMP_END = 0x2C,
355
356 // field types
357 HPROF_ARRAY_OBJECT = 0x01,
358 HPROF_NORMAL_OBJECT = 0x02,
359 HPROF_BOOLEAN = 0x04,
360 HPROF_CHAR = 0x05,
361 HPROF_FLOAT = 0x06,
362 HPROF_DOUBLE = 0x07,
363 HPROF_BYTE = 0x08,
364 HPROF_SHORT = 0x09,
365 HPROF_INT = 0x0A,
366 HPROF_LONG = 0x0B,
367
368 // data-dump sub-records
369 HPROF_GC_ROOT_UNKNOWN = 0xFF,
370 HPROF_GC_ROOT_JNI_GLOBAL = 0x01,
371 HPROF_GC_ROOT_JNI_LOCAL = 0x02,
372 HPROF_GC_ROOT_JAVA_FRAME = 0x03,
373 HPROF_GC_ROOT_NATIVE_STACK = 0x04,
374 HPROF_GC_ROOT_STICKY_CLASS = 0x05,
375 HPROF_GC_ROOT_THREAD_BLOCK = 0x06,
376 HPROF_GC_ROOT_MONITOR_USED = 0x07,
377 HPROF_GC_ROOT_THREAD_OBJ = 0x08,
378 HPROF_GC_CLASS_DUMP = 0x20,
379 HPROF_GC_INSTANCE_DUMP = 0x21,
380 HPROF_GC_OBJ_ARRAY_DUMP = 0x22,
381 HPROF_GC_PRIM_ARRAY_DUMP = 0x23
382 };
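// Note that tag values are reused across contexts: 0x01, for example, is
// HPROF_UTF8 as a top-level record, HPROF_GC_ROOT_JNI_GLOBAL as a heap dump
// sub-record, and HPROF_ARRAY_OBJECT as a field type; the enclosing record
// determines which meaning applies.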
383
384 // Default stack trace ID (used for dummy HPROF_TRACE record)
385 enum {
386 STACK_TRACE_ID = 1,
387 INITIAL_CLASS_COUNT = 200
388 };
389
390 // Supports I/O operations for a dump
391 // Base class for dump and parallel dump
392 class AbstractDumpWriter : public CHeapObj<mtInternal> {
393 protected:
394 enum {
395 io_buffer_max_size = 1*M,
396 dump_segment_header_size = 9
397 };
398
399 char* _buffer; // internal buffer
400 size_t _size;
401 size_t _pos;
402
403 bool _in_dump_segment; // Are we currently in a dump segment?
404 bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
405 DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
406 DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
407
408 char* buffer() const { return _buffer; }
409 size_t buffer_size() const { return _size; }
726 }
727 }
728
729 class DumperClassCacheTable;
730 class DumperClassCacheTableEntry;
731
732 // Support class with a collection of functions used when dumping the heap
733 class DumperSupport : AllStatic {
734 public:
735
736 // write a header of the given type
737 static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);
738
739 // returns hprof tag for the given type signature
740 static hprofTag sig2tag(Symbol* sig);
741 // returns hprof tag for the given basic type
742 static hprofTag type2tag(BasicType type);
743 // Returns the size of the data to write.
744 static u4 sig2size(Symbol* sig);
745
746 // returns the size of the instance of the given class
747 static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);
748
749 // dump a jfloat
750 static void dump_float(AbstractDumpWriter* writer, jfloat f);
751 // dump a jdouble
752 static void dump_double(AbstractDumpWriter* writer, jdouble d);
753 // dumps the raw value of the given field
754 static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
755 // returns the size of the static fields; also counts the static fields
756 static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
757 // dumps static fields of the given class
758 static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
759 // dump the raw values of the instance fields of the given object
760 static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
761 // get the count of the instance fields for a given class
762 static u2 get_instance_fields_count(InstanceKlass* ik);
763 // dumps the definition of the instance fields for a given class
764 static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
765 // creates HPROF_GC_INSTANCE_DUMP record for the given object
766 static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
767 // creates HPROF_GC_CLASS_DUMP record for the given instance class
768 static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
769 // creates HPROF_GC_CLASS_DUMP record for a given array class
770 static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
771
772 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
773 static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
774 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
775 static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
776 // create HPROF_FRAME record for the given method and bci
777 static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
778
779 // check if we need to truncate an array
780 static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
781
782 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
783 static void end_of_dump(AbstractDumpWriter* writer);
784
785 static oop mask_dormant_archived_object(oop o, oop ref_obj) {
786 if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
787 // Ignore this object since the corresponding java mirror is not loaded.
788 // Might be a dormant archive object.
789 report_dormant_archived_object(o, ref_obj);
790 return nullptr;
791 } else {
792 return o;
793 }
794 }
795
796 static void report_dormant_archived_object(oop o, oop ref_obj) {
797 if (log_is_enabled(Trace, cds, heap)) {
798 ResourceMark rm;
799 if (ref_obj != nullptr) {
800 log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
801 p2i(o), o->klass()->external_name(),
802 p2i(ref_obj), ref_obj->klass()->external_name());
803 } else {
804 log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
805 p2i(o), o->klass()->external_name());
806 }
807 }
808 }
809 };
810
811 // Hash table of klasses to the klass metadata. This should greatly improve the
812 // heap dumping performance. This hash table is supposed to be used by a single
813 // thread only.
814 //
815 class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
816 friend class DumperClassCacheTable;
817 private:
818 GrowableArray<char> _sigs_start;
819 GrowableArray<int> _offsets;
820 u4 _instance_size;
821 int _entries;
822
823 public:
824 DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};
825
826 int field_count() { return _entries; }
827 char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
828 int offset(int field_idx) { return _offsets.at(field_idx); }
829 u4 instance_size() { return _instance_size; }
830 };
831
832 class DumperClassCacheTable {
833 private:
834 // ResourceHashtable SIZE is specified at compile time so we
835 // use 1031 which is the first prime after 1024.
836 static constexpr size_t TABLE_SIZE = 1031;
837
838 // Maintain the cache for N classes. This limits memory footprint
839 // impact, regardless of how many classes we have in the dump.
840 // This also improves look up performance by keeping the statically
841 // sized table from overloading.
842 static constexpr int CACHE_TOP = 256;
843
844 typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
845 TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
846 PtrTable* _ptrs;
847
856 bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
857 delete entry;
858 return true;
859 }
860 } cleanup;
861 table->unlink(&cleanup);
862 }
863
864 public:
865 DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
866 if (_last_ik == ik) {
867 return _last_entry;
868 }
869
870 DumperClassCacheTableEntry* entry;
871 DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
872 if (from_cache == nullptr) {
873 entry = new DumperClassCacheTableEntry();
874 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
875 if (!fld.access_flags().is_static()) {
876 Symbol* sig = fld.signature();
877 entry->_sigs_start.push(sig->char_at(0));
878 entry->_offsets.push(fld.offset());
879 entry->_entries++;
880 entry->_instance_size += DumperSupport::sig2size(sig);
881 }
882 }
883
884 if (_ptrs->number_of_entries() >= CACHE_TOP) {
885 // We do not track the individual hit rates for table entries.
886 // Purge the entire table, and let the cache catch up with new
887 // distribution.
888 unlink_all(_ptrs);
889 }
890
891 _ptrs->put(ik, entry);
892 } else {
893 entry = *from_cache;
894 }
895
896 // Remember for single-slot cache.
897 _last_ik = ik;
898 _last_entry = entry;
899
900 return entry;
970 }
971
972 // dump a jfloat
973 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
974 if (g_isnan(f)) {
975 writer->write_u4(0x7fc00000); // collapsing NaNs
976 } else {
977 writer->write_u4(bit_cast<u4>(f));
978 }
979 }
980
981 // dump a jdouble
982 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
983 if (g_isnan(d)) {
984 writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
985 } else {
986 writer->write_u8(bit_cast<u8>(d));
987 }
988 }
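// Both writers collapse every NaN to the canonical quiet-NaN bit pattern:
// 0x7fc00000 for jfloat and 0x7ff8000000000000 for jdouble (spelled above as
// 0x7ff80000ull << 32). A small illustration of the effect (a sketch, not part
// of the dumper):
//
//   jfloat f = bit_cast<jfloat>((u4)0x7fc00001);  // NaN with a payload bit set
//   // g_isnan(f) is true, so dump_float() writes 0x7fc00000, dropping the
//   // payload; readers therefore see one stable NaN value.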
989
990 // dumps the raw value of the given field
991 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
992 switch (type) {
993 case JVM_SIGNATURE_CLASS :
994 case JVM_SIGNATURE_ARRAY : {
995 oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
996 o = mask_dormant_archived_object(o, obj);
997 assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
998 writer->write_objectID(o);
999 break;
1000 }
1001 case JVM_SIGNATURE_BYTE : {
1002 jbyte b = obj->byte_field(offset);
1003 writer->write_u1(b);
1004 break;
1005 }
1006 case JVM_SIGNATURE_CHAR : {
1007 jchar c = obj->char_field(offset);
1008 writer->write_u2(c);
1009 break;
1028 writer->write_u4(i);
1029 break;
1030 }
1031 case JVM_SIGNATURE_LONG : {
1032 jlong l = obj->long_field(offset);
1033 writer->write_u8(l);
1034 break;
1035 }
1036 case JVM_SIGNATURE_BOOLEAN : {
1037 jboolean b = obj->bool_field(offset);
1038 writer->write_u1(b);
1039 break;
1040 }
1041 default : {
1042 ShouldNotReachHere();
1043 break;
1044 }
1045 }
1046 }
1047
1048 // returns the size of the instance of the given class
1049 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1050 if (class_cache_entry != nullptr) {
1051 return class_cache_entry->instance_size();
1052 } else {
1053 u4 size = 0;
1054 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1055 if (!fld.access_flags().is_static()) {
1056 size += sig2size(fld.signature());
1057 }
1058 }
1059 return size;
1060 }
1061 }
1062
1063 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1064 field_count = 0;
1065 u4 size = 0;
1066
1067 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1068 if (fldc.access_flags().is_static()) {
1069 field_count++;
1070 size += sig2size(fldc.signature());
1071 }
1072 }
1073
1074 // Add in resolved_references which is referenced by the cpCache
1075 // The resolved_references is an array per InstanceKlass holding the
1076 // strings and other oops resolved from the constant pool.
1077 oop resolved_references = ik->constants()->resolved_references_or_null();
1078 if (resolved_references != nullptr) {
1079 field_count++;
1080 size += sizeof(address);
1081
1082 // Add in the resolved_references of the used previous versions of the class
1083 // in the case of RedefineClasses
1084 InstanceKlass* prev = ik->previous_versions();
1085 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1086 field_count++;
1087 size += sizeof(address);
1088 prev = prev->previous_versions();
1091
1092 // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1093 // arrays.
1094 oop init_lock = ik->init_lock();
1095 if (init_lock != nullptr) {
1096 field_count++;
1097 size += sizeof(address);
1098 }
1099
1100 // We write the value itself plus a name and a one byte type tag per field.
1101 return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1102 }
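// Worked example for the formula above (hypothetical class, 64-bit, so
// sizeof(address) == 8): a class with one static int and one static Object
// field, and no resolved_references or init_lock extras, gives
//   size = 4 + 8 = 12, field_count = 2,
//   total = 12 + 2 * (8 + 1) = 30 bytes
// i.e. each field costs its raw value plus 9 bytes of name ID and type tag.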
1103
1104 // dumps static fields of the given class
1105 void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
1106 InstanceKlass* ik = InstanceKlass::cast(k);
1107
1108 // dump the field descriptors and raw values
1109 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1110 if (fld.access_flags().is_static()) {
1111 Symbol* sig = fld.signature();
1112
1113 writer->write_symbolID(fld.name()); // name
1114 writer->write_u1(sig2tag(sig)); // type
1115
1116 // value
1117 dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1118 }
1119 }
1120
1121 // Add resolved_references for each class that has them
1122 oop resolved_references = ik->constants()->resolved_references_or_null();
1123 if (resolved_references != nullptr) {
1124 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1125 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1126 writer->write_objectID(resolved_references);
1127
1128 // Also write any previous versions
1129 InstanceKlass* prev = ik->previous_versions();
1130 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1131 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1132 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1133 writer->write_objectID(prev->constants()->resolved_references());
1134 prev = prev->previous_versions();
1135 }
1136 }
1137
1138 // Add init lock to the end if the class is not yet initialized
1139 oop init_lock = ik->init_lock();
1140 if (init_lock != nullptr) {
1141 writer->write_symbolID(vmSymbols::init_lock_name()); // name
1142 writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
1143 writer->write_objectID(init_lock);
1144 }
1145 }
1146
1147 // dump the raw values of the instance fields of the given object
1148 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {
1149 assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1150 for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1151 dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
1152 }
1153 }
1154
1155 // gets the count of the instance fields for a given class
1156 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1157 u2 field_count = 0;
1158
1159 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1160 if (!fldc.access_flags().is_static()) field_count++;
1161 }
1162
1163 return field_count;
1164 }
1165
1166 // dumps the definition of the instance fields for a given class
1167 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
1168 InstanceKlass* ik = InstanceKlass::cast(k);
1169
1170 // dump the field descriptors
1171 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1172 if (!fld.access_flags().is_static()) {
1173 Symbol* sig = fld.signature();
1174
1175 writer->write_symbolID(fld.name()); // name
1176 writer->write_u1(sig2tag(sig)); // type
1177 }
1178 }
1179 }
1180
1181 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1182 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1183 InstanceKlass* ik = InstanceKlass::cast(o->klass());
1184
1185 DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1186
1187 u4 is = instance_size(ik, cache_entry);
1188 u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1189
1190 writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1191 writer->write_objectID(o);
1192 writer->write_u4(STACK_TRACE_ID);
1193
1194 // class ID
1195 writer->write_classID(ik);
1196
1197 // number of bytes that follow
1198 writer->write_u4(is);
1199
1200 // field values
1201 dump_instance_fields(writer, o, cache_entry);
1202
1203 writer->end_sub_record();
1204 }
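// For reference, with sizeof(address) == 8 the sub-record prologue above is
// 1 (tag) + 8 (object ID) + 4 (stack trace serial) + 8 (class ID) + 4 (length)
// == 25 bytes, followed by `is` bytes of packed field values.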
1205
1206 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1207 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1208 InstanceKlass* ik = InstanceKlass::cast(k);
1209
1210 // We can safepoint and do a heap dump at a point where we have a Klass,
1211 // but no java mirror class has been set up for it. So we need to check
1212 // that the class is at least loaded, to avoid a crash from a null mirror.
1213 if (!ik->is_loaded()) {
1214 return;
1215 }
1216
1217 u2 static_fields_count = 0;
1218 u4 static_size = get_static_fields_size(ik, static_fields_count);
1219 u2 instance_fields_count = get_instance_fields_count(ik);
1220 u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1221 u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1226 writer->write_classID(ik);
1227 writer->write_u4(STACK_TRACE_ID);
1228
1229 // super class ID
1230 InstanceKlass* java_super = ik->java_super();
1231 if (java_super == nullptr) {
1232 writer->write_objectID(oop(nullptr));
1233 } else {
1234 writer->write_classID(java_super);
1235 }
1236
1237 writer->write_objectID(ik->class_loader());
1238 writer->write_objectID(ik->signers());
1239 writer->write_objectID(ik->protection_domain());
1240
1241 // reserved
1242 writer->write_objectID(oop(nullptr));
1243 writer->write_objectID(oop(nullptr));
1244
1245 // instance size
1246 writer->write_u4(DumperSupport::instance_size(ik));
1247
1248 // size of constant pool - ignored by HAT 1.1
1249 writer->write_u2(0);
1250
1251 // static fields
1252 writer->write_u2(static_fields_count);
1253 dump_static_fields(writer, ik);
1254
1255 // description of instance fields
1256 writer->write_u2(instance_fields_count);
1257 dump_instance_field_descriptors(writer, ik);
1258
1259 writer->end_sub_record();
1260 }
1261
1262 // creates HPROF_GC_CLASS_DUMP record for the given array class
1263 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1264 InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1265 if (k->is_objArray_klass()) {
1266 Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
1280 assert(java_super != nullptr, "checking");
1281 writer->write_classID(java_super);
1282
1283 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1284 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1285 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1286
1287 writer->write_objectID(oop(nullptr)); // reserved
1288 writer->write_objectID(oop(nullptr));
1289 writer->write_u4(0); // instance size
1290 writer->write_u2(0); // constant pool
1291 writer->write_u2(0); // static fields
1292 writer->write_u2(0); // instance fields
1293
1294 writer->end_sub_record();
1295
1296 }
1297
1298 // Hprof uses a u4 as its record length field,
1299 // which means we need to truncate arrays that are too long.
1300 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1301 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1302 assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
1303
1304 int length = array->length();
1305
1306 int type_size;
1307 if (type == T_OBJECT) {
1308 type_size = sizeof(address);
1309 } else {
1310 type_size = type2aelembytes(type);
1311 }
1312
1313 size_t length_in_bytes = (size_t)length * type_size;
1314 uint max_bytes = max_juint - header_size;
1315
1316 if (length_in_bytes > max_bytes) {
1317 length = max_bytes / type_size;
1318 length_in_bytes = (size_t)length * type_size;
1319
1320 warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1321 type2name_tab[type], array->length(), length);
1322 }
1323 return length;
1324 }
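// Worked example (64-bit): a jlong[] uses header_size == 18 in the primitive
// array path below, so max_bytes == 4294967295 - 18. A 600,000,000-element
// jlong[] needs 4.8e9 bytes and is therefore truncated to
// (4294967295 - 18) / 8 == 536,870,909 elements, with the warning above
// reporting both the original and the truncated length.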
1325
1326 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1327 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1328 // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1329 short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1330 int length = calculate_array_max_length(writer, array, header_size);
1331 u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1332
1333 writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1334 writer->write_objectID(array);
1335 writer->write_u4(STACK_TRACE_ID);
1336 writer->write_u4(length);
1337
1338 // array class ID
1339 writer->write_classID(array->klass());
1340
1341 // [id]* elements
1342 for (int index = 0; index < length; index++) {
1343 oop o = array->obj_at(index);
1344 o = mask_dormant_archived_object(o, array);
1345 writer->write_objectID(o);
1346 }
1347
1348 writer->end_sub_record();
1349 }
1350
1351 #define WRITE_ARRAY(Array, Type, Size, Length) \
1352 for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
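// For example, for a jint array the call
//   WRITE_ARRAY(array, int, u4, length)
// expands to
//   for (int i = 0; i < length; i++) { writer->write_u4((u4)array->int_at(i)); }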
1353
1354 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1355 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1356 BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1357 // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1358 short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1359
1360 int length = calculate_array_max_length(writer, array, header_size);
1361 int type_size = type2aelembytes(type);
1362 u4 length_in_bytes = (u4)length * type_size;
1363 u4 size = header_size + length_in_bytes;
1364
1365 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1366 writer->write_objectID(array);
1367 writer->write_u4(STACK_TRACE_ID);
1368 writer->write_u4(length);
1369 writer->write_u1(type2tag(type));
1370
1452 int bci) {
1453 int line_number;
1454 if (m->is_native()) {
1455 line_number = -3; // native frame
1456 } else {
1457 line_number = m->line_number_from_bci(bci);
1458 }
1459
1460 write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1461 writer->write_id(frame_serial_num); // frame serial number
1462 writer->write_symbolID(m->name()); // method's name
1463 writer->write_symbolID(m->signature()); // method's signature
1464
1465 assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1466 writer->write_symbolID(m->method_holder()->source_file_name()); // source file name
1467 writer->write_u4(class_serial_num); // class serial number
1468 writer->write_u4((u4) line_number); // line number
1469 }
1470
1471
1472 // Support class used to generate HPROF_UTF8 records from the entries in the
1473 // SymbolTable.
1474
1475 class SymbolTableDumper : public SymbolClosure {
1476 private:
1477 AbstractDumpWriter* _writer;
1478 AbstractDumpWriter* writer() const { return _writer; }
1479 public:
1480 SymbolTableDumper(AbstractDumpWriter* writer) { _writer = writer; }
1481 void do_symbol(Symbol** p);
1482 };
1483
1484 void SymbolTableDumper::do_symbol(Symbol** p) {
1485 ResourceMark rm;
1486 Symbol* sym = *p;
1487 int len = sym->utf8_length();
1488 if (len > 0) {
1489 char* s = sym->as_utf8();
1490 DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1491 writer()->write_symbolID(sym);
1975 return;
1976 }
1977 }
1978
1979 if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
1980 return;
1981 }
1982
1983 if (o->is_instance()) {
1984 // create a HPROF_GC_INSTANCE record for each object
1985 DumperSupport::dump_instance(writer(), o, &_class_cache);
1986 // If we encounter an unmounted virtual thread it needs to be dumped explicitly
1987 // (mounted virtual threads are dumped with their carriers).
1988 if (java_lang_VirtualThread::is_instance(o)
1989 && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
1990 _vthread_dumper->dump_vthread(o, writer());
1991 }
1992 } else if (o->is_objArray()) {
1993 // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
1994 DumperSupport::dump_object_array(writer(), objArrayOop(o));
1995 } else if (o->is_typeArray()) {
1996 // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
1997 DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
1998 }
1999 }
2000
2001 // The dumper controller for parallel heap dump
2002 class DumperController : public CHeapObj<mtInternal> {
2003 private:
2004 Monitor* _lock;
2005 Mutex* _global_writer_lock;
2006
2007 const uint _dumper_number;
2008 uint _complete_number;
2009
2010 bool _started; // VM dumper started and acquired global writer lock
2011
2012 public:
2013 DumperController(uint number) :
2014 // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
2054 _complete_number++;
2055 // propagate local error to global if any
2056 if (local_writer->has_error()) {
2057 global_writer->set_error(local_writer->error());
2058 }
2059 ml.notify();
2060 }
2061
2062 void wait_all_dumpers_complete() {
2063 MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2064 while (_complete_number != _dumper_number) {
2065 ml.wait();
2066 }
2067 }
2068 };
2069
2070 // DumpMerger merges separate dump files into a complete one
2071 class DumpMerger : public StackObj {
2072 private:
2073 DumpWriter* _writer;
2074 const char* _path;
2075 bool _has_error;
2076 int _dump_seq;
2077
2078 private:
2079 void merge_file(const char* path);
2080 void merge_done();
2081 void set_error(const char* msg);
2082
2083 public:
2084 DumpMerger(const char* path, DumpWriter* writer, int dump_seq) :
2085 _writer(writer),
2086 _path(path),
2087 _has_error(_writer->has_error()),
2088 _dump_seq(dump_seq) {}
2089
2090 void do_merge();
2091
2092 // returns path for the parallel DumpWriter (resource allocated)
2093 static char* get_writer_path(const char* base_path, int seq);
2094
2095 };
2096
2097 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2098 // approximate required buffer size
2099 size_t buf_size = strlen(base_path)
2100 + 2 // ".p"
2101 + 10 // number (that's enough for 2^32 parallel dumpers)
2102 + 1; // '\0'
2103
2104 char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2105 memset(path, 0, buf_size);
2106
2107 os::snprintf(path, buf_size, "%s.p%d", base_path, seq);
2108
2109 return path;
2110 }
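// For example, base_path "/tmp/heap.hprof" with seq 3 yields "/tmp/heap.hprof.p3".
// Each parallel dumper writes its own ".pN" segment file, which DumpMerger
// later appends to the final dump.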
2111
2112
2113 void DumpMerger::merge_done() {
2114 // Writes the HPROF_HEAP_DUMP_END record.
2115 if (!_has_error) {
2116 DumperSupport::end_of_dump(_writer);
2117 _writer->flush();
2118 }
2119   _dump_seq = 0; // reset
2120 }
2121
2122 void DumpMerger::set_error(const char* msg) {
2123 assert(msg != nullptr, "sanity check");
2124 log_error(heapdump)("%s (file: %s)", msg, _path);
2125 _writer->set_error(msg);
2126 _has_error = true;
2127 }
2128
2129 #ifdef LINUX
2130 // Merge segmented heap files via sendfile; it's more efficient than the
2131 // read+write combination, which would require transferring data to and from
2132 // user space.
2133 void DumpMerger::merge_file(const char* path) {
2134 TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2135
2136 int segment_fd = os::open(path, O_RDONLY, 0);
2137 if (segment_fd == -1) {
2217 // restore compressor for further use
2218 _writer->set_compressor(saved_compressor);
2219 merge_done();
2220 }
2221
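// The Linux merge path above copies each ".pN" segment into the final file with
// sendfile(2), so the bytes never cross into user space. A minimal sketch of
// such a copy loop (hypothetical names, assumes <sys/sendfile.h> and
// already-opened fds; the real merge_file also handles errors and cleanup):
//
//   off_t offset = 0;
//   while ((size_t)offset < segment_size) {
//     ssize_t n = ::sendfile(out_fd, segment_fd, &offset, segment_size - (size_t)offset);
//     if (n <= 0) break;  // 0 or -1 means the merge failed
//   }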
2222 // The VM operation that performs the heap dump
2223 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2224 private:
2225 DumpWriter* _writer;
2226 JavaThread* _oome_thread;
2227 Method* _oome_constructor;
2228 bool _gc_before_heap_dump;
2229 GrowableArray<Klass*>* _klass_map;
2230
2231 ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
2232 int _thread_dumpers_count;
2233 volatile int _thread_serial_num;
2234 volatile int _frame_serial_num;
2235
2236 volatile int _dump_seq;
2237 // parallel heap dump support
2238 uint _num_dumper_threads;
2239 DumperController* _dumper_controller;
2240 ParallelObjectIterator* _poi;
2241
2242 // Dumper id of VMDumper thread.
2243 static const int VMDumperId = 0;
2244 // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2245 static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2246 // the 1st dumper calling get_next_dumper_id becomes VM dumper
2247 int get_next_dumper_id() {
2248 return Atomic::fetch_then_add(&_dump_seq, 1);
2249 }
2250
2251 DumpWriter* writer() const { return _writer; }
2252
2253 bool skip_operation() const;
2254
2255 // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2256 void dump_threads(AbstractDumpWriter* writer);
2297 }
2298
2299 ~VM_HeapDumper() {
2300 if (_thread_dumpers != nullptr) {
2301 for (int i = 0; i < _thread_dumpers_count; i++) {
2302 delete _thread_dumpers[i];
2303 }
2304 FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2305 }
2306
2307 if (_dumper_controller != nullptr) {
2308 delete _dumper_controller;
2309 _dumper_controller = nullptr;
2310 }
2311 delete _klass_map;
2312 }
2313 int dump_seq() { return _dump_seq; }
2314 bool is_parallel_dump() { return _num_dumper_threads > 1; }
2315 void prepare_parallel_dump(WorkerThreads* workers);
2316
2317 VMOp_Type type() const { return VMOp_HeapDumper; }
2318 virtual bool doit_prologue();
2319 void doit();
2320 void work(uint worker_id);
2321
2322 // UnmountedVThreadDumper implementation
2323 void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2324 };
2325
2326 bool VM_HeapDumper::skip_operation() const {
2327 return false;
2328 }
2329
2330 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2331 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2332 writer->finish_dump_segment();
2333
2334 writer->write_u1(HPROF_HEAP_DUMP_END);
2335 writer->write_u4(0);
2336 writer->write_u4(0);
2434 _dumper_controller->lock_global_writer();
2435 _dumper_controller->signal_start();
2436 } else {
2437 _dumper_controller->wait_for_start_signal();
2438 }
2439
2440 if (is_vm_dumper(dumper_id)) {
2441 TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2442 // Write the file header - we always use 1.0.2
2443 const char* header = "JAVA PROFILE 1.0.2";
2444
2445 // header is a few bytes long - no chance to overflow int
2446 writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2447 writer()->write_u4(oopSize);
2448 // timestamp is current time in ms
2449 writer()->write_u8(os::javaTimeMillis());
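    // With the three writes above, a 64-bit dump therefore begins with:
    //   "JAVA PROFILE 1.0.2\0"      (19 bytes, NUL included)
    //   00 00 00 08                 (u4 identifier size, from oopSize)
    //   <u8 millisecond timestamp>  (big-endian, like all HPROF integers)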
2450 // HPROF_UTF8 records
2451 SymbolTableDumper sym_dumper(writer());
2452 SymbolTable::symbols_do(&sym_dumper);
2453
2454 // write HPROF_LOAD_CLASS records
2455 {
2456 LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
2457 ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
2458 }
2459
2460 // write HPROF_FRAME and HPROF_TRACE records
2461 // this must be called after _klass_map has been built by iterating the classes above.
2462 dump_stack_traces(writer());
2463
2464 // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2465 _dumper_controller->unlock_global_writer();
2466 }
2467
2468 // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2469
2470 ResourceMark rm;
2471 // share the global compressor; the local DumpWriter is not responsible for its life cycle
2472 DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
2473 writer()->is_overwrite(), writer()->compressor());
2633 (error() != nullptr) ? error() : "reason unknown");
2634 }
2635 return -1;
2636 }
2637
2638 // generate the segmented heap dump into separate files
2639 VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
2640 VMThread::execute(&dumper);
2641
2642 // record any error that the writer may have encountered
2643 set_error(writer.error());
2644
2645 // The heap dump process is done in two phases:
2646 //
2647 // Phase 1: Concurrent threads directly write heap data to multiple heap files.
2648 //          This is done by VM_HeapDumper, which is performed within a safepoint.
2649 //
2650 // Phase 2: Merge multiple heap files into one complete heap dump file.
2651 //          This is done by DumpMerger, which is performed outside the safepoint.
2652
2653 DumpMerger merger(path, &writer, dumper.dump_seq());
2654 // Performing the heapdump file merge operation in the current thread prevents us
2655 // from occupying the VM Thread, which in turn would delay GC and other
2656 // VM operations.
2657 merger.do_merge();
2658 if (writer.error() != nullptr) {
2659 set_error(writer.error());
2660 }
2661
2662 // emit JFR event
2663 if (error() == nullptr) {
2664 event.set_destination(path);
2665 event.set_gcBeforeDump(_gc_before_heap_dump);
2666 event.set_size(writer.bytes_written());
2667 event.set_onOutOfMemoryError(_oome);
2668 event.set_overwrite(overwrite);
2669 event.set_compression(compression);
2670 event.commit();
2671 } else {
2672 log_debug(cds, heap)("Error %s while dumping heap", error());
2673 }
|
25
26 #include "precompiled.hpp"
27 #include "classfile/classLoaderData.inline.hpp"
28 #include "classfile/classLoaderDataGraph.hpp"
29 #include "classfile/javaClasses.inline.hpp"
30 #include "classfile/symbolTable.hpp"
31 #include "classfile/vmClasses.hpp"
32 #include "classfile/vmSymbols.hpp"
33 #include "gc/shared/gcLocker.hpp"
34 #include "gc/shared/gcVMOperations.hpp"
35 #include "gc/shared/workerThread.hpp"
36 #include "jfr/jfrEvents.hpp"
37 #include "jvm.h"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/resourceArea.hpp"
40 #include "memory/universe.hpp"
41 #include "oops/fieldStreams.inline.hpp"
42 #include "oops/flatArrayKlass.hpp"
43 #include "oops/flatArrayOop.inline.hpp"
44 #include "oops/klass.inline.hpp"
45 #include "oops/objArrayKlass.hpp"
46 #include "oops/objArrayOop.inline.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "oops/typeArrayOop.inline.hpp"
49 #include "runtime/continuationWrapper.inline.hpp"
50 #include "runtime/fieldDescriptor.inline.hpp"
51 #include "runtime/frame.inline.hpp"
52 #include "runtime/handles.inline.hpp"
53 #include "runtime/javaCalls.hpp"
54 #include "runtime/javaThread.inline.hpp"
55 #include "runtime/jniHandles.hpp"
56 #include "runtime/os.hpp"
57 #include "runtime/threads.hpp"
58 #include "runtime/threadSMR.hpp"
59 #include "runtime/timerTrace.hpp"
60 #include "runtime/vframe.hpp"
61 #include "runtime/vmOperations.hpp"
62 #include "runtime/vmThread.hpp"
63 #include "services/heapDumper.hpp"
64 #include "services/heapDumperCompression.hpp"
65 #include "services/threadService.hpp"
66 #include "utilities/checkedCast.hpp"
67 #include "utilities/macros.hpp"
68 #include "utilities/ostream.hpp"
69 #ifdef LINUX
70 #include "os_linux.hpp"
302 * 7: double array
303 * 8: byte array
304 * 9: short array
305 * 10: int array
306 * 11: long array
307 * [u1]* elements
308 *
309 * HPROF_CPU_SAMPLES a set of sample traces of running threads
310 *
311 * u4 total number of samples
312 * u4 # of traces
313 * [u4 # of samples
314 * u4]* stack trace serial number
315 *
316 * HPROF_CONTROL_SETTINGS the settings of on/off switches
317 *
318 * u4 0x00000001: alloc traces on/off
319 * 0x00000002: cpu sampling on/off
320 * u2 stack trace depth
321 *
322 * HPROF_FLAT_ARRAYS list of flat arrays
323 *
324 * [flat array sub-records]*
325 *
326 * HPROF_FLAT_ARRAY flat array
327 *
328 * id array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
329 * id element class ID (dumped by HPROF_GC_CLASS_DUMP)
330 *
331 * HPROF_INLINED_FIELDS describes inlined fields
332 *
333 * [class with inlined fields sub-records]*
334 *
335 * HPROF_CLASS_WITH_INLINED_FIELDS
336 *
337 * id class ID (dumped as HPROF_GC_CLASS_DUMP)
338 *
339 * u2 number of instance inlined fields (not including super)
340 * [u2, inlined field index,
341 * u2, synthetic field count,
342 * id, original field name,
343 * id]* inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
344 *
345 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
346 * be generated as a sequence of heap dump segments. This sequence is
347 * terminated by an end record. The additional tags allowed by format
348 * "JAVA PROFILE 1.0.2" are:
349 *
350 * HPROF_HEAP_DUMP_SEGMENT denote a heap dump segment
351 *
352 * [heap dump sub-records]*
353 * The same sub-record types allowed by HPROF_HEAP_DUMP
354 *
355 * HPROF_HEAP_DUMP_END denotes the end of a heap dump
356 *
357 */
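// In other words, a flat array appears twice in the dump: its packed payload is
// written as an ordinary HPROF_GC_PRIM_ARRAY_DUMP sub-record, and a
// HPROF_FLAT_ARRAY entry pairs that array's object ID with the element class ID
// so a reader can re-interpret the primitive payload as inlined objects.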
358
359
360 // HPROF tags
361
362 enum hprofTag : u1 {
363 // top-level records
364 HPROF_UTF8 = 0x01,
365 HPROF_LOAD_CLASS = 0x02,
366 HPROF_UNLOAD_CLASS = 0x03,
367 HPROF_FRAME = 0x04,
368 HPROF_TRACE = 0x05,
369 HPROF_ALLOC_SITES = 0x06,
370 HPROF_HEAP_SUMMARY = 0x07,
371 HPROF_START_THREAD = 0x0A,
372 HPROF_END_THREAD = 0x0B,
373 HPROF_HEAP_DUMP = 0x0C,
374 HPROF_CPU_SAMPLES = 0x0D,
375 HPROF_CONTROL_SETTINGS = 0x0E,
376
377 // 1.0.2 record types
378 HPROF_HEAP_DUMP_SEGMENT = 0x1C,
379 HPROF_HEAP_DUMP_END = 0x2C,
380
381 // inlined object support
382 HPROF_FLAT_ARRAYS = 0x12,
383 HPROF_INLINED_FIELDS = 0x13,
384 // inlined object subrecords
385 HPROF_FLAT_ARRAY = 0x01,
386 HPROF_CLASS_WITH_INLINED_FIELDS = 0x01,
387
388 // field types
389 HPROF_ARRAY_OBJECT = 0x01,
390 HPROF_NORMAL_OBJECT = 0x02,
391 HPROF_BOOLEAN = 0x04,
392 HPROF_CHAR = 0x05,
393 HPROF_FLOAT = 0x06,
394 HPROF_DOUBLE = 0x07,
395 HPROF_BYTE = 0x08,
396 HPROF_SHORT = 0x09,
397 HPROF_INT = 0x0A,
398 HPROF_LONG = 0x0B,
399
400 // data-dump sub-records
401 HPROF_GC_ROOT_UNKNOWN = 0xFF,
402 HPROF_GC_ROOT_JNI_GLOBAL = 0x01,
403 HPROF_GC_ROOT_JNI_LOCAL = 0x02,
404 HPROF_GC_ROOT_JAVA_FRAME = 0x03,
405 HPROF_GC_ROOT_NATIVE_STACK = 0x04,
406 HPROF_GC_ROOT_STICKY_CLASS = 0x05,
407 HPROF_GC_ROOT_THREAD_BLOCK = 0x06,
408 HPROF_GC_ROOT_MONITOR_USED = 0x07,
409 HPROF_GC_ROOT_THREAD_OBJ = 0x08,
410 HPROF_GC_CLASS_DUMP = 0x20,
411 HPROF_GC_INSTANCE_DUMP = 0x21,
412 HPROF_GC_OBJ_ARRAY_DUMP = 0x22,
413 HPROF_GC_PRIM_ARRAY_DUMP = 0x23
414 };
415
416 // Default stack trace ID (used for dummy HPROF_TRACE record)
417 enum {
418 STACK_TRACE_ID = 1,
419 INITIAL_CLASS_COUNT = 200
420 };
421
422
423 class AbstractDumpWriter;
424
425 class InlinedObjects {
426
427 struct ClassInlinedFields {
428 const Klass *klass;
429 uintx base_index; // base index of the inlined field names (1st field has index base_index+1).
430 ClassInlinedFields(const Klass *klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}
431
432 // For GrowableArray::find_sorted().
433 static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
434 return a.klass - b.klass;
435 }
436 // For GrowableArray::sort().
437 static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
438 return compare(*a, *b);
439 }
440 };
441
442 uintx _min_string_id;
443 uintx _max_string_id;
444
445 GrowableArray<ClassInlinedFields> *_inlined_field_map;
446
447 // counters for classes with inlined fields and for the fields
448 int _classes_count;
449 int _inlined_fields_count;
450
451 static InlinedObjects *_instance;
452
453 static void inlined_field_names_callback(InlinedObjects* _this, const Klass *klass, uintx base_index, int count);
454
455 GrowableArray<oop> *_flat_arrays;
456
457 public:
458 InlinedObjects()
459 : _min_string_id(0), _max_string_id(0),
460 _inlined_field_map(nullptr),
461 _classes_count(0), _inlined_fields_count(0),
462 _flat_arrays(nullptr) {
463 }
464
465 static InlinedObjects* get_instance() {
466 return _instance;
467 }
468
469 void init();
470 void release();
471
472 void dump_inlined_field_names(AbstractDumpWriter *writer);
473
474 uintx get_base_index_for(Klass* k);
475 uintx get_next_string_id(uintx id);
476
477 void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);
478
479 void add_flat_array(oop array);
480 void dump_flat_arrays(AbstractDumpWriter* writer);
481
482 };
483
484 InlinedObjects *InlinedObjects::_instance = nullptr;
485
486
487 // Supports I/O operations for a dump
488 // Base class for dump and parallel dump
489 class AbstractDumpWriter : public CHeapObj<mtInternal> {
490 protected:
491 enum {
492 io_buffer_max_size = 1*M,
493 dump_segment_header_size = 9
494 };
495
496 char* _buffer; // internal buffer
497 size_t _size;
498 size_t _pos;
499
500 bool _in_dump_segment; // Are we currently in a dump segment?
501 bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
502 DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
503 DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
504
505 char* buffer() const { return _buffer; }
506 size_t buffer_size() const { return _size; }
823 }
824 }
825
826 class DumperClassCacheTable;
827 class DumperClassCacheTableEntry;
828
829 // Support class with a collection of functions used when dumping the heap
830 class DumperSupport : AllStatic {
831 public:
832
833 // write a header of the given type
834 static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);
835
836 // returns hprof tag for the given type signature
837 static hprofTag sig2tag(Symbol* sig);
838 // returns hprof tag for the given basic type
839 static hprofTag type2tag(BasicType type);
840 // Returns the size of the data to write.
841 static u4 sig2size(Symbol* sig);
842
843 // calculates the total size of all the fields of the given class.
844 static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);
845
846 // dump a jfloat
847 static void dump_float(AbstractDumpWriter* writer, jfloat f);
848 // dump a jdouble
849 static void dump_double(AbstractDumpWriter* writer, jdouble d);
850 // dumps the raw value of the given field
851 static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
852 // returns the size of the static fields; also counts the static fields
853 static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
854 // dumps static fields of the given class
855 static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
856 // dump the raw values of the instance fields of the given identity or inlined object;
857 // for identity objects offset is 0 and 'klass' is o->klass(),
858 // for inlined objects offset is the offset within the holder object and 'klass' is the inlined object's class
859 static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);
860 // dump the raw values of the instance fields of the given inlined object;
861 // dump_instance_fields wrapper for inlined objects
862 static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);
863
864 // get the count of the instance fields for a given class
865 static u2 get_instance_fields_count(InstanceKlass* ik);
866 // dumps the definition of the instance fields for a given class
867 static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k, uintx *inlined_fields_index = nullptr);
868 // creates HPROF_GC_INSTANCE_DUMP record for the given object
869 static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
870 // creates HPROF_GC_CLASS_DUMP record for the given instance class
871 static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
872 // creates HPROF_GC_CLASS_DUMP record for a given array class
873 static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
874
875 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
876 static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
877 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
878 static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache);
879 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
880 static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
881 // create HPROF_FRAME record for the given method and bci
882 static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
883
884 // check if we need to truncate an array
885 static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
886 // extended version to dump flat arrays as primitive arrays;
887 // type_size specifies the size of the inlined objects.
888 static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);
889
890 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
891 static void end_of_dump(AbstractDumpWriter* writer);
892
893 static oop mask_dormant_archived_object(oop o, oop ref_obj) {
894 if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
895 // Ignore this object since the corresponding java mirror is not loaded.
896 // Might be a dormant archive object.
897 report_dormant_archived_object(o, ref_obj);
898 return nullptr;
899 } else {
900 return o;
901 }
902 }
903
904 // helper methods for inlined fields.
905 static bool is_inlined_field(const fieldDescriptor& fld) {
906 return fld.is_flat();
907 }
908 static InlineKlass* get_inlined_field_klass(const fieldDescriptor& fld) {
909 assert(is_inlined_field(fld), "must be inlined field");
910 InstanceKlass* holder_klass = fld.field_holder();
911 return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
912 }
913
914 static void report_dormant_archived_object(oop o, oop ref_obj) {
915 if (log_is_enabled(Trace, cds, heap)) {
916 ResourceMark rm;
917 if (ref_obj != nullptr) {
918 log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
919 p2i(o), o->klass()->external_name(),
920 p2i(ref_obj), ref_obj->klass()->external_name());
921 } else {
922 log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
923 p2i(o), o->klass()->external_name());
924 }
925 }
926 }
927 };
928
929 // Hash table of klasses to the klass metadata. This should greatly improve the
930 // heap dumping performance. This hash table is supposed to be used by a single
931 // thread only.
932 //
933 class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
934 friend class DumperClassCacheTable;
935 private:
936 GrowableArray<char> _sigs_start;
937 GrowableArray<int> _offsets;
938 GrowableArray<InlineKlass*> _inline_klasses;
939 u4 _instance_size;
940 int _entries;
941
942 public:
943 DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};
944
945 int field_count() { return _entries; }
946 char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
947 void push_sig_start_inlined() { _sigs_start.push('Q'); }
948 bool is_inlined(int field_idx){ return _sigs_start.at(field_idx) == 'Q'; }
949 InlineKlass* inline_klass(int field_idx) { assert(is_inlined(field_idx), "Not inlined"); return _inline_klasses.at(field_idx); }
950 int offset(int field_idx) { return _offsets.at(field_idx); }
951 u4 instance_size() { return _instance_size; }
952 };
953
954 class DumperClassCacheTable {
955 private:
956 // ResourceHashtable SIZE is specified at compile time so we
957 // use 1031 which is the first prime after 1024.
958 static constexpr size_t TABLE_SIZE = 1031;
959
960 // Maintain the cache for N classes. This limits memory footprint
961 // impact, regardless of how many classes we have in the dump.
962 // This also improves look up performance by keeping the statically
963 // sized table from overloading.
964 static constexpr int CACHE_TOP = 256;
965
966 typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
967 TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
968 PtrTable* _ptrs;
969
978 bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
979 delete entry;
980 return true;
981 }
982 } cleanup;
983 table->unlink(&cleanup);
984 }
985
986 public:
987 DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
988 if (_last_ik == ik) {
989 return _last_entry;
990 }
991
992 DumperClassCacheTableEntry* entry;
993 DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
994 if (from_cache == nullptr) {
995 entry = new DumperClassCacheTableEntry();
996 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
997 if (!fld.access_flags().is_static()) {
998 InlineKlass* inlineKlass = nullptr;
999 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1000 inlineKlass = DumperSupport::get_inlined_field_klass(fld.field_descriptor());
1001 entry->push_sig_start_inlined();
1002 entry->_instance_size += DumperSupport::instance_size(inlineKlass);
1003 } else {
1004 Symbol* sig = fld.signature();
1005 entry->_sigs_start.push(sig->char_at(0));
1006 entry->_instance_size += DumperSupport::sig2size(sig);
1007 }
1008 entry->_inline_klasses.push(inlineKlass);
1009 entry->_offsets.push(fld.offset());
1010 entry->_entries++;
1011 }
1012 }
1013
1014 if (_ptrs->number_of_entries() >= CACHE_TOP) {
1015 // We do not track the individual hit rates for table entries.
1016 // Purge the entire table, and let the cache catch up with new
1017 // distribution.
1018 unlink_all(_ptrs);
1019 }
1020
1021 _ptrs->put(ik, entry);
1022 } else {
1023 entry = *from_cache;
1024 }
1025
1026 // Remember for single-slot cache.
1027 _last_ik = ik;
1028 _last_entry = entry;
1029
1030 return entry;
1100 }
1101
1102 // dump a jfloat
1103 void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
1104 if (g_isnan(f)) {
1105 writer->write_u4(0x7fc00000); // collapsing NaNs
1106 } else {
1107 writer->write_u4(bit_cast<u4>(f));
1108 }
1109 }
1110
1111 // dump a jdouble
1112 void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
1113 if (g_isnan(d)) {
1114 writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
1115 } else {
1116 writer->write_u8(bit_cast<u8>(d));
1117 }
1118 }
1119
1120
1121 // dumps the raw value of the given field
1122 void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
1123 switch (type) {
1124 case JVM_SIGNATURE_CLASS :
1125 case JVM_SIGNATURE_ARRAY : {
1126 oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
1127 o = mask_dormant_archived_object(o, obj);
1128 assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
1129 writer->write_objectID(o);
1130 break;
1131 }
1132 case JVM_SIGNATURE_BYTE : {
1133 jbyte b = obj->byte_field(offset);
1134 writer->write_u1(b);
1135 break;
1136 }
1137 case JVM_SIGNATURE_CHAR : {
1138 jchar c = obj->char_field(offset);
1139 writer->write_u2(c);
1140 break;
1159 writer->write_u4(i);
1160 break;
1161 }
1162 case JVM_SIGNATURE_LONG : {
1163 jlong l = obj->long_field(offset);
1164 writer->write_u8(l);
1165 break;
1166 }
1167 case JVM_SIGNATURE_BOOLEAN : {
1168 jboolean b = obj->bool_field(offset);
1169 writer->write_u1(b);
1170 break;
1171 }
1172 default : {
1173 ShouldNotReachHere();
1174 break;
1175 }
1176 }
1177 }
1178
1179 // calculates the total size of all the fields of the given class.
1180 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1181 if (class_cache_entry != nullptr) {
1182 return class_cache_entry->instance_size();
1183 } else {
1184 u4 size = 0;
1185 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1186 if (!fld.access_flags().is_static()) {
1187 if (is_inlined_field(fld.field_descriptor())) {
1188 size += instance_size(get_inlined_field_klass(fld.field_descriptor()));
1189 } else {
1190 size += sig2size(fld.signature());
1191 }
1192 }
1193 }
1194 return size;
1195 }
1196 }
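// Worked example (hypothetical classes): for a value class Point with two int
// fields, instance_size(Point) == 4 + 4 == 8. A holder class with two inlined
// Point fields and one byte field then yields 8 + 8 + 1 == 17 bytes of packed
// field data -- each inlined field contributes the full field payload of its
// value class rather than a sizeof(address) reference.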
1197
1198 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1199 field_count = 0;
1200 u4 size = 0;
1201
1202 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1203 if (fldc.access_flags().is_static()) {
1204 assert(!is_inlined_field(fldc.field_descriptor()), "static fields cannot be inlined");
1205
1206 field_count++;
1207 size += sig2size(fldc.signature());
1208 }
1209 }
1210
1211 // Add in resolved_references which is referenced by the cpCache
1212 // The resolved_references is an array per InstanceKlass holding the
1213 // strings and other oops resolved from the constant pool.
1214 oop resolved_references = ik->constants()->resolved_references_or_null();
1215 if (resolved_references != nullptr) {
1216 field_count++;
1217 size += sizeof(address);
1218
1219 // Add in the resolved_references of the used previous versions of the class
1220 // in the case of RedefineClasses
1221 InstanceKlass* prev = ik->previous_versions();
1222 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1223 field_count++;
1224 size += sizeof(address);
1225 prev = prev->previous_versions();
1228
1229 // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1230 // arrays.
1231 oop init_lock = ik->init_lock();
1232 if (init_lock != nullptr) {
1233 field_count++;
1234 size += sizeof(address);
1235 }
1236
1237 // We write the value itself plus a name and a one byte type tag per field.
1238 return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1239 }
1240
1241 // dumps static fields of the given class
1242 void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
1243 InstanceKlass* ik = InstanceKlass::cast(k);
1244
1245 // dump the field descriptors and raw values
1246 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1247 if (fld.access_flags().is_static()) {
1248 assert(!is_inlined_field(fld.field_descriptor()), "static fields cannot be inlined");
1249
1250 Symbol* sig = fld.signature();
1251
1252 writer->write_symbolID(fld.name()); // name
1253 writer->write_u1(sig2tag(sig)); // type
1254
1255 // value
1256 dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1257 }
1258 }
1259
1260 // Add resolved_references for each class that has them
1261 oop resolved_references = ik->constants()->resolved_references_or_null();
1262 if (resolved_references != nullptr) {
1263 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1264 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1265 writer->write_objectID(resolved_references);
1266
1267 // Also write any previous versions
1268 InstanceKlass* prev = ik->previous_versions();
1269 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1270 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1271 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1272 writer->write_objectID(prev->constants()->resolved_references());
1273 prev = prev->previous_versions();
1274 }
1275 }
1276
1277 // Add init lock to the end if the class is not yet initialized
1278 oop init_lock = ik->init_lock();
1279 if (init_lock != nullptr) {
1280 writer->write_symbolID(vmSymbols::init_lock_name()); // name
1281 writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
1282 writer->write_objectID(init_lock);
1283 }
1284 }
1285
1286 // dumps the raw values of the instance fields of the given identity or inlined object;
1287 // for identity objects offset is 0 and class_cache_entry describes o->klass(),
1288 // for inlined objects offset is the field's offset in the holder object and class_cache_entry describes the inlined object's class.
1289 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
1290 assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1291 for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1292 if (class_cache_entry->is_inlined(idx)) {
1293 InlineKlass* field_klass = class_cache_entry->inline_klass(idx);
1294 int fields_offset = offset + (class_cache_entry->offset(idx) - field_klass->first_field_offset());
1295 DumperClassCacheTableEntry* inline_class_cache_entry = class_cache->lookup_or_create(field_klass);
1296 dump_inlined_object_fields(writer, o, fields_offset, class_cache, inline_class_cache_entry);
1297 } else {
1298 dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
1299 }
1300 }
1301 }
1302
1303 void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
1304 // the object is inlined, so all its fields are stored without headers.
1305 dump_instance_fields(writer, o, offset, class_cache, class_cache_entry);
1306 }
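// An inlined object has no header or object ID of its own, so its field values
// are written inline into the record of the enclosing instance or flat array.
// E.g. (illustrative only) a field 'Point p' of an inline class with fields
// 'int x; int y' contributes two consecutive u4 values, reported under the
// synthetic field names "p.x" and "p.y" (see InlinedFieldNameDumper below).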
1307
1308 // gets the count of the instance fields for a given class
1309 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1310 u2 field_count = 0;
1311
1312 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1313 if (!fldc.access_flags().is_static()) {
1314 if (is_inlined_field(fldc.field_descriptor())) {
1315 // add "synthetic" fields for inlined fields.
1316 field_count += get_instance_fields_count(get_inlined_field_klass(fldc.field_descriptor()));
1317 } else {
1318 field_count++;
1319 }
1320 }
1321 }
1322
1323 return field_count;
1324 }
1325
1326 // dumps the definitions of the instance fields for a given class;
1327 // inlined_fields_id is non-null for inlined fields (used to get synthetic field name IDs
1328 // via InlinedObjects::get_next_string_id()).
1329 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
1330 // inlined_fields_id != nullptr means ik is the class of an inlined field.
1331 // Inlined field id pointer for this class; lazily initialized
1332 // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
1333 uintx *this_klass_inlined_fields_id = inlined_fields_id;
1334 uintx inlined_id = 0;
1335
1336 // dump the field descriptors
1337 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1338 if (!fld.access_flags().is_static()) {
1339 if (is_inlined_field(fld.field_descriptor())) {
1340 // dump "synthetic" fields for inlined fields.
1341 if (this_klass_inlined_fields_id == nullptr) {
1342 inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
1343 this_klass_inlined_fields_id = &inlined_id;
1344 }
1345 dump_instance_field_descriptors(writer, get_inlined_field_klass(fld.field_descriptor()), this_klass_inlined_fields_id);
1346 } else {
1347 Symbol* sig = fld.signature();
1348 Symbol* name = nullptr;
1349 // Use inlined_fields_id provided by caller.
1350 if (inlined_fields_id != nullptr) {
1351 uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);
1352
1353 // name_id == 0 is returned on error; fall back to the original field name.
1354 if (name_id != 0) {
1355 *inlined_fields_id = name_id;
1356 name = reinterpret_cast<Symbol*>(name_id);
1357 }
1358 }
1359 if (name == nullptr) {
1360 name = fld.name();
1361 }
1362
1363 writer->write_symbolID(name); // name
1364 writer->write_u1(sig2tag(sig)); // type
1365 }
1366 }
1367 }
1368 }
1369
1370 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1371 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1372 InstanceKlass* ik = InstanceKlass::cast(o->klass());
1373
1374 DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1375
1376 u4 is = instance_size(ik, cache_entry);
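// Sub-record layout: u1 tag + object ID + u4 stack trace serial number
// + class ID + u4 field-data length + the field values themselves.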
1377 u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1378
1379 writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1380 writer->write_objectID(o);
1381 writer->write_u4(STACK_TRACE_ID);
1382
1383 // class ID
1384 writer->write_classID(ik);
1385
1386 // number of bytes that follow
1387 writer->write_u4(is);
1388
1389 // field values
1390 dump_instance_fields(writer, o, 0, class_cache, cache_entry);
1391
1392 writer->end_sub_record();
1393 }
1394
1395 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1396 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1397 InstanceKlass* ik = InstanceKlass::cast(k);
1398
1399 // We can safepoint and do a heap dump at a point where we have a Klass,
1400 // but no java mirror has been set up for it yet. So we need to check
1401 // that the class is at least loaded, to avoid a crash from a null mirror.
1402 if (!ik->is_loaded()) {
1403 return;
1404 }
1405
1406 u2 static_fields_count = 0;
1407 u4 static_size = get_static_fields_size(ik, static_fields_count);
1408 u2 instance_fields_count = get_instance_fields_count(ik);
1409 u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1410 u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1411
1412 writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1413
1414 // class ID
1415 writer->write_classID(ik);
1416 writer->write_u4(STACK_TRACE_ID);
1417
1418 // super class ID
1419 InstanceKlass* java_super = ik->java_super();
1420 if (java_super == nullptr) {
1421 writer->write_objectID(oop(nullptr));
1422 } else {
1423 writer->write_classID(java_super);
1424 }
1425
1426 writer->write_objectID(ik->class_loader());
1427 writer->write_objectID(ik->signers());
1428 writer->write_objectID(ik->protection_domain());
1429
1430 // reserved
1431 writer->write_objectID(oop(nullptr));
1432 writer->write_objectID(oop(nullptr));
1433
1434 // instance size
1435 writer->write_u4(HeapWordSize * ik->size_helper());
1436
1437 // size of constant pool - ignored by HAT 1.1
1438 writer->write_u2(0);
1439
1440 // static fields
1441 writer->write_u2(static_fields_count);
1442 dump_static_fields(writer, ik);
1443
1444 // description of instance fields
1445 writer->write_u2(instance_fields_count);
1446 dump_instance_field_descriptors(writer, ik);
1447
1448 writer->end_sub_record();
1449 }
1450
1451 // creates HPROF_GC_CLASS_DUMP record for the given array class
1452 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1453 InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1454 if (k->is_objArray_klass()) {
1455 Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
1469 assert(java_super != nullptr, "checking");
1470 writer->write_classID(java_super);
1471
1472 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1473 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1474 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1475
1476 writer->write_objectID(oop(nullptr)); // reserved
1477 writer->write_objectID(oop(nullptr));
1478 writer->write_u4(0); // instance size
1479 writer->write_u2(0); // constant pool
1480 writer->write_u2(0); // static fields
1481 writer->write_u2(0); // instance fields
1482
1483 writer->end_sub_record();
1484
1485 }
1486
1487 // HPROF uses a u4 as the record length field,
1488 // which means we need to truncate arrays that are too long.
1489 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {
1490 int length = array->length();
1491
1492 size_t length_in_bytes = (size_t)length * type_size;
1493 uint max_bytes = max_juint - header_size;
1494
1495 if (length_in_bytes > max_bytes) {
1496 length = max_bytes / type_size;
1497 length_in_bytes = (size_t)length * type_size;
1498
1499 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1500 warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1501 type2name_tab[type], array->length(), length);
1502 }
1503 return length;
1504 }
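// Worked example (illustrative only): a long[] with 2^30 elements has
// length_in_bytes == 2^33, which exceeds max_juint, so the dumped length is
// clamped to (max_juint - header_size) / 8 elements and the tail is dropped
// with the warning above.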
1505
1506 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1507 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1508 assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_PRIMITIVE_OBJECT, "invalid array element type");
1509 int type_size;
1510 if (type == T_OBJECT) {
1511 type_size = sizeof(address);
1512 } else if (type == T_PRIMITIVE_OBJECT) {
1513 // TODO: FIXME
1514 fatal("Not supported yet"); // FIXME: JDK-8325678
1515 } else {
1516 type_size = type2aelembytes(type);
1517 }
1518
1519 return calculate_array_max_length(writer, array, type_size, header_size);
1520 }
1521
1522 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1523 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1524 // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1525 short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1526 int length = calculate_array_max_length(writer, array, header_size);
1527 u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1528
1529 writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1530 writer->write_objectID(array);
1531 writer->write_u4(STACK_TRACE_ID);
1532 writer->write_u4(length);
1533
1534 // array class ID
1535 writer->write_classID(array->klass());
1536
1537 // [id]* elements
1538 for (int index = 0; index < length; index++) {
1539 oop o = array->obj_at(index);
1540 o = mask_dormant_archived_object(o, array);
1541 writer->write_objectID(o);
1542 }
1543
1544 writer->end_sub_record();
1545 }
1546
1547 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1548 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache) {
1549 FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1550 InlineKlass* element_klass = array_klass->element_klass();
1551 int element_size = instance_size(element_klass);
1552 /* id array object ID
1553 * u4 stack trace serial number
1554 * u4 number of elements
1555 * u1 element type
1556 */
1557 short header_size = 1 + sizeof(address) + 2 * 4 + 1;
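// Note: header_size above also counts the leading u1 sub-record tag written by
// start_sub_record, in addition to the fields listed in the comment.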
1558
1559 // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
1560 BasicType type = T_BYTE;
1561 int type_size = type2aelembytes(type);
1562 int length = calculate_array_max_length(writer, array, element_size, header_size);
1563 u4 length_in_bytes = (u4)(length * element_size);
1564 u4 size = header_size + length_in_bytes;
1565
1566 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1567 writer->write_objectID(array);
1568 writer->write_u4(STACK_TRACE_ID);
1569 // TODO: round up array length for T_SHORT/T_INT/T_LONG
1570 writer->write_u4(length * element_size);
1571 writer->write_u1(type2tag(type));
1572
1573 for (int index = 0; index < length; index++) {
1574 // Need the offset in the holder to read the inlined object; calculate it from flatArrayOop::value_at_addr().
1575 int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper())
1576 - cast_from_oop<address>(array));
1577 DumperClassCacheTableEntry* class_cache_entry = class_cache->lookup_or_create(element_klass);
1578 dump_inlined_object_fields(writer, array, offset, class_cache, class_cache_entry);
1579 }
1580
1581 // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1582
1583 InlinedObjects::get_instance()->add_flat_array(array);
1584
1585 writer->end_sub_record();
1586 }
1587
1588 #define WRITE_ARRAY(Array, Type, Size, Length) \
1589 for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
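// Usage sketch (assuming a typeArrayOop 'array' of jint and a 'length' from
// calculate_array_max_length): WRITE_ARRAY(array, int, u4, length) expands to
// a loop writing each element as writer->write_u4((u4)array->int_at(i)).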
1590
1591 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1592 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1593 BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1594 // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1595 short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1596
1597 int length = calculate_array_max_length(writer, array, header_size);
1598 int type_size = type2aelembytes(type);
1599 u4 length_in_bytes = (u4)length * type_size;
1600 u4 size = header_size + length_in_bytes;
1601
1602 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1603 writer->write_objectID(array);
1604 writer->write_u4(STACK_TRACE_ID);
1605 writer->write_u4(length);
1606 writer->write_u1(type2tag(type));
1607
1689 int bci) {
1690 int line_number;
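// HPROF reserves negative line numbers for frames without source information;
// -3 marks a native frame (other negative values, e.g. -1 for an unknown
// line, are defined by the format but not produced by this path).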
1691 if (m->is_native()) {
1692 line_number = -3; // native frame
1693 } else {
1694 line_number = m->line_number_from_bci(bci);
1695 }
1696
1697 write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1698 writer->write_id(frame_serial_num); // frame serial number
1699 writer->write_symbolID(m->name()); // method's name
1700 writer->write_symbolID(m->signature()); // method's signature
1701
1702 assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1703 writer->write_symbolID(m->method_holder()->source_file_name()); // source file name
1704 writer->write_u4(class_serial_num); // class serial number
1705 writer->write_u4((u4) line_number); // line number
1706 }
1707
1708
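// Writes HPROF_UTF8 records for the synthetic names of flattened inlined
// fields. The names are composed by joining the holder field path with '.':
// e.g. (illustrative only) an inlined field 'loc' whose inline class has a
// field 'x' gets the synthetic name "loc.x".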
1709 class InlinedFieldNameDumper : public LockedClassesDo {
1710 public:
1711 typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);
1712
1713 private:
1714 AbstractDumpWriter* _writer;
1715 InlinedObjects *_owner;
1716 Callback _callback;
1717 uintx _index;
1718
1719 void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
1720 super_names->push(field_name);
1721 for (HierarchicalFieldStream<JavaFieldStream> fld(klass); !fld.done(); fld.next()) {
1722 if (!fld.access_flags().is_static()) {
1723 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1724 dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1725 } else {
1726 // get next string ID.
1727 uintx next_index = _owner->get_next_string_id(_index);
1728 if (next_index == 0) {
1729 // something went wrong (overflow?)
1730 // stop generation; the remaining inlined fields will keep their original field names.
1731 return;
1732 }
1733 _index = next_index;
1734
1735 // Calculate length.
1736 int len = fld.name()->utf8_length();
1737 for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1738 len += (*it)->utf8_length() + 1; // +1 for ".".
1739 }
1740
1741 DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
1742 _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
1743 // Write the string value.
1744 // 1) super_names.
1745 for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1746 _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
1747 _writer->write_u1('.');
1748 }
1749 // 2) field name.
1750 _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
1751 }
1752 }
1753 }
1754 super_names->pop();
1755 }
1756
1757 void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
1758 GrowableArray<Symbol*> super_names(4, mtServiceability);
1759 dump_inlined_field_names(&super_names, field_name, field_klass);
1760 }
1761
1762 public:
1763 InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
1764 : _writer(writer), _owner(owner), _callback(callback), _index(0) {
1765 }
1766
1767 void do_klass(Klass* k) {
1768 if (!k->is_instance_klass()) {
1769 return;
1770 }
1771 InstanceKlass* ik = InstanceKlass::cast(k);
1772 // if (ik->has_inline_type_fields()) {
1773 // return;
1774 // }
1775
1776 uintx base_index = _index;
1777 int count = 0;
1778
1779 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1780 if (!fld.access_flags().is_static()) {
1781 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1782 dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1783 count++;
1784 }
1785 }
1786 }
1787
1788 if (count != 0) {
1789 _callback(_owner, k, base_index, count);
1790 }
1791 }
1792 };
1793
1794 class InlinedFieldsDumper : public LockedClassesDo {
1795 private:
1796 AbstractDumpWriter* _writer;
1797
1798 public:
1799 InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1800
1801 void do_klass(Klass* k) {
1802 if (!k->is_instance_klass()) {
1803 return;
1804 }
1805 InstanceKlass* ik = InstanceKlass::cast(k);
1806 // if (ik->has_inline_type_fields()) {
1807 // return;
1808 // }
1809
1810 // We can be at a point where the java mirror does not exist yet.
1811 // So we need to check that the class is at least loaded, to avoid a crash from a null mirror.
1812 if (!ik->is_loaded()) {
1813 return;
1814 }
1815
1816 u2 inlined_count = 0;
1817 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1818 if (!fld.access_flags().is_static()) {
1819 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1820 inlined_count++;
1821 }
1822 }
1823 }
1824 if (inlined_count != 0) {
1825 _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);
1826
1827 // class ID
1828 _writer->write_classID(ik);
1829 // number of inlined fields
1830 _writer->write_u2(inlined_count);
1831 u2 index = 0;
1832 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1833 if (!fld.access_flags().is_static()) {
1834 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1835 // inlined field index
1836 _writer->write_u2(index);
1837 // synthetic field count
1838 u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1839 _writer->write_u2(field_count);
1840 // original field name
1841 _writer->write_symbolID(fld.name());
1842 // inlined field class ID
1843 _writer->write_classID(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1844
1845 index += field_count;
1846 } else {
1847 index++;
1848 }
1849 }
1850 }
1851 }
1852 }
1853 };
1854
1855
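// Scans the SymbolTable to find the range of live Symbol* addresses; synthetic
// field-name IDs are later allocated outside [_min_string_id, _max_string_id]
// (see get_next_string_id) so they cannot collide with the IDs of real symbols.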
1856 void InlinedObjects::init() {
1857 _instance = this;
1858
1859 struct Closure : public SymbolClosure {
1860 uintx _min_id = max_uintx;
1861 uintx _max_id = 0;
1863
1864 void do_symbol(Symbol** p) {
1865 uintx val = reinterpret_cast<uintx>(*p);
1866 if (val < _min_id) {
1867 _min_id = val;
1868 }
1869 if (val > _max_id) {
1870 _max_id = val;
1871 }
1872 }
1873 } closure;
1874
1875 SymbolTable::symbols_do(&closure);
1876
1877 _min_string_id = closure._min_id;
1878 _max_string_id = closure._max_id;
1879 }
1880
1881 void InlinedObjects::release() {
1882 _instance = nullptr;
1883
1884 if (_inlined_field_map != nullptr) {
1885 delete _inlined_field_map;
1886 _inlined_field_map = nullptr;
1887 }
1888 if (_flat_arrays != nullptr) {
1889 delete _flat_arrays;
1890 _flat_arrays = nullptr;
1891 }
1892 }
1893
1894 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1895 if (_this->_inlined_field_map == nullptr) {
1896 _this->_inlined_field_map = new (mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1897 }
1898 _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1899
1900 // counters for dumping classes with inlined fields
1901 _this->_classes_count++;
1902 _this->_inlined_fields_count += count;
1903 }
1904
1905 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1906 InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1907 ClassLoaderDataGraph::classes_do(&nameDumper);
1908
1909 if (_inlined_field_map != nullptr) {
1910 // prepare the map for get_base_index_for().
1911 _inlined_field_map->sort(ClassInlinedFields::compare);
1912 }
1913 }
1914
1915 uintx InlinedObjects::get_base_index_for(Klass* k) {
1916 if (_inlined_field_map != nullptr) {
1917 bool found = false;
1918 int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
1919 if (found) {
1920 return _inlined_field_map->at(idx).base_index;
1921 }
1922 }
1923
1924 // return max_uintx, so get_next_string_id returns 0.
1925 return max_uintx;
1926 }
1927
1928 uintx InlinedObjects::get_next_string_id(uintx id) {
1929 if (++id == _min_string_id) {
1930 return _max_string_id + 1;
1931 }
1932 return id;
1933 }
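// Example (illustrative only): with _min_string_id == 0x1000 and
// _max_string_id == 0x2000, IDs are handed out as 1, 2, ... up to 0xfff, then
// the sequence jumps to 0x2001, skipping the whole range of real Symbol*
// addresses; a wrap-around to 0 signals ID exhaustion to callers.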
1934
1935 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1936 if (_classes_count != 0) {
1937 // The record for each class contains a tag (u1), class ID, and inlined field count (u2);
1938 // each inlined field adds an index (u2), a synthetic field count (u2), the original field name ID, and a class ID.
1939 int size = _classes_count * (1 + sizeof(address) + 2)
1940 + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1941 DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1942
1943 InlinedFieldsDumper dumper(writer);
1944 ClassLoaderDataGraph::classes_do(&dumper);
1945 }
1946 }
1947
1948 void InlinedObjects::add_flat_array(oop array) {
1949 if (_flat_arrays == nullptr) {
1950 _flat_arrays = new (mtServiceability) GrowableArray<oop>(100, mtServiceability);
1951 }
1952 _flat_arrays->append(array);
1953 }
1954
1955 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1956 if (_flat_arrays != nullptr) {
1957 // For each flat array the record contains tag (u1), object ID and class ID.
1958 int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1959
1960 DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1961 for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1962 flatArrayOop array = flatArrayOop(*it);
1963 FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1964 InlineKlass* element_klass = array_klass->element_klass();
1965 writer->write_u1(HPROF_FLAT_ARRAY);
1966 writer->write_objectID(array);
1967 writer->write_classID(element_klass);
1968 }
1969 }
1970 }
1971
1972
1973 // Support class used to generate HPROF_UTF8 records from the entries in the
1974 // SymbolTable.
1975
1976 class SymbolTableDumper : public SymbolClosure {
1977 private:
1978 AbstractDumpWriter* _writer;
1979 AbstractDumpWriter* writer() const { return _writer; }
1980 public:
1981 SymbolTableDumper(AbstractDumpWriter* writer) { _writer = writer; }
1982 void do_symbol(Symbol** p);
1983 };
1984
1985 void SymbolTableDumper::do_symbol(Symbol** p) {
1986 ResourceMark rm;
1987 Symbol* sym = *p;
1988 int len = sym->utf8_length();
1989 if (len > 0) {
1990 char* s = sym->as_utf8();
1991 DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1992 writer()->write_symbolID(sym);
2476 return;
2477 }
2478 }
2479
2480 if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2481 return;
2482 }
2483
2484 if (o->is_instance()) {
2485 // create a HPROF_GC_INSTANCE_DUMP record for each object
2486 DumperSupport::dump_instance(writer(), o, &_class_cache);
2487 // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2488 // (mounted virtual threads are dumped with their carriers).
2489 if (java_lang_VirtualThread::is_instance(o)
2490 && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2491 _vthread_dumper->dump_vthread(o, writer());
2492 }
2493 } else if (o->is_objArray()) {
2494 // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2495 DumperSupport::dump_object_array(writer(), objArrayOop(o));
2496 } else if (o->is_flatArray()) {
2497 DumperSupport::dump_flat_array(writer(), flatArrayOop(o), &_class_cache);
2498 } else if (o->is_typeArray()) {
2499 // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2500 DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2501 }
2502 }
2503
2504 // The dumper controller for parallel heap dump
2505 class DumperController : public CHeapObj<mtInternal> {
2506 private:
2507 Monitor* _lock;
2508 Mutex* _global_writer_lock;
2509
2510 const uint _dumper_number;
2511 uint _complete_number;
2512
2513 bool _started; // VM dumper started and acquired global writer lock
2514
2515 public:
2516 DumperController(uint number) :
2517 // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
2557 _complete_number++;
2558 // propagate local error to global if any
2559 if (local_writer->has_error()) {
2560 global_writer->set_error(local_writer->error());
2561 }
2562 ml.notify();
2563 }
2564
2565 void wait_all_dumpers_complete() {
2566 MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2567 while (_complete_number != _dumper_number) {
2568 ml.wait();
2569 }
2570 }
2571 };
2572
2573 // DumpMerger merges separate dump files into a complete one
2574 class DumpMerger : public StackObj {
2575 private:
2576 DumpWriter* _writer;
2577 InlinedObjects* _inlined_objects;
2578 const char* _path;
2579 bool _has_error;
2580 int _dump_seq;
2581
2582 private:
2583 void merge_file(const char* path);
2584 void merge_done();
2585 void set_error(const char* msg);
2586
2587 public:
2588 DumpMerger(const char* path, DumpWriter* writer, InlinedObjects* inlined_objects, int dump_seq) :
2589 _writer(writer),
2590 _inlined_objects(inlined_objects),
2591 _path(path),
2592 _has_error(_writer->has_error()),
2593 _dump_seq(dump_seq) {}
2594
2595 void do_merge();
2596
2597 // returns path for the parallel DumpWriter (resource allocated)
2598 static char* get_writer_path(const char* base_path, int seq);
2599
2600 };
2601
2602 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2603 // approximate required buffer size
2604 size_t buf_size = strlen(base_path)
2605 + 2 // ".p"
2606 + 10 // number (that's enough for 2^32 parallel dumpers)
2607 + 1; // '\0'
2608
2609 char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2610 memset(path, 0, buf_size);
2611
2612 os::snprintf(path, buf_size, "%s.p%d", base_path, seq);
2613
2614 return path;
2615 }
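// E.g. (illustrative only) base_path "/tmp/heap.hprof" with seq 2 yields the
// segment path "/tmp/heap.hprof.p2".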
2616
2617
2618 void DumpMerger::merge_done() {
2619 // Writes the HPROF_HEAP_DUMP_END record.
2620 if (!_has_error) {
2621 DumperSupport::end_of_dump(_writer);
2622 _inlined_objects->dump_flat_arrays(_writer);
2623 _writer->flush();
2624 _inlined_objects->release();
2625 }
2626 _dump_seq = 0; // reset
2627 }
2628
2629 void DumpMerger::set_error(const char* msg) {
2630 assert(msg != nullptr, "sanity check");
2631 log_error(heapdump)("%s (file: %s)", msg, _path);
2632 _writer->set_error(msg);
2633 _has_error = true;
2634 }
2635
2636 #ifdef LINUX
2637 // Merge segmented heap files via sendfile; it's more efficient than the
2638 // read+write combination, which would require transferring data to and from
2639 // user space.
2640 void DumpMerger::merge_file(const char* path) {
2641 TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2642
2643 int segment_fd = os::open(path, O_RDONLY, 0);
2644 if (segment_fd == -1) {
2724 // restore compressor for further use
2725 _writer->set_compressor(saved_compressor);
2726 merge_done();
2727 }
2728
2729 // The VM operation that performs the heap dump
2730 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2731 private:
2732 DumpWriter* _writer;
2733 JavaThread* _oome_thread;
2734 Method* _oome_constructor;
2735 bool _gc_before_heap_dump;
2736 GrowableArray<Klass*>* _klass_map;
2737
2738 ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
2739 int _thread_dumpers_count;
2740 volatile int _thread_serial_num;
2741 volatile int _frame_serial_num;
2742
2743 volatile int _dump_seq;
2744
2745 // Inlined object support.
2746 InlinedObjects _inlined_objects;
2747
2748 // parallel heap dump support
2749 uint _num_dumper_threads;
2750 DumperController* _dumper_controller;
2751 ParallelObjectIterator* _poi;
2752
2753 // Dumper id of VMDumper thread.
2754 static const int VMDumperId = 0;
2755 // The VM dumper dumps both heap and non-heap data; other dumpers dump heap-only data.
2756 static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2757 // the 1st dumper calling get_next_dumper_id becomes the VM dumper
2758 int get_next_dumper_id() {
2759 return Atomic::fetch_then_add(&_dump_seq, 1);
2760 }
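// The first worker to call this receives id 0 (VMDumperId) and therefore takes
// on the non-heap sections of the dump; the atomic increment makes the choice
// race-free among workers.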
2761
2762 DumpWriter* writer() const { return _writer; }
2763
2764 bool skip_operation() const;
2765
2766 // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2767 void dump_threads(AbstractDumpWriter* writer);
2808 }
2809
2810 ~VM_HeapDumper() {
2811 if (_thread_dumpers != nullptr) {
2812 for (int i = 0; i < _thread_dumpers_count; i++) {
2813 delete _thread_dumpers[i];
2814 }
2815 FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2816 }
2817
2818 if (_dumper_controller != nullptr) {
2819 delete _dumper_controller;
2820 _dumper_controller = nullptr;
2821 }
2822 delete _klass_map;
2823 }
2824 int dump_seq() { return _dump_seq; }
2825 bool is_parallel_dump() { return _num_dumper_threads > 1; }
2826 void prepare_parallel_dump(WorkerThreads* workers);
2827
2828 InlinedObjects* inlined_objects() { return &_inlined_objects; }
2829
2830 VMOp_Type type() const { return VMOp_HeapDumper; }
2831 virtual bool doit_prologue();
2832 void doit();
2833 void work(uint worker_id);
2834
2835 // UnmountedVThreadDumper implementation
2836 void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2837 };
2838
2839 bool VM_HeapDumper::skip_operation() const {
2840 return false;
2841 }
2842
2843 // fixes up the current dump record and writes the HPROF_HEAP_DUMP_END record
2844 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2845 writer->finish_dump_segment();
2846
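// Standard HPROF record header: after the u1 tag come a u4 timestamp
// (microseconds since the header timestamp; 0 here) and a u4 body length
// (0, since HPROF_HEAP_DUMP_END has no body).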
2847 writer->write_u1(HPROF_HEAP_DUMP_END);
2848 writer->write_u4(0);
2849 writer->write_u4(0);
2947 _dumper_controller->lock_global_writer();
2948 _dumper_controller->signal_start();
2949 } else {
2950 _dumper_controller->wait_for_start_signal();
2951 }
2952
2953 if (is_vm_dumper(dumper_id)) {
2954 TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2955 // Write the file header - we always use 1.0.2
2956 const char* header = "JAVA PROFILE 1.0.2";
2957
2958 // header is a few bytes long - no chance to overflow an int
2959 writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2960 writer()->write_u4(oopSize);
2961 // timestamp is current time in ms
2962 writer()->write_u8(os::javaTimeMillis());
2963 // HPROF_UTF8 records
2964 SymbolTableDumper sym_dumper(writer());
2965 SymbolTable::symbols_do(&sym_dumper);
2966
2967 // HPROF_UTF8 records for inlined field names.
2968 inlined_objects()->init();
2969 inlined_objects()->dump_inlined_field_names(writer());
2970
2971 // HPROF_INLINED_FIELDS
2972 inlined_objects()->dump_classed_with_inlined_fields(writer());
2973
2974 // write HPROF_LOAD_CLASS records
2975 {
2976 LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
2977 ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
2978 }
2979
2980 // write HPROF_FRAME and HPROF_TRACE records
2981 // this must be called after _klass_map is built, which happens while iterating the classes above.
2982 dump_stack_traces(writer());
2983
2984 // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2985 _dumper_controller->unlock_global_writer();
2986 }
2987
2988 // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2989
2990 ResourceMark rm;
2991 // share the global compressor; the local DumpWriter is not responsible for its life cycle
2992 DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
2993 writer()->is_overwrite(), writer()->compressor());
3153 (error() != nullptr) ? error() : "reason unknown");
3154 }
3155 return -1;
3156 }
3157
3158 // generate the segmented heap dump into separate files
3159 VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
3160 VMThread::execute(&dumper);
3161
3162 // record any error that the writer may have encountered
3163 set_error(writer.error());
3164
3165 // The heap dump process is done in two phases:
3166 //
3167 // Phase 1: Concurrent threads directly write heap data to multiple heap files.
3168 // This is done by VM_HeapDumper, which is performed within a safepoint.
3169 //
3170 // Phase 2: Merge the multiple heap files into one complete heap dump file.
3171 // This is done by DumpMerger, which is performed outside the safepoint.
3172
3173 DumpMerger merger(path, &writer, dumper.inlined_objects(), dumper.dump_seq());
3174 // Performing the heap dump file merge in the current thread prevents us
3175 // from occupying the VM Thread, which would otherwise delay GC and
3176 // other VM operations.
3177 merger.do_merge();
3178 if (writer.error() != nullptr) {
3179 set_error(writer.error());
3180 }
3181
3182 // emit JFR event
3183 if (error() == nullptr) {
3184 event.set_destination(path);
3185 event.set_gcBeforeDump(_gc_before_heap_dump);
3186 event.set_size(writer.bytes_written());
3187 event.set_onOutOfMemoryError(_oome);
3188 event.set_overwrite(overwrite);
3189 event.set_compression(compression);
3190 event.commit();
3191 } else {
3192 log_debug(cds, heap)("Error %s while dumping heap", error());
3193 }