 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/timerTrace.hpp"
#include "services/heapDumper.hpp"
#include "services/heapDumperCompression.hpp"
#include "services/threadService.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#ifdef LINUX
#include "os_linux.hpp"
 *                7:  double array
 *                8:  byte array
 *                9:  short array
 *                10: int array
 *                11: long array
 *               [u1]* elements
 *
 * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 *
 *                u4        total number of samples
 *                u4        # of traces
 *               [u4        # of samples
 *                u4]*      stack trace serial number
 *
 * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 *
 *                u4        0x00000001: alloc traces on/off
 *                          0x00000002: cpu sampling on/off
 *                u2        stack trace depth
 *
 *
 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 * be generated as a sequence of heap dump segments. This sequence is
 * terminated by an end record. The additional tags allowed by format
 * "JAVA PROFILE 1.0.2" are:
 *
 * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 *
 *               [heap dump sub-records]*
 *               The same sub-record types allowed by HPROF_HEAP_DUMP
 *
 * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 *
 */
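
// Every top-level record above shares the same header layout: a one-byte tag,
// a u4 time delta, and a u4 length of the bytes that follow. A minimal sketch
// of emitting one (this is what DumperSupport::write_header below does;
// 'writer' and 'len' stand for a live AbstractDumpWriter and a precomputed
// body size):
//
//   writer->write_u1(tag);  // e.g. HPROF_UTF8
//   writer->write_u4(0);    // microseconds since the header timestamp
//   writer->write_u4(len);  // number of bytes that follow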


// HPROF tags

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};

// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200
};

// Supports I/O operations for a dump
// Base class for dump and parallel dump
class AbstractDumpWriter : public CHeapObj<mtInternal> {
 protected:
  enum {
    io_buffer_max_size = 1*M,
    dump_segment_header_size = 9
  };

  char* _buffer;    // internal buffer
  size_t _size;
  size_t _pos;

  bool _in_dump_segment;    // Are we currently in a dump segment?
  bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
  DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
  DEBUG_ONLY(bool _sub_record_ended;)  // True if we have called the end_sub_record().

  char* buffer() const       { return _buffer; }
  size_t buffer_size() const { return _size; }
  }
}

class DumperClassCacheTable;
class DumperClassCacheTableEntry;

// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // returns the size of the instance of the given class
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given object
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, cds, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name(),
                             p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name());
      }
    }
  }
};

// Hash table of klasses to the klass metadata. This should greatly improve the
// heap dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  GrowableArray<char> _sigs_start;
  GrowableArray<int> _offsets;
  u4 _instance_size;
  int _entries;

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};
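
// A sketch of how the dumper uses this cache (see dump_instance below): look
// the entry up once per object, then iterate the pre-resolved field metadata
// instead of re-walking the field streams for every instance. 'class_cache'
// stands for the DumperClassCacheTable owned by the dumping thread:
//
//   DumperClassCacheTableEntry* e = class_cache->lookup_or_create(ik);
//   for (int i = 0; i < e->field_count(); i++) {
//     dump_field_value(writer, e->sig_start(i), o, e->offset(i));
//   }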

class DumperClassCacheTable {
private:
  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031, which is the first prime after 1024.
  static constexpr size_t TABLE_SIZE = 1031;

  // Maintain the cache for N classes. This limits memory footprint
  // impact, regardless of how many classes we have in the dump.
  // This also improves lookup performance by keeping the statically
  // sized table from overloading.
  static constexpr int CACHE_TOP = 256;

  typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
                            TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
  PtrTable* _ptrs;

    bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
      delete entry;
      return true;
    }
  } cleanup;
  table->unlink(&cleanup);
}

public:
  DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
    if (_last_ik == ik) {
      return _last_entry;
    }

    DumperClassCacheTableEntry* entry;
    DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
    if (from_cache == nullptr) {
      entry = new DumperClassCacheTableEntry();
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          Symbol* sig = fld.signature();
          entry->_sigs_start.push(sig->char_at(0));
          entry->_offsets.push(fld.offset());
          entry->_entries++;
          entry->_instance_size += DumperSupport::sig2size(sig);
        }
      }

      if (_ptrs->number_of_entries() >= CACHE_TOP) {
        // We do not track the individual hit rates for table entries.
        // Purge the entire table, and let the cache catch up with new
        // distribution.
        unlink_all(_ptrs);
      }

      _ptrs->put(ik, entry);
    } else {
      entry = *from_cache;
    }

    // Remember for single-slot cache.
    _last_ik = ik;
    _last_entry = entry;

    return entry;
}

// dump a jfloat
void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
  if (g_isnan(f)) {
    writer->write_u4(0x7fc00000); // collapsing NaNs
  } else {
    writer->write_u4(bit_cast<u4>(f));
  }
}

// dump a jdouble
void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
  if (g_isnan(d)) {
    writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
  } else {
    writer->write_u8(bit_cast<u8>(d));
  }
}
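
// Note on the two writers above: every NaN payload is collapsed to the
// canonical quiet NaN bit pattern (0x7fc00000 for jfloat, and
// 0x7ff80000ull << 32 == 0x7ff8000000000000 for jdouble), so the dump does
// not depend on which of the many possible NaN encodings the heap held.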

// dumps the raw value of the given field
void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
  switch (type) {
    case JVM_SIGNATURE_CLASS :
    case JVM_SIGNATURE_ARRAY : {
      oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
      o = mask_dormant_archived_object(o, obj);
      assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
      writer->write_objectID(o);
      break;
    }
    case JVM_SIGNATURE_BYTE : {
      jbyte b = obj->byte_field(offset);
      writer->write_u1(b);
      break;
    }
    case JVM_SIGNATURE_CHAR : {
      jchar c = obj->char_field(offset);
      writer->write_u2(c);
      break;
      writer->write_u4(i);
      break;
    }
    case JVM_SIGNATURE_LONG : {
      jlong l = obj->long_field(offset);
      writer->write_u8(l);
      break;
    }
    case JVM_SIGNATURE_BOOLEAN : {
      jboolean b = obj->bool_field(offset);
      writer->write_u1(b);
      break;
    }
    default : {
      ShouldNotReachHere();
      break;
    }
  }
}

// returns the size of the instance of the given class
u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
  if (class_cache_entry != nullptr) {
    return class_cache_entry->instance_size();
  } else {
    u4 size = 0;
    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        size += sig2size(fld.signature());
      }
    }
    return size;
  }
}

u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
  field_count = 0;
  u4 size = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (fldc.access_flags().is_static()) {
      field_count++;
      size += sig2size(fldc.signature());
    }
  }

  // Add in resolved_references which is referenced by the cpCache
  // The resolved_references is an array per InstanceKlass holding the
  // strings and other oops resolved from the constant pool.
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    field_count++;
    size += sizeof(address);

    // Add in the resolved_references of the used previous versions of the class
    // in the case of RedefineClasses
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      field_count++;
      size += sizeof(address);
      prev = prev->previous_versions();

  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
  // arrays.
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    field_count++;
    size += sizeof(address);
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
}
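
// Worked example of the size math above (64-bit, so sizeof(address) == 8):
// a class whose only static is one int gives size == 4 and field_count == 1,
// so the reserved space is 4 + 1 * (8 + 1) = 13 bytes - the 4-byte value plus
// an 8-byte name symbol ID and a 1-byte type tag.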

// dumps static fields of the given class
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name()); // name
      writer->write_u1(sig2tag(sig));     // type

      // value
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}

// dump the raw values of the instance fields of the given object
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {
  assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
  for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
    dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
  }
}

// gets the count of the instance fields for a given class
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
  u2 field_count = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (!fldc.access_flags().is_static()) field_count++;
  }

  return field_count;
}

// dumps the definition of the instance fields for a given class
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name()); // name
      writer->write_u1(sig2tag(sig));     // type
    }
  }
}

// creates HPROF_GC_INSTANCE_DUMP record for the given object
void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());

  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  u4 is = instance_size(ik, cache_entry);
  u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;

  writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
  writer->write_objectID(o);
  writer->write_u4(STACK_TRACE_ID);

  // class ID
  writer->write_classID(ik);

  // number of bytes that follow
  writer->write_u4(is);

  // field values
  dump_instance_fields(writer, o, cache_entry);

  writer->end_sub_record();
}
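
// The 'size' above is the fixed HPROF_GC_INSTANCE_DUMP layout: a u1 sub-record
// tag, the object ID, a u4 stack trace serial number, the class object ID and
// a u4 field-data length, followed by 'is' bytes of field values. On 64-bit
// the fixed part is 1 + 8 + 4 + 8 + 4 = 25 bytes.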

// creates HPROF_GC_CLASS_DUMP record for the given instance class
void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // We can safepoint and do a heap dump at a point where we have a Klass,
  // but no java mirror class has been set up for it. So we need to check
  // that the class is at least loaded, to avoid a crash from a null mirror.
  if (!ik->is_loaded()) {
    return;
  }

  u2 static_fields_count = 0;
  u4 static_size = get_static_fields_size(ik, static_fields_count);
  u2 instance_fields_count = get_instance_fields_count(ik);
  u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
  u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
  writer->write_classID(ik);
  writer->write_u4(STACK_TRACE_ID);

  // super class ID
  InstanceKlass* java_super = ik->java_super();
  if (java_super == nullptr) {
    writer->write_objectID(oop(nullptr));
  } else {
    writer->write_classID(java_super);
  }

  writer->write_objectID(ik->class_loader());
  writer->write_objectID(ik->signers());
  writer->write_objectID(ik->protection_domain());

  // reserved
  writer->write_objectID(oop(nullptr));
  writer->write_objectID(oop(nullptr));

  // instance size
  writer->write_u4(DumperSupport::instance_size(ik));

  // size of constant pool - ignored by HAT 1.1
  writer->write_u2(0);

  // static fields
  writer->write_u2(static_fields_count);
  dump_static_fields(writer, ik);

  // description of instance fields
  writer->write_u2(instance_fields_count);
  dump_instance_field_descriptors(writer, ik);

  writer->end_sub_record();
}

// creates HPROF_GC_CLASS_DUMP record for the given array class
void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
  if (k->is_objArray_klass()) {
    Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
  assert(java_super != nullptr, "checking");
  writer->write_classID(java_super);

  writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
  writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
  writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());

  writer->write_objectID(oop(nullptr)); // reserved
  writer->write_objectID(oop(nullptr));
  writer->write_u4(0);                  // instance size
  writer->write_u2(0);                  // constant pool
  writer->write_u2(0);                  // static fields
  writer->write_u2(0);                  // instance fields

  writer->end_sub_record();

}

// HPROF uses a u4 as the record length field,
// which means we need to truncate arrays that are too long.
int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
  BasicType type = ArrayKlass::cast(array->klass())->element_type();
  assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");

  int length = array->length();

  int type_size;
  if (type == T_OBJECT) {
    type_size = sizeof(address);
  } else {
    type_size = type2aelembytes(type);
  }

  size_t length_in_bytes = (size_t)length * type_size;
  uint max_bytes = max_juint - header_size;

  if (length_in_bytes > max_bytes) {
    length = max_bytes / type_size;
    length_in_bytes = (size_t)length * type_size;

    warning("cannot dump array of type %s[] with length %d; truncating to length %d",
            type2name_tab[type], array->length(), length);
  }
  return length;
}
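
// Example of the truncation math above, for a 64-bit object array with the
// 25-byte header used by dump_object_array: max_bytes = 0xFFFFFFFF - 25, so
// any objArray longer than max_bytes / 8 = 536,870,908 elements is clamped to
// that length and the warning reports both the original and clamped values.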

// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
  // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
  short header_size = 1 + 2 * 4 + 2 * sizeof(address);
  int length = calculate_array_max_length(writer, array, header_size);
  u4 size = checked_cast<u4>(header_size + length * sizeof(address));

  writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
  writer->write_objectID(array);
  writer->write_u4(STACK_TRACE_ID);
  writer->write_u4(length);

  // array class ID
  writer->write_classID(array->klass());

  // [id]* elements
  for (int index = 0; index < length; index++) {
    oop o = array->obj_at(index);
    o = mask_dormant_archived_object(o, array);
    writer->write_objectID(o);
  }

  writer->end_sub_record();
}

#define WRITE_ARRAY(Array, Type, Size, Length) \
  for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }

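// For instance, WRITE_ARRAY(array, int, u4, length) expands to
//
//   for (int i = 0; i < length; i++) { writer->write_u4((u4)array->int_at(i)); }
//
// reading each element with the typed accessor and writing it with the
// matching fixed-width writer.
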
// creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
  BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
  // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
  short header_size = 2 * 1 + 2 * 4 + sizeof(address);

  int length = calculate_array_max_length(writer, array, header_size);
  int type_size = type2aelembytes(type);
  u4 length_in_bytes = (u4)length * type_size;
  u4 size = header_size + length_in_bytes;

  writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
  writer->write_objectID(array);
  writer->write_u4(STACK_TRACE_ID);
  writer->write_u4(length);
  writer->write_u1(type2tag(type));

                                     int bci) {
  int line_number;
  if (m->is_native()) {
    line_number = -3; // native frame
  } else {
    line_number = m->line_number_from_bci(bci);
  }

  write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
  writer->write_id(frame_serial_num);     // frame serial number
  writer->write_symbolID(m->name());      // method's name
  writer->write_symbolID(m->signature()); // method's signature

  assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
  writer->write_symbolID(m->method_holder()->source_file_name()); // source file name
  writer->write_u4(class_serial_num);     // class serial number
  writer->write_u4((u4) line_number);     // line number
}


// Support class used to generate HPROF_UTF8 records from the entries in the
// SymbolTable.

class SymbolTableDumper : public SymbolClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() const { return _writer; }
 public:
  SymbolTableDumper(AbstractDumpWriter* writer) { _writer = writer; }
  void do_symbol(Symbol** p);
};

void SymbolTableDumper::do_symbol(Symbol** p) {
  ResourceMark rm;
  Symbol* sym = *p;
  int len = sym->utf8_length();
  if (len > 0) {
    char* s = sym->as_utf8();
    DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
    writer()->write_symbolID(sym);
      return;
    }
  }

  if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
    return;
  }

  if (o->is_instance()) {
    // create a HPROF_GC_INSTANCE_DUMP record for each object
    DumperSupport::dump_instance(writer(), o, &_class_cache);
    // If we encounter an unmounted virtual thread it needs to be dumped explicitly
    // (mounted virtual threads are dumped with their carriers).
    if (java_lang_VirtualThread::is_instance(o)
        && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
      _vthread_dumper->dump_vthread(o, writer());
    }
  } else if (o->is_objArray()) {
    // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
    DumperSupport::dump_object_array(writer(), objArrayOop(o));
  } else if (o->is_typeArray()) {
    // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
    DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
  }
}

// The dumper controller for parallel heap dump
class DumperController : public CHeapObj<mtInternal> {
 private:
  Monitor* _lock;
  Mutex* _global_writer_lock;

  const uint _dumper_number;
  uint _complete_number;

  bool _started; // VM dumper started and acquired global writer lock

 public:
  DumperController(uint number) :
    // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
    _complete_number++;
    // propagate local error to global if any
    if (local_writer->has_error()) {
      global_writer->set_error(local_writer->error());
    }
    ml.notify();
  }

  void wait_all_dumpers_complete() {
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    while (_complete_number != _dumper_number) {
      ml.wait();
    }
  }
};

// DumpMerger merges separate dump files into a complete one
class DumpMerger : public StackObj {
private:
  DumpWriter* _writer;
  const char* _path;
  bool _has_error;
  int _dump_seq;

private:
  void merge_file(const char* path);
  void merge_done();
  void set_error(const char* msg);

public:
  DumpMerger(const char* path, DumpWriter* writer, int dump_seq) :
    _writer(writer),
    _path(path),
    _has_error(_writer->has_error()),
    _dump_seq(dump_seq) {}

  void do_merge();

  // returns path for the parallel DumpWriter (resource allocated)
  static char* get_writer_path(const char* base_path, int seq);

};

char* DumpMerger::get_writer_path(const char* base_path, int seq) {
  // approximate required buffer size
  size_t buf_size = strlen(base_path)
                    + 2   // ".p"
                    + 10  // number (that's enough for 2^32 parallel dumpers)
                    + 1;  // '\0'

  char* path = NEW_RESOURCE_ARRAY(char, buf_size);
  memset(path, 0, buf_size);

  os::snprintf(path, buf_size, "%s.p%d", base_path, seq);

  return path;
}
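
// For example, get_writer_path("/tmp/heap.hprof", 3) returns the
// resource-allocated string "/tmp/heap.hprof.p3" - the per-dumper segment
// file that DumpMerger later appends back into the final dump.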


void DumpMerger::merge_done() {
  // Writes the HPROF_HEAP_DUMP_END record.
  if (!_has_error) {
    DumperSupport::end_of_dump(_writer);
    _writer->flush();
  }
  _dump_seq = 0; // reset
}

void DumpMerger::set_error(const char* msg) {
  assert(msg != nullptr, "sanity check");
  log_error(heapdump)("%s (file: %s)", msg, _path);
  _writer->set_error(msg);
  _has_error = true;
}

#ifdef LINUX
// Merge segmented heap files via sendfile; this is more efficient than a
// read+write combination, which would have to move the data to and from
// user space.
void DumpMerger::merge_file(const char* path) {
  TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));

  int segment_fd = os::open(path, O_RDONLY, 0);
  if (segment_fd == -1) {
  // restore compressor for further use
  _writer->set_compressor(saved_compressor);
  merge_done();
}

// The VM operation that performs the heap dump
class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
 private:
  DumpWriter* _writer;
  JavaThread* _oome_thread;
  Method* _oome_constructor;
  bool _gc_before_heap_dump;
  GrowableArray<Klass*>* _klass_map;

  ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
  int _thread_dumpers_count;
  volatile int _thread_serial_num;
  volatile int _frame_serial_num;

  volatile int _dump_seq;
  // parallel heap dump support
  uint _num_dumper_threads;
  DumperController* _dumper_controller;
  ParallelObjectIterator* _poi;

  // Dumper id of VMDumper thread.
  static const int VMDumperId = 0;
  // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
  static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
  // the 1st dumper calling get_next_dumper_id becomes VM dumper
  int get_next_dumper_id() {
    return Atomic::fetch_then_add(&_dump_seq, 1);
  }

  DumpWriter* writer() const { return _writer; }

  bool skip_operation() const;

  // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
  void dump_threads(AbstractDumpWriter* writer);
  }

  ~VM_HeapDumper() {
    if (_thread_dumpers != nullptr) {
      for (int i = 0; i < _thread_dumpers_count; i++) {
        delete _thread_dumpers[i];
      }
      FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
    }

    if (_dumper_controller != nullptr) {
      delete _dumper_controller;
      _dumper_controller = nullptr;
    }
    delete _klass_map;
  }
  int dump_seq() { return _dump_seq; }
  bool is_parallel_dump() { return _num_dumper_threads > 1; }
  void prepare_parallel_dump(WorkerThreads* workers);

  VMOp_Type type() const { return VMOp_HeapDumper; }
  virtual bool doit_prologue();
  void doit();
  void work(uint worker_id);

  // UnmountedVThreadDumper implementation
  void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
};

bool VM_HeapDumper::skip_operation() const {
  return false;
}

// fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
  writer->finish_dump_segment();

  writer->write_u1(HPROF_HEAP_DUMP_END);
  writer->write_u4(0);
  writer->write_u4(0);
    _dumper_controller->lock_global_writer();
    _dumper_controller->signal_start();
  } else {
    _dumper_controller->wait_for_start_signal();
  }

  if (is_vm_dumper(dumper_id)) {
    TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
    // Write the file header - we always use 1.0.2
    const char* header = "JAVA PROFILE 1.0.2";

    // the header is only a few bytes long - no chance to overflow an int
    writer()->write_raw(header, strlen(header) + 1); // NUL terminated
    writer()->write_u4(oopSize);
    // timestamp is current time in ms
    writer()->write_u8(os::javaTimeMillis());
    // HPROF_UTF8 records
    SymbolTableDumper sym_dumper(writer());
    SymbolTable::symbols_do(&sym_dumper);

    // write HPROF_LOAD_CLASS records
    {
      LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
      ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
    }

    // write HPROF_FRAME and HPROF_TRACE records
    // this must be called after _klass_map is built while iterating the classes above.
    dump_stack_traces(writer());

    // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
    _dumper_controller->unlock_global_writer();
  }

  // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here

  ResourceMark rm;
  // share the global compressor; the local DumpWriter is not responsible for its life cycle
  DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
                            writer()->is_overwrite(), writer()->compressor());
              (error() != nullptr) ? error() : "reason unknown");
    }
    return -1;
  }

  // generate the segmented heap dump into separate files
  VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
  VMThread::execute(&dumper);

  // record any error that the writer may have encountered
  set_error(writer.error());

  // The heap dump process is done in two phases
  //
  // Phase 1: Concurrent threads directly write heap data to multiple heap files.
  //          This is done by VM_HeapDumper, which is performed within a safepoint.
  //
  // Phase 2: Merge multiple heap files into one complete heap dump file.
  //          This is done by DumpMerger, which is performed outside the safepoint.

  DumpMerger merger(path, &writer, dumper.dump_seq());
  // Performing the merge in the current thread keeps the VM Thread free,
  // so the merge does not hold up GC and other VM operations.
  merger.do_merge();
  if (writer.error() != nullptr) {
    set_error(writer.error());
  }

  // emit JFR event
  if (error() == nullptr) {
    event.set_destination(path);
    event.set_gcBeforeDump(_gc_before_heap_dump);
    event.set_size(writer.bytes_written());
    event.set_onOutOfMemoryError(_oome);
    event.set_overwrite(overwrite);
    event.set_compression(compression);
    event.commit();
  } else {
    log_debug(cds, heap)("Error %s while dumping heap", error());
  }
 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/timerTrace.hpp"
#include "services/heapDumper.hpp"
#include "services/heapDumperCompression.hpp"
#include "services/threadService.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#ifdef LINUX
#include "os_linux.hpp"
 *                7:  double array
 *                8:  byte array
 *                9:  short array
 *                10: int array
 *                11: long array
 *               [u1]* elements
 *
 * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 *
 *                u4        total number of samples
 *                u4        # of traces
 *               [u4        # of samples
 *                u4]*      stack trace serial number
 *
 * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 *
 *                u4        0x00000001: alloc traces on/off
 *                          0x00000002: cpu sampling on/off
 *                u2        stack trace depth
 *
 * HPROF_FLAT_ARRAYS        list of flat arrays
 *
 *               [flat array sub-records]*
 *
 * HPROF_FLAT_ARRAY         flat array
 *
 *                id        array object ID (dumped as HPROF_GC_PRIM_ARRAY_DUMP)
 *                id        element class ID (dumped by HPROF_GC_CLASS_DUMP)
 *
 * HPROF_INLINED_FIELDS     describes inlined fields
 *
 *               [class with inlined fields sub-records]*
 *
 * HPROF_CLASS_WITH_INLINED_FIELDS
 *
 *                id        class ID (dumped as HPROF_GC_CLASS_DUMP)
 *
 *                u2        number of instance inlined fields (not including super)
 *               [u2,       inlined field index,
 *                u2,       synthetic field count,
 *                id,       original field name,
 *                id]*      inlined field class ID (dumped by HPROF_GC_CLASS_DUMP)
 *
 * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 * be generated as a sequence of heap dump segments. This sequence is
 * terminated by an end record. The additional tags allowed by format
 * "JAVA PROFILE 1.0.2" are:
 *
 * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 *
 *               [heap dump sub-records]*
 *               The same sub-record types allowed by HPROF_HEAP_DUMP
 *
 * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 *
 */
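
// Layout sketch for the inlined-object records above: a HPROF_FLAT_ARRAY
// sub-record is two IDs back to back (the array object ID and its element
// class ID), while each HPROF_CLASS_WITH_INLINED_FIELDS sub-record is a class
// ID followed by a u2 field count and then count * (u2, u2, id, id) tuples,
// one per inlined field.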


// HPROF tags

enum hprofTag : u1 {
  // top-level records
  HPROF_UTF8                    = 0x01,
  HPROF_LOAD_CLASS              = 0x02,
  HPROF_UNLOAD_CLASS            = 0x03,
  HPROF_FRAME                   = 0x04,
  HPROF_TRACE                   = 0x05,
  HPROF_ALLOC_SITES             = 0x06,
  HPROF_HEAP_SUMMARY            = 0x07,
  HPROF_START_THREAD            = 0x0A,
  HPROF_END_THREAD              = 0x0B,
  HPROF_HEAP_DUMP               = 0x0C,
  HPROF_CPU_SAMPLES             = 0x0D,
  HPROF_CONTROL_SETTINGS        = 0x0E,

  // 1.0.2 record types
  HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
  HPROF_HEAP_DUMP_END           = 0x2C,

  // inlined object support
  HPROF_FLAT_ARRAYS             = 0x12,
  HPROF_INLINED_FIELDS          = 0x13,
  // inlined object sub-records
  HPROF_FLAT_ARRAY              = 0x01,
  HPROF_CLASS_WITH_INLINED_FIELDS = 0x01,

  // field types
  HPROF_ARRAY_OBJECT            = 0x01,
  HPROF_NORMAL_OBJECT           = 0x02,
  HPROF_BOOLEAN                 = 0x04,
  HPROF_CHAR                    = 0x05,
  HPROF_FLOAT                   = 0x06,
  HPROF_DOUBLE                  = 0x07,
  HPROF_BYTE                    = 0x08,
  HPROF_SHORT                   = 0x09,
  HPROF_INT                     = 0x0A,
  HPROF_LONG                    = 0x0B,

  // data-dump sub-records
  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
  HPROF_GC_CLASS_DUMP           = 0x20,
  HPROF_GC_INSTANCE_DUMP        = 0x21,
  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
};

// Default stack trace ID (used for dummy HPROF_TRACE record)
enum {
  STACK_TRACE_ID = 1,
  INITIAL_CLASS_COUNT = 200
};


class AbstractDumpWriter;

class InlinedObjects {

  struct ClassInlinedFields {
    const Klass* klass;
    uintx base_index; // base index of the inlined field names (1st field has index base_index+1).
    ClassInlinedFields(const Klass* klass = nullptr, uintx base_index = 0) : klass(klass), base_index(base_index) {}

    // For GrowableArray::find_sorted().
    static int compare(const ClassInlinedFields& a, const ClassInlinedFields& b) {
      return a.klass - b.klass;
    }
    // For GrowableArray::sort().
    static int compare(ClassInlinedFields* a, ClassInlinedFields* b) {
      return compare(*a, *b);
    }
  };

  uintx _min_string_id;
  uintx _max_string_id;

  GrowableArray<ClassInlinedFields>* _inlined_field_map;

  // counters for classes with inlined fields and for the fields
  int _classes_count;
  int _inlined_fields_count;

  static InlinedObjects* _instance;

  static void inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count);

  GrowableArray<oop>* _flat_arrays;

 public:
  InlinedObjects()
    : _min_string_id(0), _max_string_id(0),
      _inlined_field_map(nullptr),
      _classes_count(0), _inlined_fields_count(0),
      _flat_arrays(nullptr) {
  }

  static InlinedObjects* get_instance() {
    return _instance;
  }

  void init();
  void release();

  void dump_inlined_field_names(AbstractDumpWriter* writer);

  uintx get_base_index_for(Klass* k);
  uintx get_next_string_id(uintx id);

  void dump_classed_with_inlined_fields(AbstractDumpWriter* writer);

  void add_flat_array(oop array);
  void dump_flat_arrays(AbstractDumpWriter* writer);

};

InlinedObjects* InlinedObjects::_instance = nullptr;


// Supports I/O operations for a dump
// Base class for dump and parallel dump
class AbstractDumpWriter : public CHeapObj<mtInternal> {
 protected:
  enum {
    io_buffer_max_size = 1*M,
    dump_segment_header_size = 9
  };

  char* _buffer;    // internal buffer
  size_t _size;
  size_t _pos;

  bool _in_dump_segment;    // Are we currently in a dump segment?
  bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
  DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
  DEBUG_ONLY(bool _sub_record_ended;)  // True if we have called the end_sub_record().

  char* buffer() const       { return _buffer; }
  size_t buffer_size() const { return _size; }
  }
}

class DumperClassCacheTable;
class DumperClassCacheTableEntry;

// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // calculates the total size of all fields of the given class.
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given identity or inlined object;
  // for identity objects offset is 0 and 'klass' is o->klass(),
  // for inlined objects offset is the offset in the holder object, 'klass' is the inlined object class
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);
  // dump the raw values of the instance fields of the given inlined object;
  // dump_instance_fields wrapper for inlined objects
  static void dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry);

  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k, uintx* inlined_fields_index = nullptr);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
  static void dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
  // extended version to dump flat arrays as primitive arrays;
  // type_size specifies the size of the inlined objects.
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  // helper methods for inlined fields.
  static bool is_inlined_field(const fieldDescriptor& fld) {
    return fld.is_flat();
  }
  static InlineKlass* get_inlined_field_klass(const fieldDescriptor& fld) {
    assert(is_inlined_field(fld), "must be inlined field");
    InstanceKlass* holder_klass = fld.field_holder();
    return InlineKlass::cast(holder_klass->get_inline_type_field_klass(fld.index()));
  }

  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, cds, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name(),
                             p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name());
      }
    }
  }
};

// Hash table of klasses to the klass metadata. This should greatly improve the
// heap dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  GrowableArray<char> _sigs_start;
  GrowableArray<int> _offsets;
  GrowableArray<InlineKlass*> _inline_klasses;
  u4 _instance_size;
  int _entries;

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count()             { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  void push_sig_start_inlined() { _sigs_start.push('Q'); }
  bool is_inlined(int field_idx) { return _sigs_start.at(field_idx) == 'Q'; }
  InlineKlass* inline_klass(int field_idx) { assert(is_inlined(field_idx), "Not inlined"); return _inline_klasses.at(field_idx); }
  int offset(int field_idx)     { return _offsets.at(field_idx); }
  u4 instance_size()            { return _instance_size; }
};
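
// Inlined (flat) fields are marked in _sigs_start with the sentinel 'Q'
// pushed by push_sig_start_inlined(), so a cache user branches per field,
// e.g. (sketch):
//
//   if (entry->is_inlined(i)) {
//     InlineKlass* vk = entry->inline_klass(i);  // flat field, dump recursively
//   } else {
//     char sig0 = entry->sig_start(i);           // ordinary field signature
//   }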

class DumperClassCacheTable {
private:
  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031, which is the first prime after 1024.
  static constexpr size_t TABLE_SIZE = 1031;

  // Maintain the cache for N classes. This limits memory footprint
  // impact, regardless of how many classes we have in the dump.
  // This also improves lookup performance by keeping the statically
  // sized table from overloading.
  static constexpr int CACHE_TOP = 256;

  typedef ResourceHashtable<InstanceKlass*, DumperClassCacheTableEntry*,
                            TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
  PtrTable* _ptrs;

    bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
      delete entry;
      return true;
    }
  } cleanup;
  table->unlink(&cleanup);
}

public:
  DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
    if (_last_ik == ik) {
      return _last_entry;
    }

    DumperClassCacheTableEntry* entry;
    DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
    if (from_cache == nullptr) {
      entry = new DumperClassCacheTableEntry();
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          InlineKlass* inlineKlass = nullptr;
          if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
            inlineKlass = DumperSupport::get_inlined_field_klass(fld.field_descriptor());
            entry->push_sig_start_inlined();
            entry->_instance_size += DumperSupport::instance_size(inlineKlass);
          } else {
            Symbol* sig = fld.signature();
            entry->_sigs_start.push(sig->char_at(0));
            entry->_instance_size += DumperSupport::sig2size(sig);
          }
          entry->_inline_klasses.push(inlineKlass);
          entry->_offsets.push(fld.offset());
          entry->_entries++;
        }
      }

      if (_ptrs->number_of_entries() >= CACHE_TOP) {
        // We do not track the individual hit rates for table entries.
        // Purge the entire table, and let the cache catch up with new
        // distribution.
        unlink_all(_ptrs);
      }

      _ptrs->put(ik, entry);
    } else {
      entry = *from_cache;
    }

    // Remember for single-slot cache.
    _last_ik = ik;
    _last_entry = entry;

    return entry;
}

// dump a jfloat
void DumperSupport::dump_float(AbstractDumpWriter* writer, jfloat f) {
  if (g_isnan(f)) {
    writer->write_u4(0x7fc00000); // collapsing NaNs
  } else {
    writer->write_u4(bit_cast<u4>(f));
  }
}

// dump a jdouble
void DumperSupport::dump_double(AbstractDumpWriter* writer, jdouble d) {
  if (g_isnan(d)) {
    writer->write_u8(0x7ff80000ull << 32); // collapsing NaNs
  } else {
    writer->write_u8(bit_cast<u8>(d));
  }
}


// dumps the raw value of the given field
void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
  switch (type) {
    case JVM_SIGNATURE_CLASS :
    case JVM_SIGNATURE_ARRAY : {
      oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
      o = mask_dormant_archived_object(o, obj);
      assert(oopDesc::is_oop_or_null(o), "Expected an oop or nullptr at " PTR_FORMAT, p2i(o));
      writer->write_objectID(o);
      break;
    }
    case JVM_SIGNATURE_BYTE : {
      jbyte b = obj->byte_field(offset);
      writer->write_u1(b);
      break;
    }
    case JVM_SIGNATURE_CHAR : {
      jchar c = obj->char_field(offset);
      writer->write_u2(c);
      break;
      writer->write_u4(i);
      break;
    }
    case JVM_SIGNATURE_LONG : {
      jlong l = obj->long_field(offset);
      writer->write_u8(l);
      break;
    }
    case JVM_SIGNATURE_BOOLEAN : {
      jboolean b = obj->bool_field(offset);
      writer->write_u1(b);
      break;
    }
    default : {
      ShouldNotReachHere();
      break;
    }
  }
}

// calculates the total size of all fields of the given class.
u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
  if (class_cache_entry != nullptr) {
    return class_cache_entry->instance_size();
  } else {
    u4 size = 0;
    for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
      if (!fld.access_flags().is_static()) {
        if (is_inlined_field(fld.field_descriptor())) {
          size += instance_size(get_inlined_field_klass(fld.field_descriptor()));
        } else {
          size += sig2size(fld.signature());
        }
      }
    }
    return size;
  }
}

u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
  field_count = 0;
  u4 size = 0;

  for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
    if (fldc.access_flags().is_static()) {
      assert(!is_inlined_field(fldc.field_descriptor()), "static fields cannot be inlined");

      field_count++;
      size += sig2size(fldc.signature());
    }
  }

  // Add in resolved_references which is referenced by the cpCache
  // The resolved_references is an array per InstanceKlass holding the
  // strings and other oops resolved from the constant pool.
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    field_count++;
    size += sizeof(address);

    // Add in the resolved_references of the used previous versions of the class
    // in the case of RedefineClasses
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      field_count++;
      size += sizeof(address);
      prev = prev->previous_versions();

  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
  // arrays.
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    field_count++;
    size += sizeof(address);
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
}

// dumps static fields of the given class
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!is_inlined_field(fld.field_descriptor()), "static fields cannot be inlined");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name()); // name
      writer->write_u1(sig2tag(sig));     // type

      // value
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}
1284
1285 // dumps the raw values of the instance fields of the given identity or inlined object;
1286 // for identity objects 'offset' is 0 and 'class_cache_entry' describes o->klass();
1287 // for inlined objects 'offset' is the field's offset in the holder object and 'class_cache_entry' describes the inlined object's class.
1288 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
1289 assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1290 for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1291 if (class_cache_entry->is_inlined(idx)) {
1292 InlineKlass* field_klass = class_cache_entry->inline_klass(idx);
1293 int fields_offset = offset + (class_cache_entry->offset(idx) - field_klass->payload_offset());
1294 DumperClassCacheTableEntry* inline_class_cache_entry = class_cache->lookup_or_create(field_klass);
1295 dump_inlined_object_fields(writer, o, fields_offset, class_cache, inline_class_cache_entry);
1296 } else {
1297 dump_field_value(writer, class_cache_entry->sig_start(idx), o, offset + class_cache_entry->offset(idx));
1298 }
1299 }
1300 }
1301
1302 void DumperSupport::dump_inlined_object_fields(AbstractDumpWriter* writer, oop o, int offset, DumperClassCacheTable* class_cache, DumperClassCacheTableEntry* class_cache_entry) {
1303 // the object is inlined, so all its fields are stored without headers.
1304 dump_instance_fields(writer, o, offset, class_cache, class_cache_entry);
1305 }
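
// Worked example of the offset arithmetic above (hypothetical layout, for
// illustration only): let a holder embed an inline type V whose standalone
// instances have payload_offset() == 16 with int fields x at 16 and y at 20,
// and let the flattened payload start at holder offset 40. Then
//   fields_offset = 0 + (40 - 16) = 24
// and the leaf reads land at 24 + 16 = 40 (x) and 24 + 20 = 44 (y), i.e.
// standalone field offsets are rebased onto the holder's flattened payload.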
1306
1307 // gets the count of the instance fields for a given class
1308 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1309 u2 field_count = 0;
1310
1311 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1312 if (!fldc.access_flags().is_static()) {
1313 if (is_inlined_field(fldc.field_descriptor())) {
1314 // add "synthetic" fields for inlined fields.
1315 field_count += get_instance_fields_count(get_inlined_field_klass(fldc.field_descriptor()));
1316 } else {
1317 field_count++;
1318 }
1319 }
1320 }
1321
1322 return field_count;
1323 }
1324
1325 // dumps the definition of the instance fields for a given class
1326 // inlined_fields_id is non-null for inlined fields (to get synthetic field name IDs
1327 // by using InlinedObjects::get_next_string_id()).
1328 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik, uintx* inlined_fields_id) {
1329 // inlined_fields_id != nullptr means ik is the class of an inlined field.
1330 // Inlined field id pointer for this class; lazily initialized
1331 // if the class has inlined field(s) and the caller didn't provide inlined_fields_id.
1332 uintx *this_klass_inlined_fields_id = inlined_fields_id;
1333 uintx inlined_id = 0;
1334
1335 // dump the field descriptors
1336 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1337 if (!fld.access_flags().is_static()) {
1338 if (is_inlined_field(fld.field_descriptor())) {
1339 // dump "synthetic" fields for inlined fields.
1340 if (this_klass_inlined_fields_id == nullptr) {
1341 inlined_id = InlinedObjects::get_instance()->get_base_index_for(ik);
1342 this_klass_inlined_fields_id = &inlined_id;
1343 }
1344 dump_instance_field_descriptors(writer, get_inlined_field_klass(fld.field_descriptor()), this_klass_inlined_fields_id);
1345 } else {
1346 Symbol* sig = fld.signature();
1347 Symbol* name = nullptr;
1348 // Use inlined_fields_id provided by caller.
1349 if (inlined_fields_id != nullptr) {
1350 uintx name_id = InlinedObjects::get_instance()->get_next_string_id(*inlined_fields_id);
1351
1352 // name_id == 0 is returned on error; fall back to the original field name.
1353 if (name_id != 0) {
1354 *inlined_fields_id = name_id;
1355 name = reinterpret_cast<Symbol*>(name_id);
1356 }
1357 }
1358 if (name == nullptr) {
1359 name = fld.name();
1360 }
1361
1362 writer->write_symbolID(name); // name
1363 writer->write_u1(sig2tag(sig)); // type
1364 }
1365 }
1366 }
1367 }
1368
1369 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1370 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1371 InstanceKlass* ik = InstanceKlass::cast(o->klass());
1372
1373 DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1374
1375 u4 is = instance_size(ik, cache_entry);
1376 u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1377
1378 writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1379 writer->write_objectID(o);
1380 writer->write_u4(STACK_TRACE_ID);
1381
1382 // class ID
1383 writer->write_classID(ik);
1384
1385 // number of bytes that follow
1386 writer->write_u4(is);
1387
1388 // field values
1389 dump_instance_fields(writer, o, 0, class_cache, cache_entry);
1390
1391 writer->end_sub_record();
1392 }
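
// For reference, the sub-record written above is laid out as
//   u1    sub-record tag (HPROF_GC_INSTANCE_DUMP)
//   id    object ID
//   u4    stack trace serial number
//   id    class ID
//   u4    number of bytes that follow
//   [u1]* instance field values (this class, followed by super class, etc.)
// which is exactly the 1 + sizeof(address) + 4 + sizeof(address) + 4 + is
// bytes reserved in 'size'.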
1393
1394 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1395 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
1396 InstanceKlass* ik = InstanceKlass::cast(k);
1397
1398 // We can safepoint and do a heap dump at a point where we have a Klass,
1399 // but no java mirror class has been set up for it. So we need to check
1400 // that the class is at least loaded, to avoid a crash from a null mirror.
1401 if (!ik->is_loaded()) {
1402 return;
1403 }
1404
1405 u2 static_fields_count = 0;
1406 u4 static_size = get_static_fields_size(ik, static_fields_count);
1407 u2 instance_fields_count = get_instance_fields_count(ik);
1408 u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1409 u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1414 writer->write_classID(ik);
1415 writer->write_u4(STACK_TRACE_ID);
1416
1417 // super class ID
1418 InstanceKlass* java_super = ik->java_super();
1419 if (java_super == nullptr) {
1420 writer->write_objectID(oop(nullptr));
1421 } else {
1422 writer->write_classID(java_super);
1423 }
1424
1425 writer->write_objectID(ik->class_loader());
1426 writer->write_objectID(ik->signers());
1427 writer->write_objectID(ik->protection_domain());
1428
1429 // reserved
1430 writer->write_objectID(oop(nullptr));
1431 writer->write_objectID(oop(nullptr));
1432
1433 // instance size
1434 writer->write_u4(HeapWordSize * ik->size_helper());
1435
1436 // size of constant pool - ignored by HAT 1.1
1437 writer->write_u2(0);
1438
1439 // static fields
1440 writer->write_u2(static_fields_count);
1441 dump_static_fields(writer, ik);
1442
1443 // description of instance fields
1444 writer->write_u2(instance_fields_count);
1445 dump_instance_field_descriptors(writer, ik);
1446
1447 writer->end_sub_record();
1448 }
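
// Size accounting for the class dump above: the 6 * sizeof(address) term covers
// the super class ID, class loader, signers, protection domain and the two
// reserved IDs; the remaining constants are the tag (1), class ID plus stack
// trace serial number (sizeof(address) + 4), instance size (4), constant pool
// count (2), and the u2 static and instance field counts.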
1449
1450 // creates HPROF_GC_CLASS_DUMP record for the given array class
1451 void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
1452 InstanceKlass* ik = nullptr; // bottom class for object arrays, null for primitive type arrays
1453 if (k->is_objArray_klass()) {
1454 Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
1468 assert(java_super != nullptr, "checking");
1469 writer->write_classID(java_super);
1470
1471 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1472 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1473 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1474
1475 writer->write_objectID(oop(nullptr)); // reserved
1476 writer->write_objectID(oop(nullptr));
1477 writer->write_u4(0); // instance size
1478 writer->write_u2(0); // constant pool
1479 writer->write_u2(0); // static fields
1480 writer->write_u2(0); // instance fields
1481
1482 writer->end_sub_record();
1483
1484 }
1485
1486 // HPROF uses a u4 as the record length field,
1487 // which means we need to truncate arrays that are too long.
1488 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, int type_size, short header_size) {
1489 int length = array->length();
1490
1491 size_t length_in_bytes = (size_t)length * type_size;
1492 uint max_bytes = max_juint - header_size;
1493
1494 if (length_in_bytes > max_bytes) {
1495 length = max_bytes / type_size;
1496 length_in_bytes = (size_t)length * type_size;
1497
1498 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1499 warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1500 type2name_tab[type], array->length(), length);
1501 }
1502 return length;
1503 }
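
// Worked example of the truncation above (illustrative arithmetic): for a
// long[] written with the primitive-array header (2 * 1 + 2 * 4 + 8 == 18
// bytes on a 64-bit VM), max_bytes is 0xFFFFFFFF - 18 = 4294967277, so any
// array longer than 4294967277 / 8 = 536870909 elements is truncated to that
// length and the warning is emitted.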
1504
1505 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1506 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1507 assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type");
1508 int type_size;
1509 if (type == T_OBJECT) {
1510 type_size = sizeof(address);
1511 } else if (type == T_FLAT_ELEMENT) {
1512 // TODO: FIXME
1513 fatal("Not supported yet"); // FIXME: JDK-8325678
1514 } else {
1515 type_size = type2aelembytes(type);
1516 }
1517
1518 return calculate_array_max_length(writer, array, type_size, header_size);
1519 }
1520
1521 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1522 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1523 // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1524 short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1525 int length = calculate_array_max_length(writer, array, header_size);
1526 u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1527
1528 writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1529 writer->write_objectID(array);
1530 writer->write_u4(STACK_TRACE_ID);
1531 writer->write_u4(length);
1532
1533 // array class ID
1534 writer->write_classID(array->klass());
1535
1536 // [id]* elements
1537 for (int index = 0; index < length; index++) {
1538 oop o = array->obj_at(index);
1539 o = mask_dormant_archived_object(o, array);
1540 writer->write_objectID(o);
1541 }
1542
1543 writer->end_sub_record();
1544 }
1545
1546 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given flat array
1547 void DumperSupport::dump_flat_array(AbstractDumpWriter* writer, flatArrayOop array, DumperClassCacheTable* class_cache) {
1548 FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1549 InlineKlass* element_klass = array_klass->element_klass();
1550 int element_size = instance_size(element_klass);
1551 /* u1 sub-record tag (HPROF_GC_PRIM_ARRAY_DUMP)
1552 * id array object ID
1553 * u4 stack trace serial number
1554 * u4 number of elements
1555 * u1 element type */
1556 short header_size = 1 + sizeof(address) + 2 * 4 + 1;
1557
1558 // TODO: use T_SHORT/T_INT/T_LONG if needed to avoid truncation
1559 BasicType type = T_BYTE;
1561 int length = calculate_array_max_length(writer, array, element_size, header_size);
1562 u4 length_in_bytes = (u4)(length * element_size);
1563 u4 size = header_size + length_in_bytes;
1564
1565 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1566 writer->write_objectID(array);
1567 writer->write_u4(STACK_TRACE_ID);
1568 // TODO: round up array length for T_SHORT/T_INT/T_LONG
1569 writer->write_u4(length * element_size);
1570 writer->write_u1(type2tag(type));
1571
1572 for (int index = 0; index < length; index++) {
1573 // base offset of the inlined element: the payload start (from flatArrayOop::value_at_addr())
1574 // rebased by payload_offset(), since dump_inlined_object_fields() adds the element fields' standalone offsets
1575 int offset = (int)((address)array->value_at_addr(index, array_klass->layout_helper()) - cast_from_oop<address>(array)) - element_klass->payload_offset();
1576 DumperClassCacheTableEntry* class_cache_entry = class_cache->lookup_or_create(element_klass);
1577 dump_inlined_object_fields(writer, array, offset, class_cache, class_cache_entry);
1578 }
1579
1580 // TODO: write padding bytes for T_SHORT/T_INT/T_LONG
1581
1582 InlinedObjects::get_instance()->add_flat_array(array);
1583
1584 writer->end_sub_record();
1585 }
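
// Note on the encoding above: a flat array has no per-element headers, so it is
// emitted as a HPROF_GC_PRIM_ARRAY_DUMP of T_BYTE whose payload is the raw field
// values of every element, and the array is remembered so a matching
// HPROF_FLAT_ARRAY record (object ID plus element class ID) can be written
// later; readers aware of the inlined-objects extension can then re-interpret
// the byte payload using the element class.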
1586
1587 #define WRITE_ARRAY(Array, Type, Size, Length) \
1588 for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
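
// For example, WRITE_ARRAY(array, int, u4, length) expands to
//   for (int i = 0; i < length; i++) { writer->write_u4((u4)array->int_at(i)); }
// i.e. 'Type' selects the typeArrayOop accessor while 'Size' selects both the
// cast and the writer primitive.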
1589
1590 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1591 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1592 BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1593 // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1594 short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1595
1596 int length = calculate_array_max_length(writer, array, header_size);
1597 int type_size = type2aelembytes(type);
1598 u4 length_in_bytes = (u4)length * type_size;
1599 u4 size = header_size + length_in_bytes;
1600
1601 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1602 writer->write_objectID(array);
1603 writer->write_u4(STACK_TRACE_ID);
1604 writer->write_u4(length);
1605 writer->write_u1(type2tag(type));
1606
1688 int bci) {
1689 int line_number;
1690 if (m->is_native()) {
1691 line_number = -3; // native frame
1692 } else {
1693 line_number = m->line_number_from_bci(bci);
1694 }
1695
1696 write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1697 writer->write_id(frame_serial_num); // frame serial number
1698 writer->write_symbolID(m->name()); // method's name
1699 writer->write_symbolID(m->signature()); // method's signature
1700
1701 assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1702 writer->write_symbolID(m->method_holder()->source_file_name()); // source file name
1703 writer->write_u4(class_serial_num); // class serial number
1704 writer->write_u4((u4) line_number); // line number
1705 }
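
// The line number written above follows the hprof conventions for synthetic
// values: a positive value is a real source line, 0 means no line information,
// -1 an unknown location, -2 a compiled method and -3 a native method; this
// dumper emits -3 explicitly and otherwise relies on line_number_from_bci().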
1706
1707
1708 class InlinedFieldNameDumper : public LockedClassesDo {
1709 public:
1710 typedef void (*Callback)(InlinedObjects *owner, const Klass *klass, uintx base_index, int count);
1711
1712 private:
1713 AbstractDumpWriter* _writer;
1714 InlinedObjects *_owner;
1715 Callback _callback;
1716 uintx _index;
1717
1718 void dump_inlined_field_names(GrowableArray<Symbol*>* super_names, Symbol* field_name, InlineKlass* klass) {
1719 super_names->push(field_name);
1720 for (HierarchicalFieldStream<JavaFieldStream> fld(klass); !fld.done(); fld.next()) {
1721 if (!fld.access_flags().is_static()) {
1722 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1723 dump_inlined_field_names(super_names, fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1724 } else {
1725 // get next string ID.
1726 uintx next_index = _owner->get_next_string_id(_index);
1727 if (next_index == 0) {
1728 // something went wrong (overflow?)
1729 // stop generation; the rest of the inlined objects will keep their original field names.
1730 return;
1731 }
1732 _index = next_index;
1733
1734 // Calculate length.
1735 int len = fld.name()->utf8_length();
1736 for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1737 len += (*it)->utf8_length() + 1; // +1 for ".".
1738 }
1739
1740 DumperSupport::write_header(_writer, HPROF_UTF8, oopSize + len);
1741 _writer->write_symbolID(reinterpret_cast<Symbol*>(_index));
1742 // Write the string value.
1743 // 1) super_names.
1744 for (GrowableArrayIterator<Symbol*> it = super_names->begin(); it != super_names->end(); ++it) {
1745 _writer->write_raw((*it)->bytes(), (*it)->utf8_length());
1746 _writer->write_u1('.');
1747 }
1748 // 2) field name.
1749 _writer->write_raw(fld.name()->bytes(), fld.name()->utf8_length());
1750 }
1751 }
1752 }
1753 super_names->pop();
1754 }
1755
1756 void dump_inlined_field_names(Symbol* field_name, InlineKlass* field_klass) {
1757 GrowableArray<Symbol*> super_names(4, mtServiceability);
1758 dump_inlined_field_names(&super_names, field_name, field_klass);
1759 }
1760
1761 public:
1762 InlinedFieldNameDumper(AbstractDumpWriter* writer, InlinedObjects* owner, Callback callback)
1763 : _writer(writer), _owner(owner), _callback(callback), _index(0) {
1764 }
1765
1766 void do_klass(Klass* k) {
1767 if (!k->is_instance_klass()) {
1768 return;
1769 }
1770 InstanceKlass* ik = InstanceKlass::cast(k);
1771 // if (ik->has_inline_type_fields()) {
1772 // return;
1773 // }
1774
1775 uintx base_index = _index;
1776 int count = 0;
1777
1778 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1779 if (!fld.access_flags().is_static()) {
1780 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1781 dump_inlined_field_names(fld.name(), DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1782 count++;
1783 }
1784 }
1785 }
1786
1787 if (count != 0) {
1788 _callback(_owner, k, base_index, count);
1789 }
1790 }
1791 };
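
// Example of the synthetic names generated above (hypothetical classes, for
// illustration only): given
//   value class Point { int x; int y; }
//   class Line { Point start; Point end; }   // both fields inlined
// the dumper emits HPROF_UTF8 records "start.x", "start.y", "end.x" and
// "end.y", each keyed by a fake symbol ID obtained from get_next_string_id(),
// so heap dump readers see one synthetic field per flattened primitive slot.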
1792
1793 class InlinedFieldsDumper : public LockedClassesDo {
1794 private:
1795 AbstractDumpWriter* _writer;
1796
1797 public:
1798 InlinedFieldsDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1799
1800 void do_klass(Klass* k) {
1801 if (!k->is_instance_klass()) {
1802 return;
1803 }
1804 InstanceKlass* ik = InstanceKlass::cast(k);
1805 // if (ik->has_inline_type_fields()) {
1806 // return;
1807 // }
1808
1809 // We can be at a point where the java mirror does not exist yet.
1810 // So we need to check that the class is at least loaded, to avoid a crash from a null mirror.
1811 if (!ik->is_loaded()) {
1812 return;
1813 }
1814
1815 u2 inlined_count = 0;
1816 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1817 if (!fld.access_flags().is_static()) {
1818 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1819 inlined_count++;
1820 }
1821 }
1822 }
1823 if (inlined_count != 0) {
1824 _writer->write_u1(HPROF_CLASS_WITH_INLINED_FIELDS);
1825
1826 // class ID
1827 _writer->write_classID(ik);
1828 // number of inlined fields
1829 _writer->write_u2(inlined_count);
1830 u2 index = 0;
1831 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1832 if (!fld.access_flags().is_static()) {
1833 if (DumperSupport::is_inlined_field(fld.field_descriptor())) {
1834 // inlined field index
1835 _writer->write_u2(index);
1836 // synthetic field count
1837 u2 field_count = DumperSupport::get_instance_fields_count(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1838 _writer->write_u2(field_count);
1839 // original field name
1840 _writer->write_symbolID(fld.name());
1841 // inlined field class ID
1842 _writer->write_classID(DumperSupport::get_inlined_field_klass(fld.field_descriptor()));
1843
1844 index += field_count;
1845 } else {
1846 index++;
1847 }
1848 }
1849 }
1850 }
1851 }
1852 };
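
// The HPROF_CLASS_WITH_INLINED_FIELDS entry written above therefore consists of
//   u1 tag, id class ID, u2 inlined field count,
// followed, for each inlined field, by
//   u2 field index, u2 synthetic field count, id original field name, id field class ID
// which matches the size precomputed in dump_classed_with_inlined_fields() below.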
1853
1854
1855 void InlinedObjects::init() {
1856 _instance = this;
1857
1858 struct Closure : public SymbolClosure {
1859 uintx _min_id = max_uintx;
1860 uintx _max_id = 0;
1862
1863 void do_symbol(Symbol** p) {
1864 uintx val = reinterpret_cast<uintx>(*p);
1865 if (val < _min_id) {
1866 _min_id = val;
1867 }
1868 if (val > _max_id) {
1869 _max_id = val;
1870 }
1871 }
1872 } closure;
1873
1874 SymbolTable::symbols_do(&closure);
1875
1876 _min_string_id = closure._min_id;
1877 _max_string_id = closure._max_id;
1878 }
1879
1880 void InlinedObjects::release() {
1881 _instance = nullptr;
1882
1883 if (_inlined_field_map != nullptr) {
1884 delete _inlined_field_map;
1885 _inlined_field_map = nullptr;
1886 }
1887 if (_flat_arrays != nullptr) {
1888 delete _flat_arrays;
1889 _flat_arrays = nullptr;
1890 }
1891 }
1892
1893 void InlinedObjects::inlined_field_names_callback(InlinedObjects* _this, const Klass* klass, uintx base_index, int count) {
1894 if (_this->_inlined_field_map == nullptr) {
1895 _this->_inlined_field_map = new (mtServiceability) GrowableArray<ClassInlinedFields>(100, mtServiceability);
1896 }
1897 _this->_inlined_field_map->append(ClassInlinedFields(klass, base_index));
1898
1899 // counters for dumping classes with inlined fields
1900 _this->_classes_count++;
1901 _this->_inlined_fields_count += count;
1902 }
1903
1904 void InlinedObjects::dump_inlined_field_names(AbstractDumpWriter* writer) {
1905 InlinedFieldNameDumper nameDumper(writer, this, inlined_field_names_callback);
1906 ClassLoaderDataGraph::classes_do(&nameDumper);
1907
1908 if (_inlined_field_map != nullptr) {
1909 // prepare the map for get_base_index_for().
1910 _inlined_field_map->sort(ClassInlinedFields::compare);
1911 }
1912 }
1913
1914 uintx InlinedObjects::get_base_index_for(Klass* k) {
1915 if (_inlined_field_map != nullptr) {
1916 bool found = false;
1917 int idx = _inlined_field_map->find_sorted<ClassInlinedFields, ClassInlinedFields::compare>(ClassInlinedFields(k, 0), found);
1918 if (found) {
1919 return _inlined_field_map->at(idx).base_index;
1920 }
1921 }
1922
1923 // return max_uintx, so get_next_string_id returns 0.
1924 return max_uintx;
1925 }
1926
1927 uintx InlinedObjects::get_next_string_id(uintx id) {
1928 if (++id == _min_string_id) {
1929 return _max_string_id + 1;
1930 }
1931 return id;
1932 }
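
// Worked example (illustrative addresses): with real Symbol*s occupying
// [_min_string_id, _max_string_id] == [0x7f0000001000, 0x7f00000fffff], the
// fake IDs are successive integers that leap over that range:
//   get_next_string_id(0x7f0000000fff) == 0x7f0000100000
// and an increment that wraps around to 0 signals exhaustion to the callers.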
1933
1934 void InlinedObjects::dump_classed_with_inlined_fields(AbstractDumpWriter* writer) {
1935 if (_classes_count != 0) {
1936 // The record for each class contains a tag (u1), a class ID and a count (u2);
1937 // then, for each inlined field: its index (u2), the synthetic field count (u2), the original field name ID and a class ID.
1938 int size = _classes_count * (1 + sizeof(address) + 2)
1939 + _inlined_fields_count * (2 + 2 + sizeof(address) + sizeof(address));
1940 DumperSupport::write_header(writer, HPROF_INLINED_FIELDS, (u4)size);
1941
1942 InlinedFieldsDumper dumper(writer);
1943 ClassLoaderDataGraph::classes_do(&dumper);
1944 }
1945 }
1946
1947 void InlinedObjects::add_flat_array(oop array) {
1948 if (_flat_arrays == nullptr) {
1949 _flat_arrays = new (mtServiceability) GrowableArray<oop>(100, mtServiceability);
1950 }
1951 _flat_arrays->append(array);
1952 }
1953
1954 void InlinedObjects::dump_flat_arrays(AbstractDumpWriter* writer) {
1955 if (_flat_arrays != nullptr) {
1956 // For each flat array the record contains tag (u1), object ID and class ID.
1957 int size = _flat_arrays->length() * (1 + sizeof(address) + sizeof(address));
1958
1959 DumperSupport::write_header(writer, HPROF_FLAT_ARRAYS, (u4)size);
1960 for (GrowableArrayIterator<oop> it = _flat_arrays->begin(); it != _flat_arrays->end(); ++it) {
1961 flatArrayOop array = flatArrayOop(*it);
1962 FlatArrayKlass* array_klass = FlatArrayKlass::cast(array->klass());
1963 InlineKlass* element_klass = array_klass->element_klass();
1964 writer->write_u1(HPROF_FLAT_ARRAY);
1965 writer->write_objectID(array);
1966 writer->write_classID(element_klass);
1967 }
1968 }
1969 }
1970
1971
1972 // Support class used to generate HPROF_UTF8 records from the entries in the
1973 // SymbolTable.
1974
1975 class SymbolTableDumper : public SymbolClosure {
1976 private:
1977 AbstractDumpWriter* _writer;
1978 AbstractDumpWriter* writer() const { return _writer; }
1979 public:
1980 SymbolTableDumper(AbstractDumpWriter* writer) { _writer = writer; }
1981 void do_symbol(Symbol** p);
1982 };
1983
1984 void SymbolTableDumper::do_symbol(Symbol** p) {
1985 ResourceMark rm;
1986 Symbol* sym = *p;
1987 int len = sym->utf8_length();
1988 if (len > 0) {
1989 char* s = sym->as_utf8();
1990 DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1991 writer()->write_symbolID(sym);
2484 return;
2485 }
2486 }
2487
2488 if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2489 return;
2490 }
2491
2492 if (o->is_instance()) {
2493 // create a HPROF_GC_INSTANCE record for each object
2494 DumperSupport::dump_instance(writer(), o, &_class_cache);
2495 // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2496 // (mounted virtual threads are dumped with their carriers).
2497 if (java_lang_VirtualThread::is_instance(o)
2498 && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2499 _vthread_dumper->dump_vthread(o, writer());
2500 }
2501 } else if (o->is_objArray()) {
2502 // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2503 DumperSupport::dump_object_array(writer(), objArrayOop(o));
2504 } else if (o->is_flatArray()) {
2505 DumperSupport::dump_flat_array(writer(), flatArrayOop(o), &_class_cache);
2506 } else if (o->is_typeArray()) {
2507 // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2508 DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2509 }
2510 }
2511
2512 // The dumper controller for parallel heap dump
2513 class DumperController : public CHeapObj<mtInternal> {
2514 private:
2515 Monitor* _lock;
2516 Mutex* _global_writer_lock;
2517
2518 const uint _dumper_number;
2519 uint _complete_number;
2520
2521 bool _started; // VM dumper started and acquired global writer lock
2522
2523 public:
2524 DumperController(uint number) :
2525 // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
2565 _complete_number++;
2566 // propagate local error to global if any
2567 if (local_writer->has_error()) {
2568 global_writer->set_error(local_writer->error());
2569 }
2570 ml.notify();
2571 }
2572
2573 void wait_all_dumpers_complete() {
2574 MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
2575 while (_complete_number != _dumper_number) {
2576 ml.wait();
2577 }
2578 }
2579 };
2580
2581 // DumpMerger merges separate dump files into a complete one
2582 class DumpMerger : public StackObj {
2583 private:
2584 DumpWriter* _writer;
2585 InlinedObjects* _inlined_objects;
2586 const char* _path;
2587 bool _has_error;
2588 int _dump_seq;
2589
2590 private:
2591 void merge_file(const char* path);
2592 void merge_done();
2593 void set_error(const char* msg);
2594
2595 public:
2596 DumpMerger(const char* path, DumpWriter* writer, InlinedObjects* inlined_objects, int dump_seq) :
2597 _writer(writer),
2598 _inlined_objects(inlined_objects),
2599 _path(path),
2600 _has_error(_writer->has_error()),
2601 _dump_seq(dump_seq) {}
2602
2603 void do_merge();
2604
2605 // returns path for the parallel DumpWriter (resource allocated)
2606 static char* get_writer_path(const char* base_path, int seq);
2607
2608 };
2609
2610 char* DumpMerger::get_writer_path(const char* base_path, int seq) {
2611 // approximate required buffer size
2612 size_t buf_size = strlen(base_path)
2613 + 2 // ".p"
2614 + 10 // number (that's enough for 2^32 parallel dumpers)
2615 + 1; // '\0'
2616
2617 char* path = NEW_RESOURCE_ARRAY(char, buf_size);
2618 memset(path, 0, buf_size);
2619
2620 os::snprintf(path, buf_size, "%s.p%d", base_path, seq);
2621
2622 return path;
2623 }
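
// Usage sketch: for base_path "java_pid1234.hprof" (hypothetical) and seq 2
// this returns the resource-allocated string "java_pid1234.hprof.p2" - the
// per-dumper segment file that DumpMerger::do_merge() later stitches back
// into the final dump.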
2624
2625
2626 void DumpMerger::merge_done() {
2627 // Finishes the merged dump: writes the HPROF_HEAP_DUMP_END record, appends the flat-array records and flushes the writer.
2628 if (!_has_error) {
2629 DumperSupport::end_of_dump(_writer);
2630 _inlined_objects->dump_flat_arrays(_writer);
2631 _writer->flush();
2632 _inlined_objects->release();
2633 }
2634 _dump_seq = 0; // reset
2635 }
2636
2637 void DumpMerger::set_error(const char* msg) {
2638 assert(msg != nullptr, "sanity check");
2639 log_error(heapdump)("%s (file: %s)", msg, _path);
2640 _writer->set_error(msg);
2641 _has_error = true;
2642 }
2643
2644 #ifdef LINUX
2645 // Merge segmented heap files via sendfile; it is more efficient than a
2646 // read+write combination, which would require transferring data to and from
2647 // user space.
2648 void DumpMerger::merge_file(const char* path) {
2649 TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
2650
2651 int segment_fd = os::open(path, O_RDONLY, 0);
2652 if (segment_fd == -1) {
2732 // restore compressor for further use
2733 _writer->set_compressor(saved_compressor);
2734 merge_done();
2735 }
2736
2737 // The VM operation that performs the heap dump
2738 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2739 private:
2740 DumpWriter* _writer;
2741 JavaThread* _oome_thread;
2742 Method* _oome_constructor;
2743 bool _gc_before_heap_dump;
2744 GrowableArray<Klass*>* _klass_map;
2745
2746 ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
2747 int _thread_dumpers_count;
2748 volatile int _thread_serial_num;
2749 volatile int _frame_serial_num;
2750
2751 volatile int _dump_seq;
2752
2753 // Inlined object support.
2754 InlinedObjects _inlined_objects;
2755
2756 // parallel heap dump support
2757 uint _num_dumper_threads;
2758 DumperController* _dumper_controller;
2759 ParallelObjectIterator* _poi;
2760
2761 // Dumper id of VMDumper thread.
2762 static const int VMDumperId = 0;
2763 // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2764 static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2765 // the 1st dumper calling get_next_dumper_id becomes VM dumper
2766 int get_next_dumper_id() {
2767 return Atomic::fetch_then_add(&_dump_seq, 1);
2768 }
2769
2770 DumpWriter* writer() const { return _writer; }
2771
2772 bool skip_operation() const;
2773
2774 // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2775 void dump_threads(AbstractDumpWriter* writer);
2816 }
2817
2818 ~VM_HeapDumper() {
2819 if (_thread_dumpers != nullptr) {
2820 for (int i = 0; i < _thread_dumpers_count; i++) {
2821 delete _thread_dumpers[i];
2822 }
2823 FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
2824 }
2825
2826 if (_dumper_controller != nullptr) {
2827 delete _dumper_controller;
2828 _dumper_controller = nullptr;
2829 }
2830 delete _klass_map;
2831 }
2832 int dump_seq() { return _dump_seq; }
2833 bool is_parallel_dump() { return _num_dumper_threads > 1; }
2834 void prepare_parallel_dump(WorkerThreads* workers);
2835
2836 InlinedObjects* inlined_objects() { return &_inlined_objects; }
2837
2838 VMOp_Type type() const { return VMOp_HeapDumper; }
2839 virtual bool doit_prologue();
2840 void doit();
2841 void work(uint worker_id);
2842
2843 // UnmountedVThreadDumper implementation
2844 void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
2845 };
2846
2847 bool VM_HeapDumper::skip_operation() const {
2848 return false;
2849 }
2850
2851 // fixes up the current dump record and writes the HPROF_HEAP_DUMP_END record
2852 void DumperSupport::end_of_dump(AbstractDumpWriter* writer) {
2853 writer->finish_dump_segment();
2854
2855 writer->write_u1(HPROF_HEAP_DUMP_END);
2856 writer->write_u4(0);
2857 writer->write_u4(0);
2954 _dumper_controller->lock_global_writer();
2955 _dumper_controller->signal_start();
2956 } else {
2957 _dumper_controller->wait_for_start_signal();
2958 }
2959
2960 if (is_vm_dumper(dumper_id)) {
2961 TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
2962 // Write the file header - we always use 1.0.2
2963 const char* header = "JAVA PROFILE 1.0.2";
2964
2965 // the header is only a few bytes long - no chance to overflow an int
2966 writer()->write_raw(header, strlen(header) + 1); // NUL terminated
2967 writer()->write_u4(oopSize);
2968 // timestamp is current time in ms
2969 writer()->write_u8(os::javaTimeMillis());
2970 // HPROF_UTF8 records
2971 SymbolTableDumper sym_dumper(writer());
2972 SymbolTable::symbols_do(&sym_dumper);
2973
2974 // HPROF_UTF8 records for inlined field names.
2975 inlined_objects()->init();
2976 inlined_objects()->dump_inlined_field_names(writer());
2977
2978 // HPROF_INLINED_FIELDS
2979 inlined_objects()->dump_classed_with_inlined_fields(writer());
2980
2981 // write HPROF_LOAD_CLASS records
2982 {
2983 LoadedClassDumper loaded_class_dumper(writer(), _klass_map);
2984 ClassLoaderDataGraph::classes_do(&loaded_class_dumper);
2985 }
2986
2987 // write HPROF_FRAME and HPROF_TRACE records
2988 // this must be called after _klass_map has been built by iterating the classes above.
2989 dump_stack_traces(writer());
2990
2991 // unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
2992 _dumper_controller->unlock_global_writer();
2993 }
2994
2995 // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
2996
2997 ResourceMark rm;
2998 // share the global compressor; the local DumpWriter is not responsible for its life cycle
2999 DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
3000 writer()->is_overwrite(), writer()->compressor());
3160 (error() != nullptr) ? error() : "reason unknown");
3161 }
3162 return -1;
3163 }
3164
3165 // generate the segmented heap dump into separate files
3166 VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome, num_dump_threads);
3167 VMThread::execute(&dumper);
3168
3169 // record any error that the writer may have encountered
3170 set_error(writer.error());
3171
3172 // The heap dump process is done in two phases:
3173 //
3174 // Phase 1: Concurrent threads directly write heap data to multiple heap files.
3175 // This is done by VM_HeapDumper, which runs inside a safepoint.
3176 //
3177 // Phase 2: Merge the multiple heap files into one complete heap dump file.
3178 // This is done by DumpMerger, which runs outside the safepoint.
3179
3180 DumpMerger merger(path, &writer, dumper.inlined_objects(), dumper.dump_seq());
3181 // Performing the heap dump file merge in the current thread prevents us
3182 // from occupying the VM thread, which would in turn delay GC and other
3183 // VM operations.
3184 merger.do_merge();
3185 if (writer.error() != nullptr) {
3186 set_error(writer.error());
3187 }
3188
3189 // emit JFR event
3190 if (error() == nullptr) {
3191 event.set_destination(path);
3192 event.set_gcBeforeDump(_gc_before_heap_dump);
3193 event.set_size(writer.bytes_written());
3194 event.set_onOutOfMemoryError(_oome);
3195 event.set_overwrite(overwrite);
3196 event.set_compression(compression);
3197 event.commit();
3198 } else {
3199 log_debug(cds, heap)("Error %s while dumping heap", error());
3200 }