21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "gc/shared/gcLocker.hpp"
33 #include "gc/shared/gcVMOperations.hpp"
34 #include "gc/shared/workerThread.hpp"
35 #include "jfr/jfrEvents.hpp"
36 #include "jvm.h"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/fieldStreams.inline.hpp"
41 #include "oops/klass.inline.hpp"
42 #include "oops/objArrayKlass.hpp"
43 #include "oops/objArrayOop.inline.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "oops/typeArrayOop.inline.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/continuationWrapper.inline.hpp"
48 #include "runtime/frame.inline.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/javaCalls.hpp"
51 #include "runtime/javaThread.inline.hpp"
52 #include "runtime/jniHandles.hpp"
53 #include "runtime/os.hpp"
54 #include "runtime/threads.hpp"
55 #include "runtime/threadSMR.hpp"
56 #include "runtime/timerTrace.hpp"
57 #include "runtime/vframe.hpp"
58 #include "runtime/vmOperations.hpp"
59 #include "runtime/vmThread.hpp"
60 #include "services/heapDumper.hpp"
61 #include "services/heapDumperCompression.hpp"
62 #include "services/threadService.hpp"
63 #include "utilities/checkedCast.hpp"
64 #include "utilities/macros.hpp"
65 #include "utilities/ostream.hpp"
66 #ifdef LINUX
420 public:
421 AbstractDumpWriter() :
422 _buffer(nullptr),
423 _size(io_buffer_max_size),
424 _pos(0),
425 _in_dump_segment(false) { }
426
427 // Total number of bytes written to the disk
428 virtual julong bytes_written() const = 0;
429 // Return non-null if error occurred
430 virtual char const* error() const = 0;
431
432 size_t position() const { return _pos; }
433 // writer functions
434 virtual void write_raw(const void* s, size_t len);
435 void write_u1(u1 x);
436 void write_u2(u2 x);
437 void write_u4(u4 x);
438 void write_u8(u8 x);
439 void write_objectID(oop o);
440 void write_rootID(oop* p);
441 void write_symbolID(Symbol* o);
442 void write_classID(Klass* k);
443 void write_id(u4 x);
444
445 // Start a new sub-record. Starts a new heap dump segment if needed.
446 void start_sub_record(u1 tag, u4 len);
447 // Ends the current sub-record.
448 void end_sub_record();
449 // Finishes the current dump segment if not already finished.
450 void finish_dump_segment();
451 // Flush internal buffer to persistent storage
452 virtual void flush() = 0;
453 };
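// A minimal usage sketch of the writer API above (illustrative only; the concrete
// record contents come from the DumperSupport::dump_* helpers later in this file):
//
//   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size); // opens a heap dump segment if needed
//   writer->write_objectID(o);
//   writer->write_u4(STACK_TRACE_ID);
//   ...                                                     // remaining record payload
//   writer->end_sub_record();
//   writer->finish_dump_segment();                          // once, when the segment is complete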
454
455 void AbstractDumpWriter::write_fast(const void* s, size_t len) {
456 assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
457 assert(buffer_size() - position() >= len, "Must fit");
458 DEBUG_ONLY(_sub_record_left -= len);
459 memcpy(buffer() + position(), s, len);
506 }
507
508 void AbstractDumpWriter::write_u8(u8 x) {
509 u8 v;
510 Bytes::put_Java_u8((address)&v, x);
511 WRITE_KNOWN_TYPE(&v, 8);
512 }
513
514 void AbstractDumpWriter::write_address(address a) {
515 #ifdef _LP64
516 write_u8((u8)a);
517 #else
518 write_u4((u4)a);
519 #endif
520 }
521
522 void AbstractDumpWriter::write_objectID(oop o) {
523 write_address(cast_from_oop<address>(o));
524 }
525
526 void AbstractDumpWriter::write_rootID(oop* p) {
527 write_address((address)p);
528 }
529
530 void AbstractDumpWriter::write_symbolID(Symbol* s) {
531 write_address((address)((uintptr_t)s));
532 }
533
534 void AbstractDumpWriter::write_id(u4 x) {
535 #ifdef _LP64
536 write_u8((u8) x);
537 #else
538 write_u4(x);
539 #endif
540 }
541
542 // We use the java mirror as the class ID
543 void AbstractDumpWriter::write_classID(Klass* k) {
544 write_objectID(k->java_mirror());
545 }
711 }
712 }
713 _pos = 0; // reset pos to make internal buffer available
714
715 if (result != nullptr) {
716 set_error(result);
717 }
718 }
719
720 void DumpWriter::do_compress() {
721 const char* msg = _compressor->compress(_buffer, _pos, _out_buffer, _out_size,
722 _tmp_buffer, _tmp_size, &_out_pos);
723
724 if (msg != nullptr) {
725 set_error(msg);
726 }
727 }
728
729 class DumperClassCacheTable;
730 class DumperClassCacheTableEntry;
731
732 // Support class with a collection of functions used when dumping the heap
733 class DumperSupport : AllStatic {
734 public:
735
736 // write a header of the given type
737 static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);
738
739 // returns hprof tag for the given type signature
740 static hprofTag sig2tag(Symbol* sig);
741 // returns hprof tag for the given basic type
742 static hprofTag type2tag(BasicType type);
743 // Returns the size of the data to write.
744 static u4 sig2size(Symbol* sig);
745
746 // returns the size of the instance of the given class
747 static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);
748
749 // dump a jfloat
750 static void dump_float(AbstractDumpWriter* writer, jfloat f);
751 // dump a jdouble
752 static void dump_double(AbstractDumpWriter* writer, jdouble d);
753 // dumps the raw value of the given field
754 static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
755 // returns the size of the static fields; also counts the static fields
756 static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
757 // dumps static fields of the given class
758 static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
759 // dump the raw values of the instance fields of the given object
760 static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
761 // get the count of the instance fields for a given class
762 static u2 get_instance_fields_count(InstanceKlass* ik);
763 // dumps the definition of the instance fields for a given class
764 static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
765 // creates HPROF_GC_INSTANCE_DUMP record for the given object
766 static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
767 // creates HPROF_GC_CLASS_DUMP record for the given instance class
768 static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
769 // creates HPROF_GC_CLASS_DUMP record for a given array class
770 static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
771
772 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
773 static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
774 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
775 static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
776 // create HPROF_FRAME record for the given method and bci
777 static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
778
779 // check if we need to truncate an array
780 static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
781
782 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
783 static void end_of_dump(AbstractDumpWriter* writer);
784
785 static oop mask_dormant_archived_object(oop o, oop ref_obj) {
786 if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
787 // Ignore this object since the corresponding java mirror is not loaded.
788 // Might be a dormant archive object.
789 report_dormant_archived_object(o, ref_obj);
790 return nullptr;
791 } else {
792 return o;
793 }
794 }
795
796 static void report_dormant_archived_object(oop o, oop ref_obj) {
797 if (log_is_enabled(Trace, aot, heap)) {
798 ResourceMark rm;
799 if (ref_obj != nullptr) {
800 log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
801 p2i(o), o->klass()->external_name(),
802 p2i(ref_obj), ref_obj->klass()->external_name());
803 } else {
804 log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
805 p2i(o), o->klass()->external_name());
806 }
807 }
808 }
809 };
810
811 // Hash table mapping klasses to their cached field metadata. This should greatly
812 // improve heap dumping performance. This hash table is supposed to be used by a single
813 // thread only.
814 //
815 class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
816 friend class DumperClassCacheTable;
817 private:
818 GrowableArray<char> _sigs_start;
819 GrowableArray<int> _offsets;
820 u4 _instance_size;
821 int _entries;
822
823 public:
824 DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};
825
826 int field_count() { return _entries; }
827 char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
828 int offset(int field_idx) { return _offsets.at(field_idx); }
829 u4 instance_size() { return _instance_size; }
830 };
831
832 class DumperClassCacheTable {
833 private:
834 // HashTable SIZE is specified at compile time so we
835 // use 1031 which is the first prime after 1024.
836 static constexpr size_t TABLE_SIZE = 1031;
837
838 // Maintain the cache for N classes. This limits memory footprint
839 // impact, regardless of how many classes we have in the dump.
840 // This also improves look up performance by keeping the statically
841 // sized table from overloading.
842 static constexpr int CACHE_TOP = 256;
843
844 typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
845 TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
846 PtrTable* _ptrs;
847
848 // Single-slot cache to handle the major case of objects of the same
849 // class back-to-back, e.g. from T[].
850 InstanceKlass* _last_ik;
851 DumperClassCacheTableEntry* _last_entry;
852
853 void unlink_all(PtrTable* table) {
854 class CleanupEntry: StackObj {
855 public:
856 bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
857 delete entry;
858 return true;
859 }
860 } cleanup;
861 table->unlink(&cleanup);
862 }
863
864 public:
865 DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
866 if (_last_ik == ik) {
867 return _last_entry;
868 }
869
870 DumperClassCacheTableEntry* entry;
871 DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
872 if (from_cache == nullptr) {
873 entry = new DumperClassCacheTableEntry();
874 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
875 if (!fld.access_flags().is_static()) {
876 Symbol* sig = fld.signature();
877 entry->_sigs_start.push(sig->char_at(0));
878 entry->_offsets.push(fld.offset());
879 entry->_entries++;
880 entry->_instance_size += DumperSupport::sig2size(sig);
881 }
882 }
883
884 if (_ptrs->number_of_entries() >= CACHE_TOP) {
885 // We do not track the individual hit rates for table entries.
886 // Purge the entire table, and let the cache catch up with new
887 // distribution.
888 unlink_all(_ptrs);
889 }
890
891 _ptrs->put(ik, entry);
892 } else {
893 entry = *from_cache;
894 }
895
896 // Remember for single-slot cache.
897 _last_ik = ik;
898 _last_entry = entry;
899
900 return entry;
901 }
902
903 DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}
904
905 ~DumperClassCacheTable() {
906 unlink_all(_ptrs);
907 delete _ptrs;
908 }
909 };
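// Usage sketch (based on dump_instance below): each heap dumper thread owns its own
// DumperClassCacheTable, so no locking is needed, and every instance dump resolves the
// field layout through it:
//
//   DumperClassCacheTableEntry* e = class_cache->lookup_or_create(ik);
//   u4 payload_size = e->instance_size();            // bytes of field data that follow
//   for (int i = 0; i < e->field_count(); i++) {
//     // e->sig_start(i) and e->offset(i) drive dump_field_value()
//   }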
910
911 // write a header of the given type
912 void DumperSupport:: write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
913 writer->write_u1(tag);
914 writer->write_u4(0); // current ticks
915 writer->write_u4(len);
916 }
917
918 // returns hprof tag for the given type signature
919 hprofTag DumperSupport::sig2tag(Symbol* sig) {
920 switch (sig->char_at(0)) {
921 case JVM_SIGNATURE_CLASS : return HPROF_NORMAL_OBJECT;
922 case JVM_SIGNATURE_ARRAY : return HPROF_NORMAL_OBJECT;
923 case JVM_SIGNATURE_BYTE : return HPROF_BYTE;
924 case JVM_SIGNATURE_CHAR : return HPROF_CHAR;
925 case JVM_SIGNATURE_FLOAT : return HPROF_FLOAT;
926 case JVM_SIGNATURE_DOUBLE : return HPROF_DOUBLE;
927 case JVM_SIGNATURE_INT : return HPROF_INT;
928 case JVM_SIGNATURE_LONG : return HPROF_LONG;
929 case JVM_SIGNATURE_SHORT : return HPROF_SHORT;
930 case JVM_SIGNATURE_BOOLEAN : return HPROF_BOOLEAN;
1029 break;
1030 }
1031 case JVM_SIGNATURE_LONG : {
1032 jlong l = obj->long_field(offset);
1033 writer->write_u8(l);
1034 break;
1035 }
1036 case JVM_SIGNATURE_BOOLEAN : {
1037 jboolean b = obj->bool_field(offset);
1038 writer->write_u1(b);
1039 break;
1040 }
1041 default : {
1042 ShouldNotReachHere();
1043 break;
1044 }
1045 }
1046 }
1047
1048 // returns the size of the instance of the given class
1049 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1050 if (class_cache_entry != nullptr) {
1051 return class_cache_entry->instance_size();
1052 } else {
1053 u4 size = 0;
1054 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1055 if (!fld.access_flags().is_static()) {
1056 size += sig2size(fld.signature());
1057 }
1058 }
1059 return size;
1060 }
1061 }
1062
1063 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1064 field_count = 0;
1065 u4 size = 0;
1066
1067 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1068 if (fldc.access_flags().is_static()) {
1069 field_count++;
1070 size += sig2size(fldc.signature());
1071 }
1072 }
1073
1074 // Add in resolved_references which is referenced by the cpCache
1075 // The resolved_references is an array per InstanceKlass holding the
1076 // strings and other oops resolved from the constant pool.
1077 oop resolved_references = ik->constants()->resolved_references_or_null();
1078 if (resolved_references != nullptr) {
1079 field_count++;
1080 size += sizeof(address);
1081
1082 // Add in the resolved_references of the used previous versions of the class
1083 // in the case of RedefineClasses
1084 InstanceKlass* prev = ik->previous_versions();
1085 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1086 field_count++;
1087 size += sizeof(address);
1088 prev = prev->previous_versions();
1091
1092 // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1093 // arrays.
1094 oop init_lock = ik->init_lock();
1095 if (init_lock != nullptr) {
1096 field_count++;
1097 size += sizeof(address);
1098 }
1099
1100 // We write the value itself plus a name and a one byte type tag per field.
1101 return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1102 }
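// Worked example for the formula above (assuming LP64, so sizeof(address) == 8):
// a class with two static ints (2 * 4 bytes), one static Object field (8 bytes) and a
// resolved_references array (one extra address-sized pseudo-field) gives size = 24 and
// field_count = 4, so the returned payload size is 24 + 4 * (8 + 1) = 60 bytes: each
// field contributes its value, a symbol ID for its name and a one-byte type tag.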
1103
1104 // dumps static fields of the given class
1105 void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
1106 InstanceKlass* ik = InstanceKlass::cast(k);
1107
1108 // dump the field descriptors and raw values
1109 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1110 if (fld.access_flags().is_static()) {
1111 Symbol* sig = fld.signature();
1112
1113 writer->write_symbolID(fld.name()); // name
1114 writer->write_u1(sig2tag(sig)); // type
1115
1116 // value
1117 dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1118 }
1119 }
1120
1121 // Add resolved_references for each class that has them
1122 oop resolved_references = ik->constants()->resolved_references_or_null();
1123 if (resolved_references != nullptr) {
1124 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1125 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1126 writer->write_objectID(resolved_references);
1127
1128 // Also write any previous versions
1129 InstanceKlass* prev = ik->previous_versions();
1130 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1131 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1132 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1133 writer->write_objectID(prev->constants()->resolved_references());
1134 prev = prev->previous_versions();
1135 }
1136 }
1137
1138 // Add init lock to the end if the class is not yet initialized
1139 oop init_lock = ik->init_lock();
1140 if (init_lock != nullptr) {
1141 writer->write_symbolID(vmSymbols::init_lock_name()); // name
1142 writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
1143 writer->write_objectID(init_lock);
1144 }
1145 }
1146
1147 // dump the raw values of the instance fields of the given object
1148 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {
1149 assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1150 for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1151 dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
1152 }
1153 }
1154
1155 // gets the count of the instance fields for a given class
1156 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1157 u2 field_count = 0;
1158
1159 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1160 if (!fldc.access_flags().is_static()) field_count++;
1161 }
1162
1163 return field_count;
1164 }
1165
1166 // dumps the definition of the instance fields for a given class
1167 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
1168 InstanceKlass* ik = InstanceKlass::cast(k);
1169
1170 // dump the field descriptors
1171 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1172 if (!fld.access_flags().is_static()) {
1173 Symbol* sig = fld.signature();
1174
1175 writer->write_symbolID(fld.name()); // name
1176 writer->write_u1(sig2tag(sig)); // type
1177 }
1178 }
1179 }
1180
1181 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1182 void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
1183 InstanceKlass* ik = InstanceKlass::cast(o->klass());
1184
1185 DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1186
1187 u4 is = instance_size(ik, cache_entry);
1188 u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1189
1190 writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1191 writer->write_objectID(o);
1192 writer->write_u4(STACK_TRACE_ID);
1193
1194 // class ID
1195 writer->write_classID(ik);
1196
1197 // number of bytes that follow
1198 writer->write_u4(is);
1199
1200 // field values
1201 dump_instance_fields(writer, o, cache_entry);
1202
1203 writer->end_sub_record();
1204 }
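// The size computed above mirrors the HPROF_GC_INSTANCE_DUMP sub-record layout:
//   u1    sub-record tag            (the leading 1)
//   ID    object ID                 (sizeof(address))
//   u4    stack trace serial number (4)
//   ID    class ID                  (sizeof(address))
//   u4    number of field bytes     (4)
//   [u1]* instance field values     (is)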
1205
1206 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1207 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik) {
1208 // We can safepoint and do a heap dump at a point where we have a Klass,
1209 // but no java mirror class has been set up for it. So we need to check
1210 // that the class is at least loaded, to avoid a crash from a null mirror.
1211 if (!ik->is_loaded()) {
1212 return;
1213 }
1214
1215 u2 static_fields_count = 0;
1216 u4 static_size = get_static_fields_size(ik, static_fields_count);
1217 u2 instance_fields_count = get_instance_fields_count(ik);
1218 u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1219 u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1220
1221 writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1280
1281 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1282 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1283 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1284
1285 writer->write_objectID(oop(nullptr)); // reserved
1286 writer->write_objectID(oop(nullptr));
1287 writer->write_u4(0); // instance size
1288 writer->write_u2(0); // constant pool
1289 writer->write_u2(0); // static fields
1290 writer->write_u2(0); // instance fields
1291
1292 writer->end_sub_record();
1293
1294 }
1295
1296 // HPROF uses a u4 as the record length field,
1297 // which means we need to truncate arrays that are too long.
1298 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1299 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1300 assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
1301
1302 int length = array->length();
1303
1304 int type_size;
1305 if (type == T_OBJECT) {
1306 type_size = sizeof(address);
1307 } else {
1308 type_size = type2aelembytes(type);
1309 }
1310
1311 size_t length_in_bytes = (size_t)length * type_size;
1312 uint max_bytes = max_juint - header_size;
1313
1314 if (length_in_bytes > max_bytes) {
1315 length = max_bytes / type_size;
1316 length_in_bytes = (size_t)length * type_size;
1317
1318 warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1319 type2name_tab[type], array->length(), length);
1320 }
1321 return length;
1322 }
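// Rough numbers for the truncation above: for a long[] (type_size == 8) on LP64,
// max_bytes is max_juint minus a small header, so lengths above roughly
// (2^32 - 1) / 8, i.e. about 536 million elements, are clipped and the warning is
// printed. Object arrays use sizeof(address) as the per-element size instead.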
1323
1324 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1325 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
1326 // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1327 short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1328 int length = calculate_array_max_length(writer, array, header_size);
1329 u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1330
1331 writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1332 writer->write_objectID(array);
1333 writer->write_u4(STACK_TRACE_ID);
1334 writer->write_u4(length);
1335
1336 // array class ID
1337 writer->write_classID(array->klass());
1338
1339 // [id]* elements
1340 for (int index = 0; index < length; index++) {
1341 oop o = array->obj_at(index);
1342 o = mask_dormant_archived_object(o, array);
1343 writer->write_objectID(o);
1344 }
1345
1346 writer->end_sub_record();
1347 }
1348
1349 #define WRITE_ARRAY(Array, Type, Size, Length) \
1350 for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1351
1352 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1353 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1354 BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1355 // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1356 short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1357
1358 int length = calculate_array_max_length(writer, array, header_size);
1359 int type_size = type2aelembytes(type);
1360 u4 length_in_bytes = (u4)length * type_size;
1361 u4 size = header_size + length_in_bytes;
1362
1363 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1485 int len = sym->utf8_length();
1486 if (len > 0) {
1487 char* s = sym->as_utf8();
1488 DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1489 writer()->write_symbolID(sym);
1490 writer()->write_raw(s, len);
1491 }
1492 }
1493
1494 // Support class used to generate HPROF_GC_CLASS_DUMP records
1495
1496 class ClassDumper : public KlassClosure {
1497 private:
1498 AbstractDumpWriter* _writer;
1499 AbstractDumpWriter* writer() const { return _writer; }
1500
1501 public:
1502 ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1503
1504 void do_klass(Klass* k) {
1505 if (k->is_instance_klass()) {
1506 DumperSupport::dump_instance_class(writer(), InstanceKlass::cast(k));
1507 } else {
1508 DumperSupport::dump_array_class(writer(), k);
1509 }
1510 }
1511 };
1512
1513 // Support class used to generate HPROF_LOAD_CLASS records
1514
1515 class LoadedClassDumper : public LockedClassesDo {
1516 private:
1517 AbstractDumpWriter* _writer;
1518 GrowableArray<Klass*>* _klass_map;
1519 u4 _class_serial_num;
1520 AbstractDumpWriter* writer() const { return _writer; }
1521 void add_class_serial_number(Klass* k, int serial_num) {
1522 _klass_map->at_put_grow(serial_num, k);
1523 }
1524 public:
1525 LoadedClassDumper(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map)
1526 : _writer(writer), _klass_map(klass_map), _class_serial_num(0) {}
1527
1528 void do_klass(Klass* k) {
1529 // len of HPROF_LOAD_CLASS record
1530 u4 remaining = 2 * oopSize + 2 * sizeof(u4);
1531 DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
1532 // class serial number is just a number
1533 writer()->write_u4(++_class_serial_num);
1534 // class ID
1535 writer()->write_classID(k);
1536 // add the Klass* and class serial number pair
1537 add_class_serial_number(k, _class_serial_num);
1538 writer()->write_u4(STACK_TRACE_ID);
1539 // class name ID
1540 Symbol* name = k->name();
1541 writer()->write_symbolID(name);
1542 }
1543 };
1544
1545 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
1546
1547 class JNILocalsDumper : public OopClosure {
1548 private:
1934 RegisterMap::WalkContinuation::skip);
1935 switch (_thread_type) {
1936 case ThreadType::Platform:
1937 if (!_java_thread->has_last_Java_frame()) {
1938 return nullptr;
1939 }
1940 return _java_thread->is_vthread_mounted()
1941 ? _java_thread->carrier_last_java_vframe(&reg_map)
1942 : _java_thread->platform_thread_last_java_vframe(&reg_map);
1943
1944 case ThreadType::MountedVirtual:
1945 return _java_thread->last_java_vframe(&reg_map);
1946
1947 default: // make compilers happy
1948 break;
1949 }
1950 ShouldNotReachHere();
1951 return nullptr;
1952 }
1953
1954 // Callback to dump thread-related data for unmounted virtual threads;
1955 // implemented by VM_HeapDumper.
1956 class UnmountedVThreadDumper {
1957 public:
1958 virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
1959 };
1960
1961 // Support class used when iterating over the heap.
1962 class HeapObjectDumper : public ObjectClosure {
1963 private:
1964 AbstractDumpWriter* _writer;
1965 AbstractDumpWriter* writer() { return _writer; }
1966 UnmountedVThreadDumper* _vthread_dumper;
1967
1968 DumperClassCacheTable _class_cache;
1969
1970 public:
1971 HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper)
1972 : _writer(writer), _vthread_dumper(vthread_dumper) {}
1973
1974 // called for each object in the heap
1975 void do_object(oop o);
1976 };
1977
1978 void HeapObjectDumper::do_object(oop o) {
1979 // skip classes as these are emitted as HPROF_GC_CLASS_DUMP records
1980 if (o->klass() == vmClasses::Class_klass()) {
1981 if (!java_lang_Class::is_primitive(o)) {
1982 return;
1983 }
1984 }
1985
1986 if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
1987 return;
1988 }
1989
1990 if (o->is_instance()) {
1991 // create a HPROF_GC_INSTANCE record for each object
1992 DumperSupport::dump_instance(writer(), o, &_class_cache);
1993 // If we encounter an unmounted virtual thread it needs to be dumped explicitly
1994 // (mounted virtual threads are dumped with their carriers).
1995 if (java_lang_VirtualThread::is_instance(o)
1996 && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
1997 _vthread_dumper->dump_vthread(o, writer());
1998 }
1999 } else if (o->is_objArray()) {
2000 // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2001 DumperSupport::dump_object_array(writer(), objArrayOop(o));
2002 } else if (o->is_typeArray()) {
2003 // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2004 DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2005 }
2006 }
2007
2008 // The dumper controller for parallel heap dump
2009 class DumperController : public CHeapObj<mtInternal> {
2010 private:
2011 Monitor* _lock;
2012 Mutex* _global_writer_lock;
2013
2014 const uint _dumper_number;
2015 uint _complete_number;
2016
2017 bool _started; // VM dumper started and acquired global writer lock
2018
2019 public:
2020 DumperController(uint number) :
2021 // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
2229 // The VM operation that performs the heap dump
2230 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2231 private:
2232 DumpWriter* _writer;
2233 JavaThread* _oome_thread;
2234 Method* _oome_constructor;
2235 bool _gc_before_heap_dump;
2236 GrowableArray<Klass*>* _klass_map;
2237
2238 ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
2239 int _thread_dumpers_count;
2240 volatile int _thread_serial_num;
2241 volatile int _frame_serial_num;
2242
2243 volatile int _dump_seq;
2244 // parallel heap dump support
2245 uint _num_dumper_threads;
2246 DumperController* _dumper_controller;
2247 ParallelObjectIterator* _poi;
2248
2249 // Dumper id of VMDumper thread.
2250 static const int VMDumperId = 0;
2251 // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2252 static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2253 // the first dumper to call get_next_dumper_id becomes the VM dumper
2254 int get_next_dumper_id() {
2255 return AtomicAccess::fetch_then_add(&_dump_seq, 1);
2256 }
2257
2258 DumpWriter* writer() const { return _writer; }
2259
2260 bool skip_operation() const;
2261
2262 // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2263 void dump_threads(AbstractDumpWriter* writer);
2264
2265 bool is_oom_thread(JavaThread* thread) const {
2266 return thread == _oome_thread && _oome_constructor != nullptr;
2267 }
2268
2493 JNIHandles::oops_do(&jni_dumper);
2494 // technically not jni roots, but global roots
2495 // for things like preallocated throwable backtraces
2496 Universe::vm_global()->oops_do(&jni_dumper);
2497 // HPROF_GC_ROOT_STICKY_CLASS
2498 // These should be classes in the null class loader data, and not all classes
2499 // if !ClassUnloading
2500 StickyClassDumper sticky_class_dumper(&segment_writer);
2501 ClassLoaderData::the_null_class_loader_data()->classes_do(&sticky_class_dumper);
2502 }
2503
2504 // Heap iteration.
2505 // writes HPROF_GC_INSTANCE_DUMP records.
2506 // After each sub-record is written check_segment_length will be invoked
2507 // to check if the current segment exceeds a threshold. If so, a new
2508 // segment is started.
2509 // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2510 // of the heap dump.
2511
2512 TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
2513 HeapObjectDumper obj_dumper(&segment_writer, this);
2514 if (!is_parallel_dump()) {
2515 Universe::heap()->object_iterate(&obj_dumper);
2516 } else {
2517 // == Parallel dump
2518 _poi->object_iterate(&obj_dumper, worker_id);
2519 }
2520
2521 segment_writer.finish_dump_segment();
2522 segment_writer.flush();
2523 }
2524
2525 _dumper_controller->dumper_complete(&segment_writer, writer());
2526
2527 if (is_vm_dumper(dumper_id)) {
2528 _dumper_controller->wait_all_dumpers_complete();
2529
2530 // flush global writer
2531 writer()->flush();
2532
2533 // At this point, all fragments of the heapdump have been written to separate files.
|
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "gc/shared/gcLocker.hpp"
33 #include "gc/shared/gcVMOperations.hpp"
34 #include "gc/shared/workerThread.hpp"
35 #include "jfr/jfrEvents.hpp"
36 #include "jvm.h"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/fieldStreams.inline.hpp"
41 #include "oops/flatArrayKlass.hpp"
42 #include "oops/flatArrayOop.inline.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "oops/objArrayKlass.hpp"
45 #include "oops/objArrayOop.inline.hpp"
46 #include "oops/oop.inline.hpp"
47 #include "oops/typeArrayOop.inline.hpp"
48 #include "runtime/arguments.hpp"
49 #include "runtime/atomicAccess.hpp"
50 #include "runtime/continuationWrapper.inline.hpp"
51 #include "runtime/frame.inline.hpp"
52 #include "runtime/handles.inline.hpp"
53 #include "runtime/javaCalls.hpp"
54 #include "runtime/javaThread.inline.hpp"
55 #include "runtime/jniHandles.hpp"
56 #include "runtime/os.hpp"
57 #include "runtime/threads.hpp"
58 #include "runtime/threadSMR.hpp"
59 #include "runtime/timerTrace.hpp"
60 #include "runtime/vframe.hpp"
61 #include "runtime/vmOperations.hpp"
62 #include "runtime/vmThread.hpp"
63 #include "services/heapDumper.hpp"
64 #include "services/heapDumperCompression.hpp"
65 #include "services/threadService.hpp"
66 #include "utilities/checkedCast.hpp"
67 #include "utilities/macros.hpp"
68 #include "utilities/ostream.hpp"
69 #ifdef LINUX
423 public:
424 AbstractDumpWriter() :
425 _buffer(nullptr),
426 _size(io_buffer_max_size),
427 _pos(0),
428 _in_dump_segment(false) { }
429
430 // Total number of bytes written to the disk
431 virtual julong bytes_written() const = 0;
432 // Return non-null if error occurred
433 virtual char const* error() const = 0;
434
435 size_t position() const { return _pos; }
436 // writer functions
437 virtual void write_raw(const void* s, size_t len);
438 void write_u1(u1 x);
439 void write_u2(u2 x);
440 void write_u4(u4 x);
441 void write_u8(u8 x);
442 void write_objectID(oop o);
443 void write_objectID(uintptr_t id);
444 void write_rootID(oop* p);
445 void write_symbolID(Symbol* o);
446 void write_classID(Klass* k);
447 void write_id(u4 x);
448
449 // Start a new sub-record. Starts a new heap dump segment if needed.
450 void start_sub_record(u1 tag, u4 len);
451 // Ends the current sub-record.
452 void end_sub_record();
453 // Finishes the current dump segment if not already finished.
454 void finish_dump_segment();
455 // Flush internal buffer to persistent storage
456 virtual void flush() = 0;
457 };
458
459 void AbstractDumpWriter::write_fast(const void* s, size_t len) {
460 assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
461 assert(buffer_size() - position() >= len, "Must fit");
462 DEBUG_ONLY(_sub_record_left -= len);
463 memcpy(buffer() + position(), s, len);
510 }
511
512 void AbstractDumpWriter::write_u8(u8 x) {
513 u8 v;
514 Bytes::put_Java_u8((address)&v, x);
515 WRITE_KNOWN_TYPE(&v, 8);
516 }
517
518 void AbstractDumpWriter::write_address(address a) {
519 #ifdef _LP64
520 write_u8((u8)a);
521 #else
522 write_u4((u4)a);
523 #endif
524 }
525
526 void AbstractDumpWriter::write_objectID(oop o) {
527 write_address(cast_from_oop<address>(o));
528 }
529
530 void AbstractDumpWriter::write_objectID(uintptr_t id) {
531 write_address((address)id);
532 }
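// The uintptr_t overload allows IDs that do not correspond to a real heap address;
// the flat-object support below uses it to emit synthetic object IDs for flattened
// fields and flat array elements (see DumperFlatObjectList).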
533
534 void AbstractDumpWriter::write_rootID(oop* p) {
535 write_address((address)p);
536 }
537
538 void AbstractDumpWriter::write_symbolID(Symbol* s) {
539 write_address((address)((uintptr_t)s));
540 }
541
542 void AbstractDumpWriter::write_id(u4 x) {
543 #ifdef _LP64
544 write_u8((u8) x);
545 #else
546 write_u4(x);
547 #endif
548 }
549
550 // We use the java mirror as the class ID
551 void AbstractDumpWriter::write_classID(Klass* k) {
552 write_objectID(k->java_mirror());
553 }
719 }
720 }
721 _pos = 0; // reset pos to make internal buffer available
722
723 if (result != nullptr) {
724 set_error(result);
725 }
726 }
727
728 void DumpWriter::do_compress() {
729 const char* msg = _compressor->compress(_buffer, _pos, _out_buffer, _out_size,
730 _tmp_buffer, _tmp_size, &_out_pos);
731
732 if (msg != nullptr) {
733 set_error(msg);
734 }
735 }
736
737 class DumperClassCacheTable;
738 class DumperClassCacheTableEntry;
739 class DumperFlatObject;
740 class DumperFlatObjectList;
741
742 // Support class with a collection of functions used when dumping the heap
743 class DumperSupport : AllStatic {
744 public:
745
746 // write a header of the given type
747 static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);
748
749 // returns hprof tag for the given type signature
750 static hprofTag sig2tag(Symbol* sig);
751 // returns hprof tag for the given basic type
752 static hprofTag type2tag(BasicType type);
753 // Returns the size of the data to write.
754 static u4 sig2size(Symbol* sig);
755
756 // returns the size of the instance of the given class
757 static u4 instance_size(InstanceKlass* ik);
758
759 // dump a jfloat
760 static void dump_float(AbstractDumpWriter* writer, jfloat f);
761 // dump a jdouble
762 static void dump_double(AbstractDumpWriter* writer, jdouble d);
763 // dumps the raw value of the given field
764 static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
765 // returns the size of the static fields; also counts the static fields
766 static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
767 // dumps static fields of the given class
768 static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
769 // dumps the raw values of the instance fields of the given object and fills flat_fields
770 static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
771 DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields);
772 // get the count of the instance fields for a given class
773 static u2 get_instance_fields_count(InstanceKlass* ik);
774 // dumps the definition of the instance fields for a given class
775 static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k);
776 // creates HPROF_GC_INSTANCE_DUMP record for the given object and fills flat_fields
777 static void dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
778 DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields);
779 // creates HPROF_GC_CLASS_DUMP record for the given instance class
780 static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
781 // creates HPROF_GC_CLASS_DUMP record for a given array class
782 static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
783
784 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array; fills flat_elements if the array is a flat array
785 static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements);
786 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
787 static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
788 // create HPROF_FRAME record for the given method and bci
789 static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
790
791 // check if we need to truncate an array
792 static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
793
794 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
795 static void end_of_dump(AbstractDumpWriter* writer);
796
797 static oop mask_dormant_archived_object(oop o, oop ref_obj) {
798 if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
799 // Ignore this object since the corresponding java mirror is not loaded.
800 // Might be a dormant archive object.
801 report_dormant_archived_object(o, ref_obj);
802 return nullptr;
803 } else {
804 return o;
805 }
806 }
807
808 static void report_dormant_archived_object(oop o, oop ref_obj) {
809 if (log_is_enabled(Trace, aot, heap)) {
810 ResourceMark rm;
811 if (ref_obj != nullptr) {
812 log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
813 p2i(o), o->klass()->external_name(),
814 p2i(ref_obj), ref_obj->klass()->external_name());
815 } else {
816 log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
817 p2i(o), o->klass()->external_name());
818 }
819 }
820 }
821
822 // Direct instances of ObjArrayKlass represent the Java types that Java code can see.
823 // RefArrayKlass/FlatArrayKlass describe different implementations of the arrays; filter them out to avoid duplicates.
824 static bool filter_out_klass(Klass* k) {
825 if (k->is_objArray_klass() && k->kind() != Klass::KlassKind::ObjArrayKlassKind) {
826 return true;
827 }
828 return false;
829 }
830 };
831
832 // Hash table mapping klasses to their cached field metadata. This should greatly
833 // improve heap dumping performance. This hash table is supposed to be used by a single
834 // thread only.
835 //
836 class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
837 friend class DumperClassCacheTable;
838 public:
839 class FieldDescriptor {
840 private:
841 char _sigs_start;
842 int _offset;
843 InlineKlass* _inline_klass; // nullptr for heap object
844 LayoutKind _layout_kind;
845 public:
846 FieldDescriptor(): _sigs_start(0), _offset(0), _inline_klass(nullptr), _layout_kind(LayoutKind::UNKNOWN) {}
847
848 template<typename FieldStreamType>
849 FieldDescriptor(const FieldStreamType& field)
850 : _sigs_start(field.signature()->char_at(0)), _offset(field.offset())
851 {
852 if (field.is_flat()) {
853 const fieldDescriptor& fd = field.field_descriptor();
854 InstanceKlass* holder_klass = fd.field_holder();
855 InlineLayoutInfo* layout_info = holder_klass->inline_layout_info_adr(fd.index());
856 _inline_klass = layout_info->klass();
857 _layout_kind = layout_info->kind();
858 } else {
859 _inline_klass = nullptr;
860 _layout_kind = LayoutKind::REFERENCE;
861 }
862 }
863
864 char sig_start() const { return _sigs_start; }
865 int offset() const { return _offset; }
866 bool is_flat() const { return _inline_klass != nullptr; }
867 InlineKlass* inline_klass() const { return _inline_klass; }
868 LayoutKind layout_kind() const { return _layout_kind; }
869 bool is_flat_nullable() const { return _layout_kind == LayoutKind::NULLABLE_ATOMIC_FLAT; }
870 };
871
872 private:
873 GrowableArray<FieldDescriptor> _fields;
874 u4 _instance_size;
875
876 public:
877 DumperClassCacheTableEntry(): _instance_size(0) {}
878
879 template<typename FieldStreamType>
880 void add_field(const FieldStreamType& field) {
881 _fields.push(FieldDescriptor(field));
882 _instance_size += DumperSupport::sig2size(field.signature());
883 }
884
885 const FieldDescriptor& field(int index) const { return _fields.at(index); }
886 int field_count() const { return _fields.length(); }
887 u4 instance_size() const { return _instance_size; }
888 };
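// For a flat (inlined) field, FieldDescriptor caches the InlineKlass and LayoutKind
// resolved from the holder's InlineLayoutInfo, so the per-object dump code can decide
// without re-walking the field stream whether a field is a plain reference
// (dump_field_value), a flat payload currently marked as null, or a flat payload that
// must be queued on a DumperFlatObjectList and dumped as a separate synthetic instance.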
889
890 class DumperClassCacheTable {
891 private:
892 // HashTable SIZE is specified at compile time so we
893 // use 1031 which is the first prime after 1024.
894 static constexpr size_t TABLE_SIZE = 1031;
895
896 // Maintain the cache for N classes. This limits memory footprint
897 // impact, regardless of how many classes we have in the dump.
898 // This also improves look up performance by keeping the statically
899 // sized table from overloading.
900 static constexpr int CACHE_TOP = 256;
901
902 typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
903 TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
904 PtrTable* _ptrs;
905
906 // Single-slot cache to handle the major case of objects of the same
907 // class back-to-back, e.g. from T[].
908 InstanceKlass* _last_ik;
909 DumperClassCacheTableEntry* _last_entry;
910
911 void unlink_all(PtrTable* table) {
912 class CleanupEntry: StackObj {
913 public:
914 bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
915 delete entry;
916 return true;
917 }
918 } cleanup;
919 table->unlink(&cleanup);
920 }
921
922 public:
923 DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
924 if (_last_ik == ik) {
925 return _last_entry;
926 }
927
928 DumperClassCacheTableEntry* entry;
929 DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
930 if (from_cache == nullptr) {
931 entry = new DumperClassCacheTableEntry();
932 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
933 if (!fld.access_flags().is_static()) {
934 entry->add_field(fld);
935 }
936 }
937
938 if (_ptrs->number_of_entries() >= CACHE_TOP) {
939 // We do not track the individual hit rates for table entries.
940 // Purge the entire table, and let the cache catch up with new
941 // distribution.
942 unlink_all(_ptrs);
943 }
944
945 _ptrs->put(ik, entry);
946 } else {
947 entry = *from_cache;
948 }
949
950 // Remember for single-slot cache.
951 _last_ik = ik;
952 _last_entry = entry;
953
954 return entry;
955 }
956
957 DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}
958
959 ~DumperClassCacheTable() {
960 unlink_all(_ptrs);
961 delete _ptrs;
962 }
963 };
964
965 // Describes a flat object (flattened field or element of a flat array) within its holder oop
966 class DumperFlatObject: public CHeapObj<mtServiceability> {
967 friend class DumperFlatObjectList;
968 private:
969 DumperFlatObject* _next;
970
971 const uintptr_t _id; // object id
972
973 const int _offset;
974 InlineKlass* const _inline_klass;
975
976 public:
977 DumperFlatObject(uintptr_t id, int offset, InlineKlass* inline_klass)
978 : _next(nullptr), _id(id), _offset(offset), _inline_klass(inline_klass) {
979 }
980
981 uintptr_t object_id() const { return _id; }
982 int offset() const { return _offset; }
983 InlineKlass* inline_klass() const { return _inline_klass; }
984 };
985
986 class FlatObjectIdProvider {
987 public:
988 virtual uintptr_t get_id() = 0;
989 };
990
991 // Simple FIFO.
992 class DumperFlatObjectList {
993 private:
994 FlatObjectIdProvider* _id_provider;
995 DumperFlatObject* _head;
996 DumperFlatObject* _tail;
997
998 void push(DumperFlatObject* obj) {
999 if (_head == nullptr) {
1000 _head = _tail = obj;
1001 } else {
1002 assert(_tail != nullptr, "must be");
1003 _tail->_next = obj;
1004 _tail = obj;
1005 }
1006 }
1007
1008 public:
1009 DumperFlatObjectList(FlatObjectIdProvider* id_provider): _id_provider(id_provider), _head(nullptr), _tail(nullptr) {}
1010
1011 bool is_empty() const { return _head == nullptr; }
1012
1013 uintptr_t push(int offset, InlineKlass* inline_klass) {
1014 uintptr_t id = _id_provider->get_id();
1015 DumperFlatObject* obj = new DumperFlatObject(id, offset, inline_klass);
1016 push(obj);
1017 return id;
1018 }
1019
1020 DumperFlatObject* pop() {
1021 assert(!is_empty(), "sanity");
1022 DumperFlatObject* element = _head;
1023 _head = element->_next;
1024 element->_next = nullptr;
1025 return element;
1026 }
1027 };
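// The list is filled while a holder is dumped (dump_instance_fields and
// dump_object_array below push one entry per flattened value) and is then drained so
// that each entry becomes its own HPROF_GC_INSTANCE_DUMP under the synthetic ID
// recorded here. The drain loop itself is outside this excerpt; a sketch of the
// assumed shape:
//
//   while (!flat_list.is_empty()) {
//     DumperFlatObject* f = flat_list.pop();
//     // dump the payload located at f->offset() in the holder, typed by
//     // f->inline_klass(), under object ID f->object_id()
//     delete f;
//   }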
1028
1029 // write a header of the given type
1030 void DumperSupport:: write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
1031 writer->write_u1(tag);
1032 writer->write_u4(0); // current ticks
1033 writer->write_u4(len);
1034 }
1035
1036 // returns hprof tag for the given type signature
1037 hprofTag DumperSupport::sig2tag(Symbol* sig) {
1038 switch (sig->char_at(0)) {
1039 case JVM_SIGNATURE_CLASS : return HPROF_NORMAL_OBJECT;
1040 case JVM_SIGNATURE_ARRAY : return HPROF_NORMAL_OBJECT;
1041 case JVM_SIGNATURE_BYTE : return HPROF_BYTE;
1042 case JVM_SIGNATURE_CHAR : return HPROF_CHAR;
1043 case JVM_SIGNATURE_FLOAT : return HPROF_FLOAT;
1044 case JVM_SIGNATURE_DOUBLE : return HPROF_DOUBLE;
1045 case JVM_SIGNATURE_INT : return HPROF_INT;
1046 case JVM_SIGNATURE_LONG : return HPROF_LONG;
1047 case JVM_SIGNATURE_SHORT : return HPROF_SHORT;
1048 case JVM_SIGNATURE_BOOLEAN : return HPROF_BOOLEAN;
1147 break;
1148 }
1149 case JVM_SIGNATURE_LONG : {
1150 jlong l = obj->long_field(offset);
1151 writer->write_u8(l);
1152 break;
1153 }
1154 case JVM_SIGNATURE_BOOLEAN : {
1155 jboolean b = obj->bool_field(offset);
1156 writer->write_u1(b);
1157 break;
1158 }
1159 default : {
1160 ShouldNotReachHere();
1161 break;
1162 }
1163 }
1164 }
1165
1166 // returns the size of the instance of the given class
1167 u4 DumperSupport::instance_size(InstanceKlass* ik) {
1168 u4 size = 0;
1169 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1170 if (!fld.access_flags().is_static()) {
1171 size += sig2size(fld.signature());
1172 }
1173 }
1174 return size;
1175 }
1176
1177 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1178 field_count = 0;
1179 u4 size = 0;
1180
1181 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1182 if (fldc.access_flags().is_static()) {
1183 assert(!fldc.is_flat(), "static fields cannot be flat");
1184
1185 field_count++;
1186 size += sig2size(fldc.signature());
1187 }
1188 }
1189
1190 // Add in resolved_references which is referenced by the cpCache
1191 // The resolved_references is an array per InstanceKlass holding the
1192 // strings and other oops resolved from the constant pool.
1193 oop resolved_references = ik->constants()->resolved_references_or_null();
1194 if (resolved_references != nullptr) {
1195 field_count++;
1196 size += sizeof(address);
1197
1198 // Add in the resolved_references of the used previous versions of the class
1199 // in the case of RedefineClasses
1200 InstanceKlass* prev = ik->previous_versions();
1201 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1202 field_count++;
1203 size += sizeof(address);
1204 prev = prev->previous_versions();
1207
1208 // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1209 // arrays.
1210 oop init_lock = ik->init_lock();
1211 if (init_lock != nullptr) {
1212 field_count++;
1213 size += sizeof(address);
1214 }
1215
1216 // We write the value itself plus a name and a one byte type tag per field.
1217 return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1218 }
1219
1220 // dumps static fields of the given class
1221 void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
1222 InstanceKlass* ik = InstanceKlass::cast(k);
1223
1224 // dump the field descriptors and raw values
1225 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1226 if (fld.access_flags().is_static()) {
1227 assert(!fld.is_flat(), "static fields cannot be flat");
1228
1229 Symbol* sig = fld.signature();
1230
1231 writer->write_symbolID(fld.name()); // name
1232 writer->write_u1(sig2tag(sig)); // type
1233
1234 // value
1235 dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1236 }
1237 }
1238
1239 // Add resolved_references for each class that has them
1240 oop resolved_references = ik->constants()->resolved_references_or_null();
1241 if (resolved_references != nullptr) {
1242 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1243 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1244 writer->write_objectID(resolved_references);
1245
1246 // Also write any previous versions
1247 InstanceKlass* prev = ik->previous_versions();
1248 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1249 writer->write_symbolID(vmSymbols::resolved_references_name()); // name
1250 writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1251 writer->write_objectID(prev->constants()->resolved_references());
1252 prev = prev->previous_versions();
1253 }
1254 }
1255
1256 // Add init lock to the end if the class is not yet initialized
1257 oop init_lock = ik->init_lock();
1258 if (init_lock != nullptr) {
1259 writer->write_symbolID(vmSymbols::init_lock_name()); // name
1260 writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
1261 writer->write_objectID(init_lock);
1262 }
1263 }
1264
1265 // dumps the raw values of the instance fields of the given object and fills flat_fields
1266 void DumperSupport:: dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
1267 DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields) {
1268 assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1269 for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1270 const DumperClassCacheTableEntry::FieldDescriptor& field = class_cache_entry->field(idx);
1271 int field_offset = offset + field.offset();
1272 if (field.is_flat()) {
1273 // check for possible nulls
1274 if (field.is_flat_nullable()) {
1275 address payload = cast_from_oop<address>(o) + field_offset;
1276 if (field.inline_klass()->is_payload_marked_as_null(payload)) {
1277 writer->write_objectID(nullptr);
1278 continue;
1279 }
1280 }
1281 uintptr_t object_id = flat_fields->push(field_offset, field.inline_klass());
1282 writer->write_objectID(object_id);
1283 } else {
1284 dump_field_value(writer, field.sig_start(), o, field_offset);
1285 }
1286 }
1287 }
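// Note on the nullable branch above: for a NULLABLE_ATOMIC_FLAT layout the null state
// is encoded in the flat payload itself, so is_payload_marked_as_null() is checked
// first and a null object ID is written instead of queueing a flat instance.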
1288
1289 // gets the count of the instance fields for a given class
1290 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1291 u2 field_count = 0;
1292
1293 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1294 if (!fldc.access_flags().is_static()) {
1295 field_count++;
1296 }
1297 }
1298
1299 return field_count;
1300 }
1301
1302 // dumps the definition of the instance fields for a given class
1303 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik) {
1304 // dump the field descriptors
1305 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1306 if (!fld.access_flags().is_static()) {
1307 Symbol* sig = fld.signature();
1308
1309 writer->write_symbolID(fld.name()); // name
1310 writer->write_u1(sig2tag(sig)); // type
1311 }
1312 }
1313 }
1314
1315 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1316 void DumperSupport::dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
1317 DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields) {
1318 DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
1319
1320 u4 is = cache_entry->instance_size();
1321 u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1322
1323 writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1324 writer->write_objectID(id);
1325 writer->write_u4(STACK_TRACE_ID);
1326
1327 // class ID
1328 writer->write_classID(ik);
1329
1330 // number of bytes that follow
1331 writer->write_u4(is);
1332
1333 // field values
1334 if (offset != 0) {
1335 // the object itself is flattened, so its fields are stored without an object header
1336 InlineKlass* inline_klass = InlineKlass::cast(ik);
1337 offset -= inline_klass->payload_offset();
1338 }
1339
1340 dump_instance_fields(writer, o, offset, cache_entry, flat_fields);
1341
1342 writer->end_sub_record();
1343 }
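// Offset bookkeeping for the flattened case above: the cached field offsets are those
// of a standalone instance of the inline class (they include the payload_offset()
// prefix), while a flattened payload starts directly at `offset` inside the holder.
// Subtracting payload_offset() once here makes offset + field.offset() in
// dump_instance_fields land on the correct byte; for a non-flattened object, offset is
// 0 and the cached offsets are used as-is.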
1344
1345 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1346 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik) {
1347 // We can safepoint and do a heap dump at a point where we have a Klass,
1348 // but no java mirror class has been set up for it. So we need to check
1349 // that the class is at least loaded, to avoid a crash from a null mirror.
1350 if (!ik->is_loaded()) {
1351 return;
1352 }
1353
1354 u2 static_fields_count = 0;
1355 u4 static_size = get_static_fields_size(ik, static_fields_count);
1356 u2 instance_fields_count = get_instance_fields_count(ik);
1357 u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1358 u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1359
1360 writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1419
1420 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1421 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1422 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1423
1424 writer->write_objectID(oop(nullptr)); // reserved
1425 writer->write_objectID(oop(nullptr));
1426 writer->write_u4(0); // instance size
1427 writer->write_u2(0); // constant pool
1428 writer->write_u2(0); // static fields
1429 writer->write_u2(0); // instance fields
1430
1431 writer->end_sub_record();
1432
1433 }
1434
1435 // HPROF uses a u4 as the record length field,
1436 // which means we need to truncate arrays that are too long.
1437 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1438 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1439 assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type");
1440
1441 int length = array->length();
1442
1443 int type_size;
1444 if (type == T_OBJECT || type == T_FLAT_ELEMENT) {
1445 type_size = sizeof(address);
1446 } else {
1447 type_size = type2aelembytes(type);
1448 }
1449
1450 size_t length_in_bytes = (size_t)length * type_size;
1451 uint max_bytes = max_juint - header_size;
1452
1453 if (length_in_bytes > max_bytes) {
1454 length = max_bytes / type_size;
1455 length_in_bytes = (size_t)length * type_size;
1456
1457 warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1458 type2name_tab[type], array->length(), length);
1459 }
1460 return length;
1461 }
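
// Worked example (illustrative, 64-bit VM): a long[] with 2^30 elements needs 8 GiB of
// element data, which exceeds max_juint - header_size (just under 4 GiB), so the dumped
// length is truncated to (max_juint - 18) / 8 = 536870909 elements.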
1462
1463 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1464 void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements) {
1465 // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1466 short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1467 int length = calculate_array_max_length(writer, array, header_size);
1468 u4 size = checked_cast<u4>(header_size + length * sizeof(address));
1469
1470 writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1471 writer->write_objectID(array);
1472 writer->write_u4(STACK_TRACE_ID);
1473 writer->write_u4(length);
1474
1475 // array class ID
1476 writer->write_classID(array->klass());
1477
1478 // [id]* elements
1479 if (array->is_flatArray()) {
1480 flatArrayOop farray = flatArrayOop(array);
1481 FlatArrayKlass* faklass = FlatArrayKlass::cast(farray->klass());
1482
1483 InlineKlass* vk = faklass->element_klass();
1484 bool need_null_check = faklass->layout_kind() == LayoutKind::NULLABLE_ATOMIC_FLAT;
1485
1486 for (int index = 0; index < length; index++) {
1487 address addr = (address)farray->value_at_addr(index, faklass->layout_helper());
1488 // check for null
1489 if (need_null_check) {
1490 if (vk->is_payload_marked_as_null(addr)) {
1491 writer->write_objectID(nullptr);
1492 continue;
1493 }
1494 }
1495 // offset in the array oop
1496 int offset = (int)(addr - cast_from_oop<address>(farray));
1497 uintptr_t object_id = flat_elements->push(offset, vk);
1498 writer->write_objectID(object_id);
1499 }
1500 } else {
1501 for (int index = 0; index < length; index++) {
1502 oop o = array->obj_at(index);
1503 o = mask_dormant_archived_object(o, array);
1504 writer->write_objectID(o);
1505 }
1506 }
1507
1508 writer->end_sub_record();
1509 }
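
// Note on flat arrays: the element loop above does not write the element payloads here;
// each flat element is pushed onto flat_elements with an id obtained from the
// FlatObjectIdProvider, and the caller later emits a separate HPROF_GC_INSTANCE_DUMP
// record for it (see FlatObjectDumper::dump_flat_objects).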
1510
1511 #define WRITE_ARRAY(Array, Type, Size, Length) \
1512 for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
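// For example, WRITE_ARRAY(array, int, u4, length) expands to a loop that calls
// writer->write_u4((u4)array->int_at(i)) for each of the first length elements.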
1513
1514 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1515 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1516 BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1517 // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1518 short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1519
1520 int length = calculate_array_max_length(writer, array, header_size);
1521 int type_size = type2aelembytes(type);
1522 u4 length_in_bytes = (u4)length * type_size;
1523 u4 size = header_size + length_in_bytes;
1524
1525 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1647 int len = sym->utf8_length();
1648 if (len > 0) {
1649 char* s = sym->as_utf8();
1650 DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1651 writer()->write_symbolID(sym);
1652 writer()->write_raw(s, len);
1653 }
1654 }
1655
1656 // Support class used to generate HPROF_GC_CLASS_DUMP records
1657
1658 class ClassDumper : public KlassClosure {
1659 private:
1660 AbstractDumpWriter* _writer;
1661 AbstractDumpWriter* writer() const { return _writer; }
1662
1663 public:
1664 ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1665
1666 void do_klass(Klass* k) {
1667 if (DumperSupport::filter_out_klass(k)) {
1668 return;
1669 }
1670 if (k->is_instance_klass()) {
1671 DumperSupport::dump_instance_class(writer(), InstanceKlass::cast(k));
1672 } else {
1673 DumperSupport::dump_array_class(writer(), k);
1674 }
1675 }
1676 };
1677
1678 // Support class used to generate HPROF_LOAD_CLASS records
1679
1680 class LoadedClassDumper : public LockedClassesDo {
1681 private:
1682 AbstractDumpWriter* _writer;
1683 GrowableArray<Klass*>* _klass_map;
1684 u4 _class_serial_num;
1685 AbstractDumpWriter* writer() const { return _writer; }
1686 void add_class_serial_number(Klass* k, int serial_num) {
1687 _klass_map->at_put_grow(serial_num, k);
1688 }
1689 public:
1690 LoadedClassDumper(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map)
1691 : _writer(writer), _klass_map(klass_map), _class_serial_num(0) {}
1692
1693 void do_klass(Klass* k) {
1694 if (DumperSupport::filter_out_klass(k)) {
1695 return;
1696 }
1697 // len of HPROF_LOAD_CLASS record
1698 u4 remaining = 2 * oopSize + 2 * sizeof(u4);
1699 DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
1700 // class serial number is just a number
1701 writer()->write_u4(++_class_serial_num);
1702 // class ID
1703 writer()->write_classID(k);
1704 // add the Klass* and class serial number pair
1705 add_class_serial_number(k, _class_serial_num);
1706 writer()->write_u4(STACK_TRACE_ID);
1707 // class name ID
1708 Symbol* name = k->name();
1709 writer()->write_symbolID(name);
1710 }
1711 };
1712
1713 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
1714
1715 class JNILocalsDumper : public OopClosure {
1716 private:
2102 RegisterMap::WalkContinuation::skip);
2103 switch (_thread_type) {
2104 case ThreadType::Platform:
2105 if (!_java_thread->has_last_Java_frame()) {
2106 return nullptr;
2107 }
2108 return _java_thread->is_vthread_mounted()
2109 ? _java_thread->carrier_last_java_vframe(&reg_map)
2110 : _java_thread->platform_thread_last_java_vframe(&reg_map);
2111
2112 case ThreadType::MountedVirtual:
2113 return _java_thread->last_java_vframe(&reg_map);
2114
2115 default: // make compilers happy
2116 break;
2117 }
2118 ShouldNotReachHere();
2119 return nullptr;
2120 }
2121
2122 class FlatObjectDumper: public FlatObjectIdProvider {
2123 private:
2124 volatile uintptr_t _id_counter;
2125 public:
2126 FlatObjectDumper(): _id_counter(0) {
2127 }
2128
2129 void dump_flat_objects(AbstractDumpWriter* writer, oop holder,
2130 DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects);
2131
2132 // FlatObjectIdProvider implementation
2133 virtual uintptr_t get_id() override {
2134 // need to protect against overflow, so use a cmpxchg loop instead of fetch_then_add
2135 const uintptr_t max_value = (uintptr_t)-1;
2136 uintptr_t old_value = AtomicAccess::load(&_id_counter);
2137 while (old_value != max_value) {
2138 uintptr_t new_value = old_value + 1;
2139 // to avoid conflicts with oop addresses, skip values that are object-aligned
2140 if ((new_value & MinObjAlignmentInBytesMask) == 0) {
2141 new_value++;
2142 }
2143 uintptr_t value = AtomicAccess::cmpxchg(&_id_counter, old_value, new_value);
2144 if (value == old_value) {
2145 // success
2146 return new_value;
2147 }
2148 old_value = value;
2149 }
2150 // if we are here, maximum id value is reached
2151 return max_value;
2152 }
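  // Example (assuming the typical 8-byte object alignment): ids are handed out as
  // 1..7, 9..15, 17.., i.e. multiples of MinObjAlignmentInBytes are skipped so a
  // flat-object id never collides with an oop address used as an object id.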
2153
2154 };
2155
2156 void FlatObjectDumper::dump_flat_objects(AbstractDumpWriter* writer, oop holder,
2157 DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects) {
2158 // DumperSupport::dump_instance can add entries to flat_objects
2159 while (!flat_objects->is_empty()) {
2160 DumperFlatObject* obj = flat_objects->pop();
2161 DumperSupport::dump_instance(writer, obj->object_id(), holder, obj->offset(), obj->inline_klass(), class_cache, flat_objects);
2162 delete obj;
2163 }
2164 }
2165
2166 // Callback to dump thread-related data for unmounted virtual threads;
2167 // implemented by VM_HeapDumper.
2168 class UnmountedVThreadDumper {
2169 public:
2170 virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
2171 };
2172
2173
2174 // Support class used when iterating over the heap.
2175 class HeapObjectDumper : public ObjectClosure {
2176 private:
2177 AbstractDumpWriter* _writer;
2178 AbstractDumpWriter* writer() { return _writer; }
2179 UnmountedVThreadDumper* _vthread_dumper;
2180 FlatObjectDumper* _flat_dumper;
2181
2182 DumperClassCacheTable _class_cache;
2183
2184 public:
2185 HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper, FlatObjectDumper* flat_dumper)
2186 : _writer(writer), _vthread_dumper(vthread_dumper), _flat_dumper(flat_dumper) {}
2187
2188 // called for each object in the heap
2189 void do_object(oop o);
2190 };
2191
2192 void HeapObjectDumper::do_object(oop o) {
2193 // skip classes as these are emitted as HPROF_GC_CLASS_DUMP records
2194 if (o->klass() == vmClasses::Class_klass()) {
2195 if (!java_lang_Class::is_primitive(o)) {
2196 return;
2197 }
2198 }
2199
2200 if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2201 return;
2202 }
2203
2204 if (o->is_instance()) {
2205 DumperFlatObjectList flat_fields(_flat_dumper);
2206 // create a HPROF_GC_INSTANCE record for each object
2207 DumperSupport::dump_instance(writer(),
2208 cast_from_oop<uintptr_t>(o), // object_id is the address
2209 o, 0, // for heap instance holder is oop, offset is 0
2210 InstanceKlass::cast(o->klass()),
2211 &_class_cache, &flat_fields);
2212
2213 // if there are flattened fields, dump them
2214 if (!flat_fields.is_empty()) {
2215 _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_fields);
2216 }
2217
2218 // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2219 // (mounted virtual threads are dumped with their carriers).
2220 if (java_lang_VirtualThread::is_instance(o)
2221 && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2222 _vthread_dumper->dump_vthread(o, writer());
2223 }
2224 } else if (o->is_objArray()) {
2225 DumperFlatObjectList flat_elements(_flat_dumper);
2226 // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2227 DumperSupport::dump_object_array(writer(), objArrayOop(o), &flat_elements);
2228 // if this is a flat array, dump its flattened elements
2229 if (!flat_elements.is_empty()) {
2230 _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_elements);
2231 }
2232 } else if (o->is_typeArray()) {
2233 // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2234 DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2235 }
2236 }
2237
2238 // The dumper controller for parallel heap dump
2239 class DumperController : public CHeapObj<mtInternal> {
2240 private:
2241 Monitor* _lock;
2242 Mutex* _global_writer_lock;
2243
2244 const uint _dumper_number;
2245 uint _complete_number;
2246
2247 bool _started; // VM dumper started and acquired global writer lock
2248
2249 public:
2250 DumperController(uint number) :
2251 // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
2459 // The VM operation that performs the heap dump
2460 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2461 private:
2462 DumpWriter* _writer;
2463 JavaThread* _oome_thread;
2464 Method* _oome_constructor;
2465 bool _gc_before_heap_dump;
2466 GrowableArray<Klass*>* _klass_map;
2467
2468 ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
2469 int _thread_dumpers_count;
2470 volatile int _thread_serial_num;
2471 volatile int _frame_serial_num;
2472
2473 volatile int _dump_seq;
2474 // parallel heap dump support
2475 uint _num_dumper_threads;
2476 DumperController* _dumper_controller;
2477 ParallelObjectIterator* _poi;
2478
2479 // flat value object support
2480 FlatObjectDumper _flat_dumper;
2481
2482 // Dumper id of VMDumper thread.
2483 static const int VMDumperId = 0;
2484 // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2485 static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2486 // the 1st dumper calling get_next_dumper_id becomes VM dumper
2487 int get_next_dumper_id() {
2488 return AtomicAccess::fetch_then_add(&_dump_seq, 1);
2489 }
2490
2491 DumpWriter* writer() const { return _writer; }
2492
2493 bool skip_operation() const;
2494
2495 // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2496 void dump_threads(AbstractDumpWriter* writer);
2497
2498 bool is_oom_thread(JavaThread* thread) const {
2499 return thread == _oome_thread && _oome_constructor != nullptr;
2500 }
2501
2726 JNIHandles::oops_do(&jni_dumper);
2727 // technically not jni roots, but global roots
2728 // for things like preallocated throwable backtraces
2729 Universe::vm_global()->oops_do(&jni_dumper);
2730 // HPROF_GC_ROOT_STICKY_CLASS
2731 // These should be classes in the null class loader data, and not all classes
2732 // if !ClassUnloading
2733 StickyClassDumper sticky_class_dumper(&segment_writer);
2734 ClassLoaderData::the_null_class_loader_data()->classes_do(&sticky_class_dumper);
2735 }
2736
2737 // Heap iteration.
2738 // Writes HPROF_GC_INSTANCE_DUMP records.
2739 // After each sub-record is written, check_segment_length will be invoked
2740 // to check if the current segment exceeds a threshold. If so, a new
2741 // segment is started.
2742 // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2743 // of the heap dump.
2744
2745 TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
2746 HeapObjectDumper obj_dumper(&segment_writer, this, &_flat_dumper);
2747 if (!is_parallel_dump()) {
2748 Universe::heap()->object_iterate(&obj_dumper);
2749 } else {
2750 // == Parallel dump
2751 _poi->object_iterate(&obj_dumper, worker_id);
2752 }
2753
2754 segment_writer.finish_dump_segment();
2755 segment_writer.flush();
2756 }
2757
2758 _dumper_controller->dumper_complete(&segment_writer, writer());
2759
2760 if (is_vm_dumper(dumper_id)) {
2761 _dumper_controller->wait_all_dumpers_complete();
2762
2763 // flush global writer
2764 writer()->flush();
2765
2766 // At this point, all fragments of the heapdump have been written to separate files.