1 /*
2 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2023, Alibaba Group Holding Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "gc/shared/gcLocker.hpp"
33 #include "gc/shared/gcVMOperations.hpp"
34 #include "gc/shared/workerThread.hpp"
35 #include "jfr/jfrEvents.hpp"
36 #include "jvm.h"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/fieldStreams.inline.hpp"
41 #include "oops/klass.inline.hpp"
42 #include "oops/objArrayKlass.hpp"
43 #include "oops/objArrayOop.inline.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "oops/typeArrayOop.inline.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/continuationWrapper.inline.hpp"
48 #include "runtime/frame.inline.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/javaCalls.hpp"
51 #include "runtime/javaThread.inline.hpp"
52 #include "runtime/jniHandles.hpp"
53 #include "runtime/os.hpp"
54 #include "runtime/threads.hpp"
55 #include "runtime/threadSMR.hpp"
56 #include "runtime/timerTrace.hpp"
57 #include "runtime/vframe.hpp"
58 #include "runtime/vmOperations.hpp"
59 #include "runtime/vmThread.hpp"
60 #include "services/heapDumper.hpp"
61 #include "services/heapDumperCompression.hpp"
62 #include "services/threadService.hpp"
63 #include "utilities/checkedCast.hpp"
64 #include "utilities/macros.hpp"
65 #include "utilities/ostream.hpp"
66 #ifdef LINUX
420 public:
421 AbstractDumpWriter() :
422 _buffer(nullptr),
423 _size(io_buffer_max_size),
424 _pos(0),
425 _in_dump_segment(false) { }
426
427 // Total number of bytes written to the disk
428 virtual julong bytes_written() const = 0;
429 // Return non-null if error occurred
430 virtual char const* error() const = 0;
431
432 size_t position() const { return _pos; }
433 // writer functions
434 virtual void write_raw(const void* s, size_t len);
435 void write_u1(u1 x);
436 void write_u2(u2 x);
437 void write_u4(u4 x);
438 void write_u8(u8 x);
439 void write_objectID(oop o);
440 void write_rootID(oop* p);
441 void write_symbolID(Symbol* o);
442 void write_classID(Klass* k);
443 void write_id(u4 x);
444
445 // Start a new sub-record. Starts a new heap dump segment if needed.
446 void start_sub_record(u1 tag, u4 len);
447 // Ends the current sub-record.
448 void end_sub_record();
449 // Finishes the current dump segment if not already finished.
450 void finish_dump_segment();
451 // Flush internal buffer to persistent storage
452 virtual void flush() = 0;
453 };
454
455 void AbstractDumpWriter::write_fast(const void* s, size_t len) {
456 assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
457 assert(buffer_size() - position() >= len, "Must fit");
458 DEBUG_ONLY(_sub_record_left -= len);
459 memcpy(buffer() + position(), s, len);
506 }
507
508 void AbstractDumpWriter::write_u8(u8 x) {
509 u8 v;
510 Bytes::put_Java_u8((address)&v, x);
511 WRITE_KNOWN_TYPE(&v, 8);
512 }
513
// Writes a native pointer as an hprof identifier; the identifier width
// (u8 on 64-bit, u4 on 32-bit) follows the platform word size.
void AbstractDumpWriter::write_address(address a) {
#ifdef _LP64
  write_u8((u8)a);
#else
  write_u4((u4)a);
#endif
}
521
// Writes the object's address as its hprof object ID.
void AbstractDumpWriter::write_objectID(oop o) {
  write_address(cast_from_oop<address>(o));
}
525
// Writes the address of the oop slot (the root location itself) as an ID.
void AbstractDumpWriter::write_rootID(oop* p) {
  write_address((address)p);
}
529
// Writes the Symbol's address as its ID; used to cross-reference
// HPROF_UTF8 records from other records (class names, field names, ...).
void AbstractDumpWriter::write_symbolID(Symbol* s) {
  write_address((address)((uintptr_t)s));
}
533
// Widens a u4 value to the platform's identifier width and writes it.
void AbstractDumpWriter::write_id(u4 x) {
#ifdef _LP64
  write_u8((u8) x);
#else
  write_u4(x);
#endif
}
541
// We use java mirror as the class ID, so class records share the ID space
// with ordinary objects.
void AbstractDumpWriter::write_classID(Klass* k) {
  write_objectID(k->java_mirror());
}
711 }
712 }
713 _pos = 0; // reset pos to make internal buffer available
714
715 if (result != nullptr) {
716 set_error(result);
717 }
718 }
719
720 void DumpWriter::do_compress() {
721 const char* msg = _compressor->compress(_buffer, _pos, _out_buffer, _out_size,
722 _tmp_buffer, _tmp_size, &_out_pos);
723
724 if (msg != nullptr) {
725 set_error(msg);
726 }
727 }
728
729 class DumperClassCacheTable;
730 class DumperClassCacheTableEntry;
731
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // returns the size of the instance of the given class
  static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given object
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
  // returns the count of the instance (non-static) fields of the given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object
  static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  // Returns o, or nullptr if o's class has no loaded java mirror (i.e. o is
  // likely a dormant archived object) so callers skip/null the reference.
  // ref_obj is the object referencing o (may be null); used only for logging.
  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  // Trace-logs a skipped dormant archived object, including the referencing
  // object when one is known.
  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, aot, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name(),
                             p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name());
      }
    }
  }
};
810
// Hash table of klasses to the klass metadata. This should greatly improve the
// hash dumping performance. This hash table is supposed to be used by a single
// thread only.
//
// Entry: parallel arrays describing each instance field of a class (first
// signature character and field offset), plus the precomputed total
// serialized size of the instance fields.
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
  friend class DumperClassCacheTable;
private:
  GrowableArray<char> _sigs_start; // first char of each field's type signature
  GrowableArray<int> _offsets;     // offset of each field within the instance
  u4 _instance_size;               // total serialized size of the instance fields
  int _entries;                    // number of fields recorded

public:
  DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};

  int field_count() { return _entries; }
  char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
  int offset(int field_idx) { return _offsets.at(field_idx); }
  u4 instance_size() { return _instance_size; }
};
831
// Single-threaded cache mapping InstanceKlass -> DumperClassCacheTableEntry.
// Owns the entries; the table (and cached pointers) is purged wholesale when
// it grows past CACHE_TOP.
class DumperClassCacheTable {
private:
  // HashTable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  static constexpr size_t TABLE_SIZE = 1031;

  // Maintain the cache for N classes. This limits memory footprint
  // impact, regardless of how many classes we have in the dump.
  // This also improves look up performance by keeping the statically
  // sized table from overloading.
  static constexpr int CACHE_TOP = 256;

  typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
                    TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
  PtrTable* _ptrs;

  // Single-slot cache to handle the major case of objects of the same
  // class back-to-back, e.g. from T[].
  InstanceKlass* _last_ik;
  DumperClassCacheTableEntry* _last_entry;

  // Deletes every cached entry and removes it from the table.
  void unlink_all(PtrTable* table) {
    class CleanupEntry: StackObj {
    public:
      bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
        delete entry;
        return true;
      }
    } cleanup;
    table->unlink(&cleanup);
  }

public:
  // Returns the cached field layout for ik, building and caching it on the
  // first encounter of the class.
  DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
    if (_last_ik == ik) {
      return _last_entry;
    }

    DumperClassCacheTableEntry* entry;
    DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
    if (from_cache == nullptr) {
      entry = new DumperClassCacheTableEntry();
      // Record every instance field (including inherited ones) in
      // stream order; dump_instance_fields replays this layout.
      for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
        if (!fld.access_flags().is_static()) {
          Symbol* sig = fld.signature();
          entry->_sigs_start.push(sig->char_at(0));
          entry->_offsets.push(fld.offset());
          entry->_entries++;
          entry->_instance_size += DumperSupport::sig2size(sig);
        }
      }

      if (_ptrs->number_of_entries() >= CACHE_TOP) {
        // We do not track the individual hit rates for table entries.
        // Purge the entire table, and let the cache catch up with new
        // distribution.
        unlink_all(_ptrs);
      }

      _ptrs->put(ik, entry);
    } else {
      entry = *from_cache;
    }

    // Remember for single-slot cache.
    _last_ik = ik;
    _last_entry = entry;

    return entry;
  }

  DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}

  ~DumperClassCacheTable() {
    unlink_all(_ptrs);
    delete _ptrs;
  }
};
910
911 // write a header of the given type
912 void DumperSupport:: write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
913 writer->write_u1(tag);
914 writer->write_u4(0); // current ticks
915 writer->write_u4(len);
916 }
917
918 // returns hprof tag for the given type signature
919 hprofTag DumperSupport::sig2tag(Symbol* sig) {
920 switch (sig->char_at(0)) {
921 case JVM_SIGNATURE_CLASS : return HPROF_NORMAL_OBJECT;
922 case JVM_SIGNATURE_ARRAY : return HPROF_NORMAL_OBJECT;
923 case JVM_SIGNATURE_BYTE : return HPROF_BYTE;
924 case JVM_SIGNATURE_CHAR : return HPROF_CHAR;
925 case JVM_SIGNATURE_FLOAT : return HPROF_FLOAT;
926 case JVM_SIGNATURE_DOUBLE : return HPROF_DOUBLE;
927 case JVM_SIGNATURE_INT : return HPROF_INT;
928 case JVM_SIGNATURE_LONG : return HPROF_LONG;
929 case JVM_SIGNATURE_SHORT : return HPROF_SHORT;
930 case JVM_SIGNATURE_BOOLEAN : return HPROF_BOOLEAN;
1029 break;
1030 }
1031 case JVM_SIGNATURE_LONG : {
1032 jlong l = obj->long_field(offset);
1033 writer->write_u8(l);
1034 break;
1035 }
1036 case JVM_SIGNATURE_BOOLEAN : {
1037 jboolean b = obj->bool_field(offset);
1038 writer->write_u1(b);
1039 break;
1040 }
1041 default : {
1042 ShouldNotReachHere();
1043 break;
1044 }
1045 }
1046 }
1047
1048 // returns the size of the instance of the given class
1049 u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
1050 if (class_cache_entry != nullptr) {
1051 return class_cache_entry->instance_size();
1052 } else {
1053 u4 size = 0;
1054 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1055 if (!fld.access_flags().is_static()) {
1056 size += sig2size(fld.signature());
1057 }
1058 }
1059 return size;
1060 }
1061 }
1062
1063 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1064 field_count = 0;
1065 u4 size = 0;
1066
1067 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1068 if (fldc.access_flags().is_static()) {
1069 field_count++;
1070 size += sig2size(fldc.signature());
1071 }
1072 }
1073
1074 // Add in resolved_references which is referenced by the cpCache
1075 // The resolved_references is an array per InstanceKlass holding the
1076 // strings and other oops resolved from the constant pool.
1077 oop resolved_references = ik->constants()->resolved_references_or_null();
1078 if (resolved_references != nullptr) {
1079 field_count++;
1080 size += sizeof(address);
1081
1082 // Add in the resolved_references of the used previous versions of the class
1083 // in the case of RedefineClasses
1084 InstanceKlass* prev = ik->previous_versions();
1085 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1086 field_count++;
1087 size += sizeof(address);
1088 prev = prev->previous_versions();
1091
1092 // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1093 // arrays.
1094 oop init_lock = ik->init_lock();
1095 if (init_lock != nullptr) {
1096 field_count++;
1097 size += sizeof(address);
1098 }
1099
1100 // We write the value itself plus a name and a one byte type tag per field.
1101 return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1102 }
1103
// dumps static fields of the given class
// Writes (name ID, type tag, value) triples for every declared static field,
// followed by synthetic entries (resolved_references, init_lock). The set of
// entries written here must stay in sync with get_static_fields_size().
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name());   // name
      writer->write_u1(sig2tag(sig));       // type

      // value (statics live in the java mirror)
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions (RedefineClasses support)
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}
1146
1147 // dump the raw values of the instance fields of the given object
1148 void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {
1149 assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
1150 for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
1151 dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
1152 }
1153 }
1154
1155 // dumps the definition of the instance fields for a given class
1156 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1157 u2 field_count = 0;
1158
1159 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1160 if (!fldc.access_flags().is_static()) field_count++;
1161 }
1162
1163 return field_count;
1164 }
1165
1166 // dumps the definition of the instance fields for a given class
1167 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
1168 InstanceKlass* ik = InstanceKlass::cast(k);
1169
1170 // dump the field descriptors
1171 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1172 if (!fld.access_flags().is_static()) {
1173 Symbol* sig = fld.signature();
1174
1175 writer->write_symbolID(fld.name()); // name
1176 writer->write_u1(sig2tag(sig)); // type
1177 }
1178 }
1179 }
1180
// creates HPROF_GC_INSTANCE_DUMP record for the given object
void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());

  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  // serialized size of the instance fields
  u4 is = instance_size(ik, cache_entry);
  // record layout: tag(u1) + object ID + stack trace serial(u4)
  //              + class ID + field-bytes count(u4) + field values
  u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;

  writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
  writer->write_objectID(o);
  writer->write_u4(STACK_TRACE_ID);

  // class ID
  writer->write_classID(ik);

  // number of bytes that follow
  writer->write_u4(is);

  // field values
  dump_instance_fields(writer, o, cache_entry);

  writer->end_sub_record();
}
1205
1206 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1207 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik) {
1208 // We can safepoint and do a heap dump at a point where we have a Klass,
1209 // but no java mirror class has been setup for it. So we need to check
1210 // that the class is at least loaded, to avoid crash from a null mirror.
1211 if (!ik->is_loaded()) {
1212 return;
1213 }
1214
1215 u2 static_fields_count = 0;
1216 u4 static_size = get_static_fields_size(ik, static_fields_count);
1217 u2 instance_fields_count = get_instance_fields_count(ik);
1218 u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1219 u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1220
1221 writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1280
1281 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1282 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1283 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1284
1285 writer->write_objectID(oop(nullptr)); // reserved
1286 writer->write_objectID(oop(nullptr));
1287 writer->write_u4(0); // instance size
1288 writer->write_u2(0); // constant pool
1289 writer->write_u2(0); // static fields
1290 writer->write_u2(0); // instance fields
1291
1292 writer->end_sub_record();
1293
1294 }
1295
1296 // Hprof uses an u4 as record length field,
1297 // which means we need to truncate arrays that are too long.
1298 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1299 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1300 assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
1301
1302 int length = array->length();
1303
1304 int type_size;
1305 if (type == T_OBJECT) {
1306 type_size = sizeof(address);
1307 } else {
1308 type_size = type2aelembytes(type);
1309 }
1310
1311 size_t length_in_bytes = (size_t)length * type_size;
1312 uint max_bytes = max_juint - header_size;
1313
1314 if (length_in_bytes > max_bytes) {
1315 length = max_bytes / type_size;
1316 length_in_bytes = (size_t)length * type_size;
1317
1318 warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1319 type2name_tab[type], array->length(), length);
1320 }
1321 return length;
1322 }
1323
// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
  // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
  short header_size = 1 + 2 * 4 + 2 * sizeof(address);
  // may be less than array->length() if the full array does not fit in a u4 record
  int length = calculate_array_max_length(writer, array, header_size);
  u4 size = checked_cast<u4>(header_size + length * sizeof(address));

  writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
  writer->write_objectID(array);
  writer->write_u4(STACK_TRACE_ID);
  writer->write_u4(length);

  // array class ID
  writer->write_classID(array->klass());

  // [id]* elements; dormant archived objects are written as null references
  for (int index = 0; index < length; index++) {
    oop o = array->obj_at(index);
    o = mask_dormant_archived_object(o, array);
    writer->write_objectID(o);
  }

  writer->end_sub_record();
}
1348
1349 #define WRITE_ARRAY(Array, Type, Size, Length) \
1350 for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1351
1352 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1353 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1354 BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1355 // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1356 short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1357
1358 int length = calculate_array_max_length(writer, array, header_size);
1359 int type_size = type2aelembytes(type);
1360 u4 length_in_bytes = (u4)length * type_size;
1361 u4 size = header_size + length_in_bytes;
1362
1363 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1485 int len = sym->utf8_length();
1486 if (len > 0) {
1487 char* s = sym->as_utf8();
1488 DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1489 writer()->write_symbolID(sym);
1490 writer()->write_raw(s, len);
1491 }
1492 }
1493
1494 // Support class used to generate HPROF_GC_CLASS_DUMP records
1495
1496 class ClassDumper : public KlassClosure {
1497 private:
1498 AbstractDumpWriter* _writer;
1499 AbstractDumpWriter* writer() const { return _writer; }
1500
1501 public:
1502 ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1503
1504 void do_klass(Klass* k) {
1505 if (k->is_instance_klass()) {
1506 DumperSupport::dump_instance_class(writer(), InstanceKlass::cast(k));
1507 } else {
1508 DumperSupport::dump_array_class(writer(), k);
1509 }
1510 }
1511 };
1512
// Support class used to generate HPROF_LOAD_CLASS records

class LoadedClassDumper : public LockedClassesDo {
 private:
  AbstractDumpWriter* _writer;
  GrowableArray<Klass*>* _klass_map; // maps class serial number -> Klass*
  u4 _class_serial_num;              // last serial number handed out
  AbstractDumpWriter* writer() const { return _writer; }
  // records the Klass* at index serial_num so later records (e.g. stack
  // frames) can reference classes by serial number
  void add_class_serial_number(Klass* k, int serial_num) {
    _klass_map->at_put_grow(serial_num, k);
  }
 public:
  LoadedClassDumper(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map)
    : _writer(writer), _klass_map(klass_map), _class_serial_num(0) {}

  void do_klass(Klass* k) {
    // len of HPROF_LOAD_CLASS record
    u4 remaining = 2 * oopSize + 2 * sizeof(u4);
    DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
    // class serial number is just a number
    writer()->write_u4(++_class_serial_num);
    // class ID
    writer()->write_classID(k);
    // add the Klass* and class serial number pair
    add_class_serial_number(k, _class_serial_num);
    writer()->write_u4(STACK_TRACE_ID);
    // class name ID
    Symbol* name = k->name();
    writer()->write_symbolID(name);
  }
};
1544
1545 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
1546
1547 class JNILocalsDumper : public OopClosure {
1548 private:
1934 RegisterMap::WalkContinuation::skip);
1935 switch (_thread_type) {
1936 case ThreadType::Platform:
1937 if (!_java_thread->has_last_Java_frame()) {
1938 return nullptr;
1939 }
1940 return _java_thread->is_vthread_mounted()
1941 ? _java_thread->carrier_last_java_vframe(®_map)
1942 : _java_thread->platform_thread_last_java_vframe(®_map);
1943
1944 case ThreadType::MountedVirtual:
1945 return _java_thread->last_java_vframe(®_map);
1946
1947 default: // make compilers happy
1948 break;
1949 }
1950 ShouldNotReachHere();
1951 return nullptr;
1952 }
1953
// Callback to dump thread-related data for unmounted virtual threads;
// implemented by VM_HeapDumper.
class UnmountedVThreadDumper {
 public:
  // Dumps the records for unmounted virtual thread vt into segment_writer.
  virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
};
1960
// Support class used when iterating over the heap.
class HeapObjectDumper : public ObjectClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() { return _writer; }
  // used to dump unmounted virtual threads encountered during iteration
  UnmountedVThreadDumper* _vthread_dumper;

  // per-dumper cache of class field layouts (single-threaded use)
  DumperClassCacheTable _class_cache;

 public:
  HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper)
    : _writer(writer), _vthread_dumper(vthread_dumper) {}

  // called for each object in the heap
  void do_object(oop o);
};
1977
1978 void HeapObjectDumper::do_object(oop o) {
1979 // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
1980 if (o->klass() == vmClasses::Class_klass()) {
1981 if (!java_lang_Class::is_primitive(o)) {
1982 return;
1983 }
1984 }
1985
1986 if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
1987 return;
1988 }
1989
1990 if (o->is_instance()) {
1991 // create a HPROF_GC_INSTANCE record for each object
1992 DumperSupport::dump_instance(writer(), o, &_class_cache);
1993 // If we encounter an unmounted virtual thread it needs to be dumped explicitly
1994 // (mounted virtual threads are dumped with their carriers).
1995 if (java_lang_VirtualThread::is_instance(o)
1996 && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
1997 _vthread_dumper->dump_vthread(o, writer());
1998 }
1999 } else if (o->is_objArray()) {
2000 // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2001 DumperSupport::dump_object_array(writer(), objArrayOop(o));
2002 } else if (o->is_typeArray()) {
2003 // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2004 DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2005 }
2006 }
2007
2008 // The dumper controller for parallel heap dump
2009 class DumperController : public CHeapObj<mtInternal> {
2010 private:
2011 Monitor* _lock;
2012 Mutex* _global_writer_lock;
2013
2014 const uint _dumper_number;
2015 uint _complete_number;
2016
2017 bool _started; // VM dumper started and acquired global writer lock
2018
2019 public:
2020 DumperController(uint number) :
2021 // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
2229 // The VM operation that performs the heap dump
2230 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2231 private:
2232 DumpWriter* _writer;
2233 JavaThread* _oome_thread;
2234 Method* _oome_constructor;
2235 bool _gc_before_heap_dump;
2236 GrowableArray<Klass*>* _klass_map;
2237
2238 ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
2239 int _thread_dumpers_count;
2240 volatile int _thread_serial_num;
2241 volatile int _frame_serial_num;
2242
2243 volatile int _dump_seq;
2244 // parallel heap dump support
2245 uint _num_dumper_threads;
2246 DumperController* _dumper_controller;
2247 ParallelObjectIterator* _poi;
2248
2249 // Dumper id of VMDumper thread.
2250 static const int VMDumperId = 0;
2251 // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2252 static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2253 // the 1st dumper calling get_next_dumper_id becomes VM dumper
2254 int get_next_dumper_id() {
2255 return AtomicAccess::fetch_then_add(&_dump_seq, 1);
2256 }
2257
2258 DumpWriter* writer() const { return _writer; }
2259
2260 bool skip_operation() const;
2261
2262 // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2263 void dump_threads(AbstractDumpWriter* writer);
2264
2265 bool is_oom_thread(JavaThread* thread) const {
2266 return thread == _oome_thread && _oome_constructor != nullptr;
2267 }
2268
2493 JNIHandles::oops_do(&jni_dumper);
2494 // technically not jni roots, but global roots
2495 // for things like preallocated throwable backtraces
2496 Universe::vm_global()->oops_do(&jni_dumper);
2497 // HPROF_GC_ROOT_STICKY_CLASS
2498 // These should be classes in the null class loader data, and not all classes
2499 // if !ClassUnloading
2500 StickyClassDumper stiky_class_dumper(&segment_writer);
2501 ClassLoaderData::the_null_class_loader_data()->classes_do(&stiky_class_dumper);
2502 }
2503
2504 // Heap iteration.
2505 // writes HPROF_GC_INSTANCE_DUMP records.
2506 // After each sub-record is written check_segment_length will be invoked
2507 // to check if the current segment exceeds a threshold. If so, a new
2508 // segment is started.
2509 // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2510 // of the heap dump.
2511
2512 TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
2513 HeapObjectDumper obj_dumper(&segment_writer, this);
2514 if (!is_parallel_dump()) {
2515 Universe::heap()->object_iterate(&obj_dumper);
2516 } else {
2517 // == Parallel dump
2518 _poi->object_iterate(&obj_dumper, worker_id);
2519 }
2520
2521 segment_writer.finish_dump_segment();
2522 segment_writer.flush();
2523 }
2524
2525 _dumper_controller->dumper_complete(&segment_writer, writer());
2526
2527 if (is_vm_dumper(dumper_id)) {
2528 _dumper_controller->wait_all_dumpers_complete();
2529
2530 // flush global writer
2531 writer()->flush();
2532
2533 // At this point, all fragments of the heapdump have been written to separate files.
|
1 /*
2 * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2023, Alibaba Group Holding Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "gc/shared/gcLocker.hpp"
33 #include "gc/shared/gcVMOperations.hpp"
34 #include "gc/shared/workerThread.hpp"
35 #include "jfr/jfrEvents.hpp"
36 #include "jvm.h"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/fieldStreams.inline.hpp"
41 #include "oops/flatArrayKlass.hpp"
42 #include "oops/flatArrayOop.inline.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "oops/objArrayKlass.hpp"
45 #include "oops/objArrayOop.inline.hpp"
46 #include "oops/oop.inline.hpp"
47 #include "oops/oopCast.inline.hpp"
48 #include "oops/typeArrayOop.inline.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/atomicAccess.hpp"
51 #include "runtime/continuationWrapper.inline.hpp"
52 #include "runtime/frame.inline.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/javaCalls.hpp"
55 #include "runtime/javaThread.inline.hpp"
56 #include "runtime/jniHandles.hpp"
57 #include "runtime/os.hpp"
58 #include "runtime/threads.hpp"
59 #include "runtime/threadSMR.hpp"
60 #include "runtime/timerTrace.hpp"
61 #include "runtime/vframe.hpp"
62 #include "runtime/vmOperations.hpp"
63 #include "runtime/vmThread.hpp"
64 #include "services/heapDumper.hpp"
65 #include "services/heapDumperCompression.hpp"
66 #include "services/threadService.hpp"
67 #include "utilities/checkedCast.hpp"
68 #include "utilities/macros.hpp"
69 #include "utilities/ostream.hpp"
70 #ifdef LINUX
424 public:
  // Initializes an empty writer: no buffer attached yet (_buffer is nullptr),
  // capacity io_buffer_max_size, write position 0, not inside a dump segment.
  AbstractDumpWriter() :
    _buffer(nullptr),
    _size(io_buffer_max_size),
    _pos(0),
    _in_dump_segment(false) { }
430
431 // Total number of bytes written to the disk
432 virtual julong bytes_written() const = 0;
433 // Return non-null if error occurred
434 virtual char const* error() const = 0;
435
436 size_t position() const { return _pos; }
437 // writer functions
438 virtual void write_raw(const void* s, size_t len);
439 void write_u1(u1 x);
440 void write_u2(u2 x);
441 void write_u4(u4 x);
442 void write_u8(u8 x);
443 void write_objectID(oop o);
444 void write_objectID(uintptr_t id);
445 void write_rootID(oop* p);
446 void write_symbolID(Symbol* o);
447 void write_classID(Klass* k);
448 void write_id(u4 x);
449
450 // Start a new sub-record. Starts a new heap dump segment if needed.
451 void start_sub_record(u1 tag, u4 len);
452 // Ends the current sub-record.
453 void end_sub_record();
454 // Finishes the current dump segment if not already finished.
455 void finish_dump_segment();
456 // Flush internal buffer to persistent storage
457 virtual void flush() = 0;
458 };
459
460 void AbstractDumpWriter::write_fast(const void* s, size_t len) {
461 assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
462 assert(buffer_size() - position() >= len, "Must fit");
463 DEBUG_ONLY(_sub_record_left -= len);
464 memcpy(buffer() + position(), s, len);
511 }
512
513 void AbstractDumpWriter::write_u8(u8 x) {
514 u8 v;
515 Bytes::put_Java_u8((address)&v, x);
516 WRITE_KNOWN_TYPE(&v, 8);
517 }
518
// Writes a pointer-sized value: 8 bytes on 64-bit, 4 bytes on 32-bit.
void AbstractDumpWriter::write_address(address a) {
#ifdef _LP64
  write_u8((u8)a);
#else
  write_u4((u4)a);
#endif
}
526
// HPROF object IDs for heap objects are the oop's address.
void AbstractDumpWriter::write_objectID(oop o) {
  write_address(cast_from_oop<address>(o));
}
530
// Writes a synthetic object ID (used for flat objects which have no oop address of their own).
void AbstractDumpWriter::write_objectID(uintptr_t id) {
  write_address((address)id);
}
534
// Root IDs are the address of the root's oop slot.
void AbstractDumpWriter::write_rootID(oop* p) {
  write_address((address)p);
}
538
// Symbol IDs are the Symbol's address.
void AbstractDumpWriter::write_symbolID(Symbol* s) {
  write_address((address)((uintptr_t)s));
}
542
// Writes an ID-sized value (widened to 8 bytes on 64-bit).
void AbstractDumpWriter::write_id(u4 x) {
#ifdef _LP64
  write_u8((u8) x);
#else
  write_u4(x);
#endif
}
550
// We use java mirror as the class ID (i.e. the java.lang.Class oop of the klass).
void AbstractDumpWriter::write_classID(Klass* k) {
  write_objectID(k->java_mirror());
}
720 }
721 }
722 _pos = 0; // reset pos to make internal buffer available
723
724 if (result != nullptr) {
725 set_error(result);
726 }
727 }
728
729 void DumpWriter::do_compress() {
730 const char* msg = _compressor->compress(_buffer, _pos, _out_buffer, _out_size,
731 _tmp_buffer, _tmp_size, &_out_pos);
732
733 if (msg != nullptr) {
734 set_error(msg);
735 }
736 }
737
738 class DumperClassCacheTable;
739 class DumperClassCacheTableEntry;
740 class DumperFlatObject;
741 class DumperFlatObjectList;
742
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
 public:

  // write a header of the given type (tag byte, ticks placeholder, record length)
  static void write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);

  // returns hprof tag for the given type signature
  static hprofTag sig2tag(Symbol* sig);
  // returns hprof tag for the given basic type
  static hprofTag type2tag(BasicType type);
  // Returns the size of the data to write.
  static u4 sig2size(Symbol* sig);

  // returns the size of the instance of the given class
  static u4 instance_size(InstanceKlass* ik);

  // dump a jfloat
  static void dump_float(AbstractDumpWriter* writer, jfloat f);
  // dump a jdouble
  static void dump_double(AbstractDumpWriter* writer, jdouble d);
  // dumps the raw value of the given field ('type' is the first char of the field signature)
  static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
  // returns the size of the static fields; also counts the static fields
  static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
  // dumps static fields of the given class
  static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
  // dump the raw values of the instance fields of the given object, fills flat_fields
  static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
                                   DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields);
  // get the count of the instance fields for a given class
  static u2 get_instance_fields_count(InstanceKlass* ik);
  // dumps the definition of the instance fields for a given class
  static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k);
  // creates HPROF_GC_INSTANCE_DUMP record for the given object, fills flat_fields
  static void dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
                            DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields);
  // creates HPROF_GC_CLASS_DUMP record for the given instance class
  static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
  // creates HPROF_GC_CLASS_DUMP record for a given array class
  static void dump_array_class(AbstractDumpWriter* writer, Klass* k);

  // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array, fills flat_elements if the object is flat array
  static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements);
  // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
  static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
  // create HPROF_FRAME record for the given method and bci
  static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);

  // check if we need to truncate an array (returns the possibly-truncated length to dump)
  static int calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);

  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
  static void end_of_dump(AbstractDumpWriter* writer);

  // Returns 'o', or nullptr if 'o' is a dormant archived object (its mirror is
  // not loaded). 'ref_obj' is the referencing object, used only for logging.
  static oop mask_dormant_archived_object(oop o, oop ref_obj) {
    if (o != nullptr && o->klass()->java_mirror_no_keepalive() == nullptr) {
      // Ignore this object since the corresponding java mirror is not loaded.
      // Might be a dormant archive object.
      report_dormant_archived_object(o, ref_obj);
      return nullptr;
    } else {
      return o;
    }
  }

  // Trace-logs a skipped dormant archived object (and its referrer, if known).
  static void report_dormant_archived_object(oop o, oop ref_obj) {
    if (log_is_enabled(Trace, aot, heap)) {
      ResourceMark rm;
      if (ref_obj != nullptr) {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name(),
                             p2i(ref_obj), ref_obj->klass()->external_name());
      } else {
        log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name());
      }
    }
  }

  // Direct instances of ObjArrayKlass represent the Java types that Java code can see.
  // RefArrayKlass/FlatArrayKlass describe different implementations of the arrays, filter them out to avoid duplicates.
  static bool filter_out_klass(Klass* k) {
    if (k->is_objArray_klass() && k->kind() != Klass::KlassKind::ObjArrayKlassKind) {
      return true;
    }
    return false;
  }
};
832
833 // Hash table of klasses to the klass metadata. This should greatly improve the
834 // hash dumping performance. This hash table is supposed to be used by a single
835 // thread only.
836 //
837 class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
838 friend class DumperClassCacheTable;
839 public:
840 class FieldDescriptor {
841 private:
842 char _sigs_start;
843 int _offset;
844 InlineKlass* _inline_klass; // nullptr for heap object
845 LayoutKind _layout_kind;
846 public:
847 FieldDescriptor(): _sigs_start(0), _offset(0), _inline_klass(nullptr), _layout_kind(LayoutKind::UNKNOWN) {}
848
849 template<typename FieldStreamType>
850 FieldDescriptor(const FieldStreamType& field)
851 : _sigs_start(field.signature()->char_at(0)), _offset(field.offset())
852 {
853 if (field.is_flat()) {
854 const fieldDescriptor& fd = field.field_descriptor();
855 InstanceKlass* holder_klass = fd.field_holder();
856 InlineLayoutInfo* layout_info = holder_klass->inline_layout_info_adr(fd.index());
857 _inline_klass = layout_info->klass();
858 _layout_kind = layout_info->kind();
859 } else {
860 _inline_klass = nullptr;
861 _layout_kind = LayoutKind::REFERENCE;
862 }
863 }
864
865 char sig_start() const { return _sigs_start; }
866 int offset() const { return _offset; }
867 bool is_flat() const { return _inline_klass != nullptr; }
868 InlineKlass* inline_klass() const { return _inline_klass; }
869 LayoutKind layout_kind() const { return _layout_kind; }
870 bool is_flat_nullable() const { return LayoutKindHelper::is_nullable_flat(_layout_kind); }
871 };
872
873 private:
874 GrowableArray<FieldDescriptor> _fields;
875 u4 _instance_size;
876
877 public:
878 DumperClassCacheTableEntry(): _instance_size(0) {}
879
880 template<typename FieldStreamType>
881 void add_field(const FieldStreamType& field) {
882 _fields.push(FieldDescriptor(field));
883 _instance_size += DumperSupport::sig2size(field.signature());
884 }
885
886 const FieldDescriptor& field(int index) const { return _fields.at(index); }
887 int field_count() const { return _fields.length(); }
888 u4 instance_size() const { return _instance_size; }
889 };
890
891 class DumperClassCacheTable {
892 private:
893 // HashTable SIZE is specified at compile time so we
894 // use 1031 which is the first prime after 1024.
895 static constexpr size_t TABLE_SIZE = 1031;
896
897 // Maintain the cache for N classes. This limits memory footprint
898 // impact, regardless of how many classes we have in the dump.
899 // This also improves look up performance by keeping the statically
900 // sized table from overloading.
901 static constexpr int CACHE_TOP = 256;
902
903 typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
904 TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
905 PtrTable* _ptrs;
906
907 // Single-slot cache to handle the major case of objects of the same
908 // class back-to-back, e.g. from T[].
909 InstanceKlass* _last_ik;
910 DumperClassCacheTableEntry* _last_entry;
911
912 void unlink_all(PtrTable* table) {
913 class CleanupEntry: StackObj {
914 public:
915 bool do_entry(InstanceKlass*& key, DumperClassCacheTableEntry*& entry) {
916 delete entry;
917 return true;
918 }
919 } cleanup;
920 table->unlink(&cleanup);
921 }
922
923 public:
924 DumperClassCacheTableEntry* lookup_or_create(InstanceKlass* ik) {
925 if (_last_ik == ik) {
926 return _last_entry;
927 }
928
929 DumperClassCacheTableEntry* entry;
930 DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
931 if (from_cache == nullptr) {
932 entry = new DumperClassCacheTableEntry();
933 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
934 if (!fld.access_flags().is_static()) {
935 entry->add_field(fld);
936 }
937 }
938
939 if (_ptrs->number_of_entries() >= CACHE_TOP) {
940 // We do not track the individual hit rates for table entries.
941 // Purge the entire table, and let the cache catch up with new
942 // distribution.
943 unlink_all(_ptrs);
944 }
945
946 _ptrs->put(ik, entry);
947 } else {
948 entry = *from_cache;
949 }
950
951 // Remember for single-slot cache.
952 _last_ik = ik;
953 _last_entry = entry;
954
955 return entry;
956 }
957
958 DumperClassCacheTable() : _ptrs(new (mtServiceability) PtrTable), _last_ik(nullptr), _last_entry(nullptr) {}
959
960 ~DumperClassCacheTable() {
961 unlink_all(_ptrs);
962 delete _ptrs;
963 }
964 };
965
// Describes flat object (flatted field or element of flat array) in the holder oop
class DumperFlatObject: public CHeapObj<mtServiceability> {
  friend class DumperFlatObjectList;
 private:
  DumperFlatObject* _next; // intrusive link, managed by DumperFlatObjectList

  const uintptr_t _id; // object id (synthetic; flat objects have no oop address)

  const int _offset;                // payload offset within the holder oop
  InlineKlass* const _inline_klass; // klass of the flattened value

 public:
  DumperFlatObject(uintptr_t id, int offset, InlineKlass* inline_klass)
    : _next(nullptr), _id(id), _offset(offset), _inline_klass(inline_klass) {
  }

  uintptr_t object_id() const { return _id; }
  int offset() const { return _offset; }
  InlineKlass* inline_klass() const { return _inline_klass; }
};
986
// Interface for allocating synthetic object ids for flat objects
// (implemented by FlatObjectDumper).
class FlatObjectIdProvider {
 public:
  virtual uintptr_t get_id() = 0;
};
991
992 // Simple FIFO.
993 class DumperFlatObjectList {
994 private:
995 FlatObjectIdProvider* _id_provider;
996 DumperFlatObject* _head;
997 DumperFlatObject* _tail;
998
999 void push(DumperFlatObject* obj) {
1000 if (_head == nullptr) {
1001 _head = _tail = obj;
1002 } else {
1003 assert(_tail != nullptr, "must be");
1004 _tail->_next = obj;
1005 _tail = obj;
1006 }
1007 }
1008
1009 public:
1010 DumperFlatObjectList(FlatObjectIdProvider* id_provider): _id_provider(id_provider), _head(nullptr), _tail(nullptr) {}
1011
1012 bool is_empty() const { return _head == nullptr; }
1013
1014 uintptr_t push(int offset, InlineKlass* inline_klass) {
1015 uintptr_t id = _id_provider->get_id();
1016 DumperFlatObject* obj = new DumperFlatObject(id, offset, inline_klass);
1017 push(obj);
1018 return id;
1019 }
1020
1021 DumperFlatObject* pop() {
1022 assert(!is_empty(), "sanity");
1023 DumperFlatObject* element = _head;
1024 _head = element->_next;
1025 element->_next = nullptr;
1026 return element;
1027 }
1028 };
1029
1030 // write a header of the given type
1031 void DumperSupport:: write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
1032 writer->write_u1(tag);
1033 writer->write_u4(0); // current ticks
1034 writer->write_u4(len);
1035 }
1036
1037 // returns hprof tag for the given type signature
1038 hprofTag DumperSupport::sig2tag(Symbol* sig) {
1039 switch (sig->char_at(0)) {
1040 case JVM_SIGNATURE_CLASS : return HPROF_NORMAL_OBJECT;
1041 case JVM_SIGNATURE_ARRAY : return HPROF_NORMAL_OBJECT;
1042 case JVM_SIGNATURE_BYTE : return HPROF_BYTE;
1043 case JVM_SIGNATURE_CHAR : return HPROF_CHAR;
1044 case JVM_SIGNATURE_FLOAT : return HPROF_FLOAT;
1045 case JVM_SIGNATURE_DOUBLE : return HPROF_DOUBLE;
1046 case JVM_SIGNATURE_INT : return HPROF_INT;
1047 case JVM_SIGNATURE_LONG : return HPROF_LONG;
1048 case JVM_SIGNATURE_SHORT : return HPROF_SHORT;
1049 case JVM_SIGNATURE_BOOLEAN : return HPROF_BOOLEAN;
1148 break;
1149 }
1150 case JVM_SIGNATURE_LONG : {
1151 jlong l = obj->long_field(offset);
1152 writer->write_u8(l);
1153 break;
1154 }
1155 case JVM_SIGNATURE_BOOLEAN : {
1156 jboolean b = obj->bool_field(offset);
1157 writer->write_u1(b);
1158 break;
1159 }
1160 default : {
1161 ShouldNotReachHere();
1162 break;
1163 }
1164 }
1165 }
1166
1167 // returns the size of the instance of the given class
1168 u4 DumperSupport::instance_size(InstanceKlass* ik) {
1169 u4 size = 0;
1170 for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
1171 if (!fld.access_flags().is_static()) {
1172 size += sig2size(fld.signature());
1173 }
1174 }
1175 return size;
1176 }
1177
1178 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1179 field_count = 0;
1180 u4 size = 0;
1181
1182 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1183 if (fldc.access_flags().is_static()) {
1184 assert(!fldc.is_flat(), "static fields cannot be flat");
1185
1186 field_count++;
1187 size += sig2size(fldc.signature());
1188 }
1189 }
1190
1191 // Add in resolved_references which is referenced by the cpCache
1192 // The resolved_references is an array per InstanceKlass holding the
1193 // strings and other oops resolved from the constant pool.
1194 oop resolved_references = ik->constants()->resolved_references_or_null();
1195 if (resolved_references != nullptr) {
1196 field_count++;
1197 size += sizeof(address);
1198
1199 // Add in the resolved_references of the used previous versions of the class
1200 // in the case of RedefineClasses
1201 InstanceKlass* prev = ik->previous_versions();
1202 while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
1203 field_count++;
1204 size += sizeof(address);
1205 prev = prev->previous_versions();
1208
1209 // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1210 // arrays.
1211 oop init_lock = ik->init_lock();
1212 if (init_lock != nullptr) {
1213 field_count++;
1214 size += sizeof(address);
1215 }
1216
1217 // We write the value itself plus a name and a one byte type tag per field.
1218 return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
1219 }
1220
// dumps static fields of the given class
// NOTE: the set of fields written here must match the size/count computed by
// get_static_fields_size (statics + resolved_references chain + init_lock).
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
    if (fld.access_flags().is_static()) {
      assert(!fld.is_flat(), "static fields cannot be flat");

      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name()); // name
      writer->write_u1(sig2tag(sig)); // type

      // value (read from the java mirror, where statics live)
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != nullptr) {
    writer->write_symbolID(vmSymbols::resolved_references_name()); // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions (RedefineClasses keeps old versions alive)
    InstanceKlass* prev = ik->previous_versions();
    while (prev != nullptr && prev->constants()->resolved_references_or_null() != nullptr) {
      writer->write_symbolID(vmSymbols::resolved_references_name()); // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name()); // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}
1265
// dump the raw values of the instance fields of the given object, fills flat_fields
// 'offset' is an extra displacement applied to every field (non-zero when 'o' is
// the holder of a flattened object, see dump_instance).
void DumperSupport:: dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
                                          DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields) {
  assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
  for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
    const DumperClassCacheTableEntry::FieldDescriptor& field = class_cache_entry->field(idx);
    int field_offset = offset + field.offset();
    if (field.is_flat()) {
      // check for possible nulls (nullable flat layouts carry a null marker in the payload)
      if (field.is_flat_nullable()) {
        address payload = cast_from_oop<address>(o) + field_offset;
        if (field.inline_klass()->is_payload_marked_as_null(payload)) {
          writer->write_objectID(nullptr);
          continue;
        }
      }
      // A flattened field has no identity of its own: allocate a synthetic id,
      // write it as the field value, and queue the field on flat_fields so the
      // caller dumps it later as a separate instance record.
      uintptr_t object_id = flat_fields->push(field_offset, field.inline_klass());
      writer->write_objectID(object_id);
    } else {
      dump_field_value(writer, field.sig_start(), o, field_offset);
    }
  }
}
1289
1290 // gets the count of the instance fields for a given class
1291 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1292 u2 field_count = 0;
1293
1294 for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
1295 if (!fldc.access_flags().is_static()) {
1296 field_count++;
1297 }
1298 }
1299
1300 return field_count;
1301 }
1302
1303 // dumps the definition of the instance fields for a given class
1304 void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik) {
1305 // dump the field descriptors
1306 for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
1307 if (!fld.access_flags().is_static()) {
1308 Symbol* sig = fld.signature();
1309
1310 writer->write_symbolID(fld.name()); // name
1311 writer->write_u1(sig2tag(sig)); // type
1312 }
1313 }
1314 }
1315
// creates HPROF_GC_INSTANCE_DUMP record for the given object, fills flat_fields
// 'id' is the object ID to write (synthetic for flattened objects); 'offset' is
// non-zero when 'o' holds the object as a flattened payload at that offset.
void DumperSupport::dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
                                  DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields) {
  DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);

  u4 is = cache_entry->instance_size();
  // sub-record: tag(u1) + objectID + stack trace(u4) + classID + length(u4) + field data
  u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;

  writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
  writer->write_objectID(id);
  writer->write_u4(STACK_TRACE_ID);

  // class ID
  writer->write_classID(ik);

  // number of bytes that follow
  writer->write_u4(is);

  // field values
  if (offset != 0) {
    // the object itself is flattened, so all fields are stored without headers;
    // rebase the offset so field offsets (relative to a standalone instance) line up
    InlineKlass* inline_klass = InlineKlass::cast(ik);
    offset -= inline_klass->payload_offset();
  }

  dump_instance_fields(writer, o, offset, cache_entry, flat_fields);

  writer->end_sub_record();
}
1345
1346 // creates HPROF_GC_CLASS_DUMP record for the given instance class
1347 void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik) {
1348 // We can safepoint and do a heap dump at a point where we have a Klass,
1349 // but no java mirror class has been setup for it. So we need to check
1350 // that the class is at least loaded, to avoid crash from a null mirror.
1351 if (!ik->is_loaded()) {
1352 return;
1353 }
1354
1355 u2 static_fields_count = 0;
1356 u4 static_size = get_static_fields_size(ik, static_fields_count);
1357 u2 instance_fields_count = get_instance_fields_count(ik);
1358 u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
1359 u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
1360
1361 writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1420
1421 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->class_loader());
1422 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->signers());
1423 writer->write_objectID(ik == nullptr ? oop(nullptr) : ik->protection_domain());
1424
1425 writer->write_objectID(oop(nullptr)); // reserved
1426 writer->write_objectID(oop(nullptr));
1427 writer->write_u4(0); // instance size
1428 writer->write_u2(0); // constant pool
1429 writer->write_u2(0); // static fields
1430 writer->write_u2(0); // instance fields
1431
1432 writer->end_sub_record();
1433
1434 }
1435
1436 // Hprof uses an u4 as record length field,
1437 // which means we need to truncate arrays that are too long.
1438 int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
1439 BasicType type = ArrayKlass::cast(array->klass())->element_type();
1440 assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type");
1441
1442 int length = array->length();
1443
1444 int type_size;
1445 if (type == T_OBJECT || type == T_FLAT_ELEMENT) {
1446 type_size = sizeof(address);
1447 } else {
1448 type_size = type2aelembytes(type);
1449 }
1450
1451 size_t length_in_bytes = (size_t)length * type_size;
1452 uint max_bytes = max_juint - header_size;
1453
1454 if (length_in_bytes > max_bytes) {
1455 length = max_bytes / type_size;
1456 length_in_bytes = (size_t)length * type_size;
1457
1458 warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1459 type2name_tab[type], array->length(), length);
1460 }
1461 return length;
1462 }
1463
// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array,
// fills flat_elements if the array is a flat array
void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements) {
  // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
  short header_size = 1 + 2 * 4 + 2 * sizeof(address);
  int length = calculate_array_max_length(writer, array, header_size);
  u4 size = checked_cast<u4>(header_size + length * sizeof(address));

  writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
  writer->write_objectID(array);
  writer->write_u4(STACK_TRACE_ID);
  writer->write_u4(length);

  // array class ID
  writer->write_classID(array->klass());

  // [id]* elements
  if (array->is_flatArray()) {
    flatArrayOop farray = flatArrayOop(array);
    FlatArrayKlass* faklass = FlatArrayKlass::cast(farray->klass());

    InlineKlass* vk = faklass->element_klass();
    bool need_null_check = LayoutKindHelper::is_nullable_flat(faklass->layout_kind());

    for (int index = 0; index < length; index++) {
      address addr = (address)farray->value_at_addr(index, faklass->layout_helper());
      // check for null (nullable flat layouts carry a null marker in the payload)
      if (need_null_check) {
        if (vk->is_payload_marked_as_null(addr)) {
          writer->write_objectID(nullptr);
          continue;
        }
      }
      // offset in the array oop
      int offset = (int)(addr - cast_from_oop<address>(farray));
      // flat elements have no identity: write a synthetic id and queue the
      // element to be dumped later as a separate instance record
      uintptr_t object_id = flat_elements->push(offset, vk);
      writer->write_objectID(object_id);
    }
  } else {
    refArrayOop rarray = oop_cast<refArrayOop>(array);
    for (int index = 0; index < length; index++) {
      oop o = rarray->obj_at(index);
      o = mask_dormant_archived_object(o, array);
      writer->write_objectID(o);
    }
  }

  writer->end_sub_record();
}
1512
1513 #define WRITE_ARRAY(Array, Type, Size, Length) \
1514 for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1515
1516 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1517 void DumperSupport::dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array) {
1518 BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1519 // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1520 short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1521
1522 int length = calculate_array_max_length(writer, array, header_size);
1523 int type_size = type2aelembytes(type);
1524 u4 length_in_bytes = (u4)length * type_size;
1525 u4 size = header_size + length_in_bytes;
1526
1527 writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1649 int len = sym->utf8_length();
1650 if (len > 0) {
1651 char* s = sym->as_utf8();
1652 DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1653 writer()->write_symbolID(sym);
1654 writer()->write_raw(s, len);
1655 }
1656 }
1657
1658 // Support class used to generate HPROF_GC_CLASS_DUMP records
1659
1660 class ClassDumper : public KlassClosure {
1661 private:
1662 AbstractDumpWriter* _writer;
1663 AbstractDumpWriter* writer() const { return _writer; }
1664
1665 public:
1666 ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}
1667
1668 void do_klass(Klass* k) {
1669 if (DumperSupport::filter_out_klass(k)) {
1670 return;
1671 }
1672 if (k->is_instance_klass()) {
1673 DumperSupport::dump_instance_class(writer(), InstanceKlass::cast(k));
1674 } else {
1675 DumperSupport::dump_array_class(writer(), k);
1676 }
1677 }
1678 };
1679
// Support class used to generate HPROF_LOAD_CLASS records

class LoadedClassDumper : public LockedClassesDo {
 private:
  AbstractDumpWriter* _writer;
  GrowableArray<Klass*>* _klass_map; // maps class serial number -> Klass*
  u4 _class_serial_num;              // serial number of the last class written
  AbstractDumpWriter* writer() const { return _writer; }
  // Records the (serial number -> Klass*) mapping for later HPROF_FRAME records.
  void add_class_serial_number(Klass* k, int serial_num) {
    _klass_map->at_put_grow(serial_num, k);
  }
 public:
  LoadedClassDumper(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map)
    : _writer(writer), _klass_map(klass_map), _class_serial_num(0) {}

  void do_klass(Klass* k) {
    if (DumperSupport::filter_out_klass(k)) {
      return;
    }
    // len of HPROF_LOAD_CLASS record
    u4 remaining = 2 * oopSize + 2 * sizeof(u4);
    DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
    // class serial number is just a number (first class gets serial number 1)
    writer()->write_u4(++_class_serial_num);
    // class ID
    writer()->write_classID(k);
    // add the Klass* and class serial number pair
    add_class_serial_number(k, _class_serial_num);
    writer()->write_u4(STACK_TRACE_ID);
    // class name ID
    Symbol* name = k->name();
    writer()->write_symbolID(name);
  }
};
1714
1715 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
1716
1717 class JNILocalsDumper : public OopClosure {
1718 private:
2104 RegisterMap::WalkContinuation::skip);
2105 switch (_thread_type) {
2106 case ThreadType::Platform:
2107 if (!_java_thread->has_last_Java_frame()) {
2108 return nullptr;
2109 }
2110 return _java_thread->is_vthread_mounted()
2111 ? _java_thread->carrier_last_java_vframe(®_map)
2112 : _java_thread->platform_thread_last_java_vframe(®_map);
2113
2114 case ThreadType::MountedVirtual:
2115 return _java_thread->last_java_vframe(®_map);
2116
2117 default: // make compilers happy
2118 break;
2119 }
2120 ShouldNotReachHere();
2121 return nullptr;
2122 }
2123
// Generates unique pseudo-IDs for flattened value objects (which have no heap
// address of their own) and dumps the records for them.
class FlatObjectDumper: public FlatObjectIdProvider {
private:
  // Monotonically increasing ID source; updated lock-free, so safe to share
  // between parallel dumper threads.
  volatile uintptr_t _id_counter;
public:
  FlatObjectDumper(): _id_counter(0) {
  }

  // Drains flat_objects, dumping a record for each entry; dumping one entry
  // may push newly discovered nested flat objects back onto the list.
  void dump_flat_objects(AbstractDumpWriter* writer, oop holder,
                         DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects);

  // FlatObjectIdProvider implementation
  virtual uintptr_t get_id() override {
    // need to protect against overflow, so use a cmpxchg loop instead of fetch_then_add
    const uintptr_t max_value = (uintptr_t)-1;
    uintptr_t old_value = AtomicAccess::load(&_id_counter);
    while (old_value != max_value) {
      uintptr_t new_value = old_value + 1;
      // to avoid conflicts with oop addresses skip aligned values
      // (real heap addresses are always MinObjAlignmentInBytes-aligned)
      if ((new_value & MinObjAlignmentInBytesMask) == 0) {
        new_value++;
      }
      uintptr_t value = AtomicAccess::cmpxchg(&_id_counter, old_value, new_value);
      if (value == old_value) {
        // success
        return new_value;
      }
      // lost the race; retry from the value another thread installed
      old_value = value;
    }
    // if we are here, maximum id value is reached (counter is saturated)
    return max_value;
  }

};
2157
2158 void FlatObjectDumper::dump_flat_objects(AbstractDumpWriter* writer, oop holder,
2159 DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects) {
2160 // DumperSupport::dump_instance can add entries to flat_objects
2161 while (!flat_objects->is_empty()) {
2162 DumperFlatObject* obj = flat_objects->pop();
2163 DumperSupport::dump_instance(writer, obj->object_id(), holder, obj->offset(), obj->inline_klass(), class_cache, flat_objects);
2164 delete obj;
2165 }
2166 }
2167
// Callback to dump thread-related data for unmounted virtual threads;
// implemented by VM_HeapDumper.
class UnmountedVThreadDumper {
 public:
  // Dumps the records for the unmounted virtual thread 'vt' into 'segment_writer'.
  virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
};
2174
2175
// Support class used when iterating over the heap.
// For each object it emits the matching HPROF_GC_*_DUMP sub-record, delegating
// flattened value objects to _flat_dumper and unmounted virtual threads to
// _vthread_dumper (see do_object below).
class HeapObjectDumper : public ObjectClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() { return _writer; }
  UnmountedVThreadDumper* _vthread_dumper;  // dumps unmounted virtual threads found during the walk
  FlatObjectDumper* _flat_dumper;           // provides IDs and dumping for flattened value objects

  // cache handed to DumperSupport::dump_instance for writing instance records
  DumperClassCacheTable _class_cache;

 public:
  HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper, FlatObjectDumper* flat_dumper)
    : _writer(writer), _vthread_dumper(vthread_dumper), _flat_dumper(flat_dumper) {}

  // called for each object in the heap
  void do_object(oop o);
};
2193
2194 void HeapObjectDumper::do_object(oop o) {
2195 // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
2196 if (o->klass() == vmClasses::Class_klass()) {
2197 if (!java_lang_Class::is_primitive(o)) {
2198 return;
2199 }
2200 }
2201
2202 if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
2203 return;
2204 }
2205
2206 if (o->is_instance()) {
2207 DumperFlatObjectList flat_fields(_flat_dumper);
2208 // create a HPROF_GC_INSTANCE record for each object
2209 DumperSupport::dump_instance(writer(),
2210 cast_from_oop<uintptr_t>(o), // object_id is the address
2211 o, 0, // for heap instance holder is oop, offset is 0
2212 InstanceKlass::cast(o->klass()),
2213 &_class_cache, &flat_fields);
2214
2215 // if there are flattened fields, dump them
2216 if (!flat_fields.is_empty()) {
2217 _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_fields);
2218 }
2219
2220 // If we encounter an unmounted virtual thread it needs to be dumped explicitly
2221 // (mounted virtual threads are dumped with their carriers).
2222 if (java_lang_VirtualThread::is_instance(o)
2223 && ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
2224 _vthread_dumper->dump_vthread(o, writer());
2225 }
2226 } else if (o->is_objArray()) {
2227 DumperFlatObjectList flat_elements(_flat_dumper);
2228 // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2229 DumperSupport::dump_object_array(writer(), objArrayOop(o), &flat_elements);
2230 // if this is flat array, dump its elements
2231 if (!flat_elements.is_empty()) {
2232 _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_elements);
2233 }
2234 } else if (o->is_typeArray()) {
2235 // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2236 DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2237 }
2238 }
2239
2240 // The dumper controller for parallel heap dump
2241 class DumperController : public CHeapObj<mtInternal> {
2242 private:
2243 Monitor* _lock;
2244 Mutex* _global_writer_lock;
2245
2246 const uint _dumper_number;
2247 uint _complete_number;
2248
2249 bool _started; // VM dumper started and acquired global writer lock
2250
2251 public:
2252 DumperController(uint number) :
2253 // _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
2461 // The VM operation that performs the heap dump
2462 class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
2463 private:
2464 DumpWriter* _writer;
2465 JavaThread* _oome_thread;
2466 Method* _oome_constructor;
2467 bool _gc_before_heap_dump;
2468 GrowableArray<Klass*>* _klass_map;
2469
2470 ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
2471 int _thread_dumpers_count;
2472 volatile int _thread_serial_num;
2473 volatile int _frame_serial_num;
2474
2475 volatile int _dump_seq;
2476 // parallel heap dump support
2477 uint _num_dumper_threads;
2478 DumperController* _dumper_controller;
2479 ParallelObjectIterator* _poi;
2480
2481 // flat value object support
2482 FlatObjectDumper _flat_dumper;
2483
2484 // Dumper id of VMDumper thread.
2485 static const int VMDumperId = 0;
2486 // VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
2487 static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
2488 // the 1st dumper calling get_next_dumper_id becomes VM dumper
2489 int get_next_dumper_id() {
2490 return AtomicAccess::fetch_then_add(&_dump_seq, 1);
2491 }
2492
2493 DumpWriter* writer() const { return _writer; }
2494
2495 bool skip_operation() const;
2496
2497 // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
2498 void dump_threads(AbstractDumpWriter* writer);
2499
2500 bool is_oom_thread(JavaThread* thread) const {
2501 return thread == _oome_thread && _oome_constructor != nullptr;
2502 }
2503
2728 JNIHandles::oops_do(&jni_dumper);
2729 // technically not jni roots, but global roots
2730 // for things like preallocated throwable backtraces
2731 Universe::vm_global()->oops_do(&jni_dumper);
2732 // HPROF_GC_ROOT_STICKY_CLASS
2733 // These should be classes in the null class loader data, and not all classes
2734 // if !ClassUnloading
2735 StickyClassDumper stiky_class_dumper(&segment_writer);
2736 ClassLoaderData::the_null_class_loader_data()->classes_do(&stiky_class_dumper);
2737 }
2738
2739 // Heap iteration.
2740 // writes HPROF_GC_INSTANCE_DUMP records.
2741 // After each sub-record is written check_segment_length will be invoked
2742 // to check if the current segment exceeds a threshold. If so, a new
2743 // segment is started.
2744 // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2745 // of the heap dump.
2746
2747 TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
2748 HeapObjectDumper obj_dumper(&segment_writer, this, &_flat_dumper);
2749 if (!is_parallel_dump()) {
2750 Universe::heap()->object_iterate(&obj_dumper);
2751 } else {
2752 // == Parallel dump
2753 _poi->object_iterate(&obj_dumper, worker_id);
2754 }
2755
2756 segment_writer.finish_dump_segment();
2757 segment_writer.flush();
2758 }
2759
2760 _dumper_controller->dumper_complete(&segment_writer, writer());
2761
2762 if (is_vm_dumper(dumper_id)) {
2763 _dumper_controller->wait_all_dumpers_complete();
2764
2765 // flush global writer
2766 writer()->flush();
2767
2768 // At this point, all fragments of the heapdump have been written to separate files.
|