< prev index next > src/hotspot/share/services/heapDumper.cpp
Print this page
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/fieldStreams.inline.hpp"
+ #include "oops/flatArrayKlass.hpp"
+ #include "oops/flatArrayOop.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/arguments.hpp"
+ #include "runtime/atomicAccess.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
void write_u1(u1 x);
void write_u2(u2 x);
void write_u4(u4 x);
void write_u8(u8 x);
void write_objectID(oop o);
+ void write_objectID(uintptr_t id);
void write_rootID(oop* p);
void write_symbolID(Symbol* o);
void write_classID(Klass* k);
void write_id(u4 x);
// Writes the HPROF object ID for a heap object; the object's address serves as its ID.
void AbstractDumpWriter::write_objectID(oop o) {
write_address(cast_from_oop<address>(o));
}
+ // Writes a synthetic (non-address) object ID. Used for flat objects, which have
+ // no heap address of their own; ids are produced by FlatObjectIdProvider.
+ void AbstractDumpWriter::write_objectID(uintptr_t id) {
+ write_address((address)id);
+ }
+
// Writes a root ID: the address of the oop slot itself, not of the referent.
void AbstractDumpWriter::write_rootID(oop* p) {
write_address((address)p);
}
void AbstractDumpWriter::write_symbolID(Symbol* s) {
}
}
class DumperClassCacheTable;
class DumperClassCacheTableEntry;
+ class DumperFlatObject;
+ class DumperFlatObjectList;
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic {
public:
static hprofTag type2tag(BasicType type);
// Returns the size of the data to write.
static u4 sig2size(Symbol* sig);
// returns the size of the instance of the given class
! static u4 instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry = nullptr);
// dump a jfloat
static void dump_float(AbstractDumpWriter* writer, jfloat f);
// dump a jdouble
static void dump_double(AbstractDumpWriter* writer, jdouble d);
static hprofTag type2tag(BasicType type);
// Returns the size of the data to write.
static u4 sig2size(Symbol* sig);
// returns the size of the instance of the given class
! static u4 instance_size(InstanceKlass* ik);
// dump a jfloat
static void dump_float(AbstractDumpWriter* writer, jfloat f);
// dump a jdouble
static void dump_double(AbstractDumpWriter* writer, jdouble d);
static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
// returns the size of the static fields; also counts the static fields
static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
// dumps static fields of the given class
static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
! // dump the raw values of the instance fields of the given object
! static void dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry);
// get the count of the instance fields for a given class
static u2 get_instance_fields_count(InstanceKlass* ik);
// dumps the definition of the instance fields for a given class
! static void dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k);
! // creates HPROF_GC_INSTANCE_DUMP record for the given object
! static void dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache);
// creates HPROF_GC_CLASS_DUMP record for the given instance class
static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
// creates HPROF_GC_CLASS_DUMP record for a given array class
static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
! // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
! static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array);
// creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
// create HPROF_FRAME record for the given method and bci
static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
static void dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset);
// returns the size of the static fields; also counts the static fields
static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
// dumps static fields of the given class
static void dump_static_fields(AbstractDumpWriter* writer, Klass* k);
! // dump the raw values of the instance fields of the given object, fills flat_fields
! static void dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
+ DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields);
// get the count of the instance fields for a given class
static u2 get_instance_fields_count(InstanceKlass* ik);
// dumps the definition of the instance fields for a given class
! static void dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* k);
! // creates HPROF_GC_INSTANCE_DUMP record for the given object, fills flat_fields
! static void dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
+ DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields);
// creates HPROF_GC_CLASS_DUMP record for the given instance class
static void dump_instance_class(AbstractDumpWriter* writer, InstanceKlass* ik);
// creates HPROF_GC_CLASS_DUMP record for a given array class
static void dump_array_class(AbstractDumpWriter* writer, Klass* k);
! // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array, fills flat_elements if the object is flat array
! static void dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements);
// creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
static void dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array);
// create HPROF_FRAME record for the given method and bci
static void dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
p2i(o), o->klass()->external_name());
}
}
}
};
// Hash table of klasses to the klass metadata. This should greatly improve the
// hash dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
friend class DumperClassCacheTable;
private:
! GrowableArray<char> _sigs_start;
- GrowableArray<int> _offsets;
u4 _instance_size;
- int _entries;
public:
! DumperClassCacheTableEntry() : _instance_size(0), _entries(0) {};
! int field_count() { return _entries; }
! char sig_start(int field_idx) { return _sigs_start.at(field_idx); }
! int offset(int field_idx) { return _offsets.at(field_idx); }
- u4 instance_size() { return _instance_size; }
};
class DumperClassCacheTable {
private:
// HashTable SIZE is specified at compile time so we
log_trace(aot, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)",
p2i(o), o->klass()->external_name());
}
}
}
+
+ // Direct instances of ObjArrayKlass represent the Java types that Java code can see.
+ // RefArrayKlass/FlatArrayKlass describe different implementations of the arrays; filter
+ // them out so the same Java-visible array type is not dumped more than once.
+ // Returns true if the klass should be excluded from the dump.
+ static bool filter_out_klass(Klass* k) {
+ if (k->is_objArray_klass() && k->kind() != Klass::KlassKind::ObjArrayKlassKind) {
+ return true;
+ }
+ return false;
+ }
};
// Hash table of klasses to the klass metadata. This should greatly improve the
// hash dumping performance. This hash table is supposed to be used by a single
// thread only.
//
class DumperClassCacheTableEntry : public CHeapObj<mtServiceability> {
friend class DumperClassCacheTable;
+ public:
+ // Cached per-field metadata for one non-static field: the first character of the
+ // field signature (its HPROF type tag source), the field offset in the holder,
+ // and - for flat (inlined) value-class fields - the InlineKlass and layout kind.
+ class FieldDescriptor {
+ private:
+ char _sigs_start; // first char of the field signature
+ int _offset; // field offset within the holder
+ InlineKlass* _inline_klass; // nullptr for heap object
+ LayoutKind _layout_kind;
+ public:
+ FieldDescriptor(): _sigs_start(0), _offset(0), _inline_klass(nullptr), _layout_kind(LayoutKind::UNKNOWN) {}
+
+ // Builds a descriptor from a field stream positioned at the field.
+ template<typename FieldStreamType>
+ FieldDescriptor(const FieldStreamType& field)
+ : _sigs_start(field.signature()->char_at(0)), _offset(field.offset())
+ {
+ if (field.is_flat()) {
+ // Flat field: record the value class and layout so the dumper can later
+ // walk the inlined payload and detect the null marker (for nullable layouts).
+ const fieldDescriptor& fd = field.field_descriptor();
+ InstanceKlass* holder_klass = fd.field_holder();
+ InlineLayoutInfo* layout_info = holder_klass->inline_layout_info_adr(fd.index());
+ _inline_klass = layout_info->klass();
+ _layout_kind = layout_info->kind();
+ } else {
+ _inline_klass = nullptr;
+ _layout_kind = LayoutKind::REFERENCE;
+ }
+ }
+
+ char sig_start() const { return _sigs_start; }
+ int offset() const { return _offset; }
+ bool is_flat() const { return _inline_klass != nullptr; }
+ InlineKlass* inline_klass() const { return _inline_klass; }
+ LayoutKind layout_kind() const { return _layout_kind; }
+ // Nullable flat layouts carry an embedded null marker in the payload.
+ bool is_flat_nullable() const { return _layout_kind == LayoutKind::NULLABLE_ATOMIC_FLAT; }
+ };
+
private:
! GrowableArray<FieldDescriptor> _fields;
u4 _instance_size;
public:
! DumperClassCacheTableEntry(): _instance_size(0) {}
+
+ // Appends a non-static field and accounts for its serialized size in _instance_size.
+ template<typename FieldStreamType>
+ void add_field(const FieldStreamType& field) {
+ _fields.push(FieldDescriptor(field));
+ _instance_size += DumperSupport::sig2size(field.signature());
+ }
! const FieldDescriptor& field(int index) const { return _fields.at(index); }
! int field_count() const { return _fields.length(); }
! u4 instance_size() const { return _instance_size; }
};
class DumperClassCacheTable {
private:
// HashTable SIZE is specified at compile time so we
// This also improves look up performance by keeping the statically
// sized table from overloading.
static constexpr int CACHE_TOP = 256;
typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
! TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
PtrTable* _ptrs;
// Single-slot cache to handle the major case of objects of the same
// class back-to-back, e.g. from T[].
InstanceKlass* _last_ik;
// This also improves look up performance by keeping the statically
// sized table from overloading.
static constexpr int CACHE_TOP = 256;
typedef HashTable<InstanceKlass*, DumperClassCacheTableEntry*,
! TABLE_SIZE, AnyObj::C_HEAP, mtServiceability> PtrTable;
PtrTable* _ptrs;
// Single-slot cache to handle the major case of objects of the same
// class back-to-back, e.g. from T[].
InstanceKlass* _last_ik;
DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
if (from_cache == nullptr) {
entry = new DumperClassCacheTableEntry();
for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
if (!fld.access_flags().is_static()) {
! Symbol* sig = fld.signature();
- entry->_sigs_start.push(sig->char_at(0));
- entry->_offsets.push(fld.offset());
- entry->_entries++;
- entry->_instance_size += DumperSupport::sig2size(sig);
}
}
if (_ptrs->number_of_entries() >= CACHE_TOP) {
// We do not track the individual hit rates for table entries.
DumperClassCacheTableEntry** from_cache = _ptrs->get(ik);
if (from_cache == nullptr) {
entry = new DumperClassCacheTableEntry();
for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
if (!fld.access_flags().is_static()) {
! entry->add_field(fld);
}
}
if (_ptrs->number_of_entries() >= CACHE_TOP) {
// We do not track the individual hit rates for table entries.
unlink_all(_ptrs);
delete _ptrs;
}
};
+ // Describes a flat object (flattened field or element of a flat array) in the holder oop.
+ // Queued on a DumperFlatObjectList so the flattened payload can be dumped as a
+ // separate HPROF_GC_INSTANCE_DUMP record after its holder.
+ class DumperFlatObject: public CHeapObj<mtServiceability> {
+ friend class DumperFlatObjectList;
+ private:
+ DumperFlatObject* _next;
+
+ const uintptr_t _id; // object id
+
+ const int _offset; // offset of the flattened payload within the holder oop
+ InlineKlass* const _inline_klass; // value class of the flattened payload
+
+ public:
+ DumperFlatObject(uintptr_t id, int offset, InlineKlass* inline_klass)
+ : _next(nullptr), _id(id), _offset(offset), _inline_klass(inline_klass) {
+ }
+
+ uintptr_t object_id() const { return _id; }
+ int offset() const { return _offset; }
+ InlineKlass* inline_klass() const { return _inline_klass; }
+ };
+
+ // Interface for generating synthetic object ids for flat objects.
+ // NOTE(review): no virtual destructor - implementations are not deleted
+ // through this interface in this file; confirm if that ever changes.
+ class FlatObjectIdProvider {
+ public:
+ virtual uintptr_t get_id() = 0;
+ };
+
+ // Simple FIFO worklist of flat objects pending a dump.
+ // Nodes are CHeapObj-allocated by push() and owned by the caller after pop().
+ class DumperFlatObjectList {
+ private:
+ FlatObjectIdProvider* _id_provider;
+ DumperFlatObject* _head;
+ DumperFlatObject* _tail; // stale when the list is empty; reset by the next push
+
+ // Appends a node at the tail.
+ void push(DumperFlatObject* obj) {
+ if (_head == nullptr) {
+ _head = _tail = obj;
+ } else {
+ assert(_tail != nullptr, "must be");
+ _tail->_next = obj;
+ _tail = obj;
+ }
+ }
+
+ public:
+ DumperFlatObjectList(FlatObjectIdProvider* id_provider): _id_provider(id_provider), _head(nullptr), _tail(nullptr) {}
+
+ bool is_empty() const { return _head == nullptr; }
+
+ // Allocates a fresh synthetic id, enqueues a new flat-object node for the
+ // payload at `offset` in the holder, and returns the id.
+ uintptr_t push(int offset, InlineKlass* inline_klass) {
+ uintptr_t id = _id_provider->get_id();
+ DumperFlatObject* obj = new DumperFlatObject(id, offset, inline_klass);
+ push(obj);
+ return id;
+ }
+
+ // Removes and returns the head node; caller is responsible for deleting it.
+ DumperFlatObject* pop() {
+ assert(!is_empty(), "sanity");
+ DumperFlatObject* element = _head;
+ _head = element->_next;
+ element->_next = nullptr;
+ return element;
+ }
+ };
+
// write a header of the given type
// HPROF record header layout: tag (u1), ticks (u4, always 0 here), record length (u4).
void DumperSupport:: write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
writer->write_u1(tag);
writer->write_u4(0); // current ticks
writer->write_u4(len);
}
}
}
// returns the size of the instance of the given class
! u4 DumperSupport::instance_size(InstanceKlass* ik, DumperClassCacheTableEntry* class_cache_entry) {
! if (class_cache_entry != nullptr) {
! return class_cache_entry->instance_size();
! } else {
! u4 size = 0;
- for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
- if (!fld.access_flags().is_static()) {
- size += sig2size(fld.signature());
- }
}
- return size;
}
}
u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
field_count = 0;
u4 size = 0;
for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
if (fldc.access_flags().is_static()) {
field_count++;
size += sig2size(fldc.signature());
}
}
}
}
}
// returns the size of the instance of the given class
// (sum of the serialized sizes of all non-static fields, i.e. the byte count of
// the field-value data written for one HPROF_GC_INSTANCE_DUMP record)
! u4 DumperSupport::instance_size(InstanceKlass* ik) {
! u4 size = 0;
! for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
! if (!fld.access_flags().is_static()) {
! size += sig2size(fld.signature());
}
}
+ return size;
}
u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
field_count = 0;
u4 size = 0;
for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
if (fldc.access_flags().is_static()) {
+ assert(!fldc.is_flat(), "static fields cannot be flat");
+
field_count++;
size += sig2size(fldc.signature());
}
}
InstanceKlass* ik = InstanceKlass::cast(k);
// dump the field descriptors and raw values
for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
if (fld.access_flags().is_static()) {
+ assert(!fld.is_flat(), "static fields cannot be flat");
+
Symbol* sig = fld.signature();
writer->write_symbolID(fld.name()); // name
writer->write_u1(sig2tag(sig)); // type
writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
writer->write_objectID(init_lock);
}
}
! // dump the raw values of the instance fields of the given object
! void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o, DumperClassCacheTableEntry* class_cache_entry) {
assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
! dump_field_value(writer, class_cache_entry->sig_start(idx), o, class_cache_entry->offset(idx));
}
}
! // dumps the definition of the instance fields for a given class
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
u2 field_count = 0;
for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
! if (!fldc.access_flags().is_static()) field_count++;
}
return field_count;
}
// dumps the definition of the instance fields for a given class
! void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
- InstanceKlass* ik = InstanceKlass::cast(k);
-
// dump the field descriptors
for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
if (!fld.access_flags().is_static()) {
Symbol* sig = fld.signature();
writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
writer->write_objectID(init_lock);
}
}
! // dump the raw values of the instance fields of the given object, fills flat_fields
+ // `offset` is the base offset of the (possibly flattened) instance within the holder
+ // oop `o` (0 for a regular heap object). Heap-reference and primitive fields are
+ // written directly; each non-null flat field gets a fresh synthetic id which is
+ // written as the field value and queued on flat_fields for a later separate dump.
! void DumperSupport:: dump_instance_fields(AbstractDumpWriter* writer, oop o, int offset,
+ DumperClassCacheTableEntry* class_cache_entry, DumperFlatObjectList* flat_fields) {
assert(class_cache_entry != nullptr, "Pre-condition: must be provided");
for (int idx = 0; idx < class_cache_entry->field_count(); idx++) {
! const DumperClassCacheTableEntry::FieldDescriptor& field = class_cache_entry->field(idx);
+ int field_offset = offset + field.offset();
+ if (field.is_flat()) {
+ // check for possible nulls: nullable flat layouts embed a null marker in the payload
+ if (field.is_flat_nullable()) {
+ address payload = cast_from_oop<address>(o) + field_offset;
+ if (field.inline_klass()->is_payload_marked_as_null(payload)) {
+ writer->write_objectID(nullptr);
+ continue;
+ }
+ }
+ uintptr_t object_id = flat_fields->push(field_offset, field.inline_klass());
+ writer->write_objectID(object_id);
+ } else {
+ dump_field_value(writer, field.sig_start(), o, field_offset);
+ }
}
}
! // gets the count of the instance fields for a given class
+ // (counts only the fields declared by `ik` itself - JavaFieldStream does not
+ // walk superclasses)
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
u2 field_count = 0;
for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
! if (!fldc.access_flags().is_static()) {
+ field_count++;
+ }
}
return field_count;
}
// dumps the definition of the instance fields for a given class
! void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, InstanceKlass* ik) {
// dump the field descriptors
for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
if (!fld.access_flags().is_static()) {
Symbol* sig = fld.signature();
}
}
}
// creates HPROF_GC_INSTANCE_DUMP record for the given object
! void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o, DumperClassCacheTable* class_cache) {
! InstanceKlass* ik = InstanceKlass::cast(o->klass());
-
DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
! u4 is = instance_size(ik, cache_entry);
u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
! writer->write_objectID(o);
writer->write_u4(STACK_TRACE_ID);
// class ID
writer->write_classID(ik);
// number of bytes that follow
writer->write_u4(is);
// field values
! dump_instance_fields(writer, o, cache_entry);
writer->end_sub_record();
}
// creates HPROF_GC_CLASS_DUMP record for the given instance class
}
}
}
// creates HPROF_GC_INSTANCE_DUMP record for the given object
// `id` is the object id to write (the oop address for heap objects, a synthetic id
// for flat objects); `o` is the holder oop; `offset` is the position of the instance
// within the holder (0 for a regular heap object); `ik` is the class to dump as.
// Flat fields discovered while dumping are appended to flat_fields.
! void DumperSupport::dump_instance(AbstractDumpWriter* writer, uintptr_t id, oop o, int offset, InstanceKlass* ik,
! DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_fields) {
DumperClassCacheTableEntry* cache_entry = class_cache->lookup_or_create(ik);
! u4 is = cache_entry->instance_size();
u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
! writer->write_objectID(id);
writer->write_u4(STACK_TRACE_ID);
// class ID
writer->write_classID(ik);
// number of bytes that follow
writer->write_u4(is);
// field values
! if (offset != 0) {
+ // the object itself is flattened, so all fields are stored without headers;
+ // rebase the offset past the (absent) payload header
+ InlineKlass* inline_klass = InlineKlass::cast(ik);
+ offset -= inline_klass->payload_offset();
+ }
+
+ dump_instance_fields(writer, o, offset, cache_entry, flat_fields);
writer->end_sub_record();
}
// creates HPROF_GC_CLASS_DUMP record for the given instance class
// Hprof uses an u4 as record length field,
// which means we need to truncate arrays that are too long.
int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
BasicType type = ArrayKlass::cast(array->klass())->element_type();
! assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
int length = array->length();
int type_size;
! if (type == T_OBJECT) {
type_size = sizeof(address);
} else {
type_size = type2aelembytes(type);
}
// Hprof uses an u4 as record length field,
// which means we need to truncate arrays that are too long.
int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
BasicType type = ArrayKlass::cast(array->klass())->element_type();
! assert((type >= T_BOOLEAN && type <= T_OBJECT) || type == T_FLAT_ELEMENT, "invalid array element type");
int length = array->length();
int type_size;
! if (type == T_OBJECT || type == T_FLAT_ELEMENT) {
type_size = sizeof(address);
} else {
type_size = type2aelembytes(type);
}
}
return length;
}
// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
! void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array) {
// sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
short header_size = 1 + 2 * 4 + 2 * sizeof(address);
int length = calculate_array_max_length(writer, array, header_size);
u4 size = checked_cast<u4>(header_size + length * sizeof(address));
}
return length;
}
// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
! void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop array, DumperFlatObjectList* flat_elements) {
// sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
short header_size = 1 + 2 * 4 + 2 * sizeof(address);
int length = calculate_array_max_length(writer, array, header_size);
u4 size = checked_cast<u4>(header_size + length * sizeof(address));
// array class ID
writer->write_classID(array->klass());
// [id]* elements
! for (int index = 0; index < length; index++) {
! oop o = array->obj_at(index);
! o = mask_dormant_archived_object(o, array);
! writer->write_objectID(o);
}
writer->end_sub_record();
}
// array class ID
writer->write_classID(array->klass());
// [id]* elements
! if (array->is_flatArray()) {
! flatArrayOop farray = flatArrayOop(array);
! FlatArrayKlass* faklass = FlatArrayKlass::cast(farray->klass());
!
+ InlineKlass* vk = faklass->element_klass();
+ bool need_null_check = faklass->layout_kind() == LayoutKind::NULLABLE_ATOMIC_FLAT;
+
+ for (int index = 0; index < length; index++) {
+ address addr = (address)farray->value_at_addr(index, faklass->layout_helper());
+ // check for null
+ if (need_null_check) {
+ if (vk->is_payload_marked_as_null(addr)) {
+ writer->write_objectID(nullptr);
+ continue;
+ }
+ }
+ // offset in the array oop
+ int offset = (int)(addr - cast_from_oop<address>(farray));
+ uintptr_t object_id = flat_elements->push(offset, vk);
+ writer->write_objectID(object_id);
+ }
+ } else {
+ for (int index = 0; index < length; index++) {
+ oop o = array->obj_at(index);
+ o = mask_dormant_archived_object(o, array);
+ writer->write_objectID(o);
+ }
}
writer->end_sub_record();
}
public:
ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}
void do_klass(Klass* k) {
+ if (DumperSupport::filter_out_klass(k)) {
+ return;
+ }
if (k->is_instance_klass()) {
DumperSupport::dump_instance_class(writer(), InstanceKlass::cast(k));
} else {
DumperSupport::dump_array_class(writer(), k);
}
public:
LoadedClassDumper(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map)
: _writer(writer), _klass_map(klass_map), _class_serial_num(0) {}
void do_klass(Klass* k) {
+ if (DumperSupport::filter_out_klass(k)) {
+ return;
+ }
// len of HPROF_LOAD_CLASS record
u4 remaining = 2 * oopSize + 2 * sizeof(u4);
DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
// class serial number is just a number
writer()->write_u4(++_class_serial_num);
}
ShouldNotReachHere();
return nullptr;
}
// Callback to dump thread-related data for unmounted virtual threads;
// implemented by VM_HeapDumper.
class UnmountedVThreadDumper {
! public:
virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
};
// Support class used when iterating over the heap.
class HeapObjectDumper : public ObjectClosure {
private:
AbstractDumpWriter* _writer;
AbstractDumpWriter* writer() { return _writer; }
UnmountedVThreadDumper* _vthread_dumper;
DumperClassCacheTable _class_cache;
public:
! HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper)
! : _writer(writer), _vthread_dumper(vthread_dumper) {}
// called for each object in the heap
void do_object(oop o);
};
}
ShouldNotReachHere();
return nullptr;
}
+ // Generates unique synthetic object ids for flat objects across all dumper threads.
+ class FlatObjectDumper: public FlatObjectIdProvider {
+ private:
+ volatile uintptr_t _id_counter; // last id handed out; updated with CAS
+ public:
+ FlatObjectDumper(): _id_counter(0) {
+ }
+
+ // Drains flat_objects, emitting an HPROF_GC_INSTANCE_DUMP record per entry.
+ void dump_flat_objects(AbstractDumpWriter* writer, oop holder,
+ DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects);
+
+ // FlatObjectIdProvider implementation
+ virtual uintptr_t get_id() override {
+ // need to protect against overflow, so use a CAS loop instead of fetch_then_add
+ const uintptr_t max_value = (uintptr_t)-1;
+ uintptr_t old_value = AtomicAccess::load(&_id_counter);
+ while (old_value != max_value) {
+ uintptr_t new_value = old_value + 1;
+ // to avoid conflicts with oop addresses skip aligned values
+ // (real object addresses are always MinObjAlignment-aligned, so an
+ // unaligned synthetic id can never equal an oop address)
+ if ((new_value & MinObjAlignmentInBytesMask) == 0) {
+ new_value++;
+ }
+ uintptr_t value = AtomicAccess::cmpxchg(&_id_counter, old_value, new_value);
+ if (value == old_value) {
+ // success
+ return new_value;
+ }
+ old_value = value;
+ }
+ // if we are here, maximum id value is reached; the counter stays saturated
+ // and every subsequent caller also gets max_value
+ return max_value;
+ }
+
+ };
+
+ // Drains the flat-object worklist for one holder oop, dumping each flat object as
+ // its own HPROF_GC_INSTANCE_DUMP record and freeing the node afterwards.
+ void FlatObjectDumper::dump_flat_objects(AbstractDumpWriter* writer, oop holder,
+ DumperClassCacheTable* class_cache, DumperFlatObjectList* flat_objects) {
+ // DumperSupport::dump_instance can add entries to flat_objects
+ // (nested flat fields), so loop until the worklist is fully drained
+ while (!flat_objects->is_empty()) {
+ DumperFlatObject* obj = flat_objects->pop();
+ DumperSupport::dump_instance(writer, obj->object_id(), holder, obj->offset(), obj->inline_klass(), class_cache, flat_objects);
+ delete obj;
+ }
+ }
+
// Callback to dump thread-related data for unmounted virtual threads;
// implemented by VM_HeapDumper.
class UnmountedVThreadDumper {
! public:
virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
};
+
// Support class used when iterating over the heap.
class HeapObjectDumper : public ObjectClosure {
private:
AbstractDumpWriter* _writer;
AbstractDumpWriter* writer() { return _writer; }
UnmountedVThreadDumper* _vthread_dumper;
+ FlatObjectDumper* _flat_dumper;
DumperClassCacheTable _class_cache;
public:
! HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper, FlatObjectDumper* flat_dumper)
! : _writer(writer), _vthread_dumper(vthread_dumper), _flat_dumper(flat_dumper) {}
// called for each object in the heap
void do_object(oop o);
};
if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
return;
}
if (o->is_instance()) {
// create a HPROF_GC_INSTANCE record for each object
! DumperSupport::dump_instance(writer(), o, &_class_cache);
// If we encounter an unmounted virtual thread it needs to be dumped explicitly
// (mounted virtual threads are dumped with their carriers).
if (java_lang_VirtualThread::is_instance(o)
&& ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
_vthread_dumper->dump_vthread(o, writer());
}
} else if (o->is_objArray()) {
// create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
! DumperSupport::dump_object_array(writer(), objArrayOop(o));
} else if (o->is_typeArray()) {
// create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
}
}
if (DumperSupport::mask_dormant_archived_object(o, nullptr) == nullptr) {
return;
}
if (o->is_instance()) {
+ DumperFlatObjectList flat_fields(_flat_dumper);
// create a HPROF_GC_INSTANCE record for each object
! DumperSupport::dump_instance(writer(),
+ cast_from_oop<uintptr_t>(o), // object_id is the address
+ o, 0, // for heap instance holder is oop, offset is 0
+ InstanceKlass::cast(o->klass()),
+ &_class_cache, &flat_fields);
+
+ // if there are flattened fields, dump them
+ if (!flat_fields.is_empty()) {
+ _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_fields);
+ }
+
// If we encounter an unmounted virtual thread it needs to be dumped explicitly
// (mounted virtual threads are dumped with their carriers).
if (java_lang_VirtualThread::is_instance(o)
&& ThreadDumper::should_dump_vthread(o) && !ThreadDumper::is_vthread_mounted(o)) {
_vthread_dumper->dump_vthread(o, writer());
}
} else if (o->is_objArray()) {
+ DumperFlatObjectList flat_elements(_flat_dumper);
// create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
! DumperSupport::dump_object_array(writer(), objArrayOop(o), &flat_elements);
+ // if this is flat array, dump its elements
+ if (!flat_elements.is_empty()) {
+ _flat_dumper->dump_flat_objects(writer(), o, &_class_cache, &flat_elements);
+ }
} else if (o->is_typeArray()) {
// create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
}
}
// parallel heap dump support
uint _num_dumper_threads;
DumperController* _dumper_controller;
ParallelObjectIterator* _poi;
+ // flat value object support
+ FlatObjectDumper _flat_dumper;
+
// Dumper id of VMDumper thread.
static const int VMDumperId = 0;
// VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
// the 1st dumper calling get_next_dumper_id becomes VM dumper
// segment is started.
// The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
// of the heap dump.
TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
! HeapObjectDumper obj_dumper(&segment_writer, this);
if (!is_parallel_dump()) {
Universe::heap()->object_iterate(&obj_dumper);
} else {
// == Parallel dump
_poi->object_iterate(&obj_dumper, worker_id);
// segment is started.
// The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
// of the heap dump.
TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
! HeapObjectDumper obj_dumper(&segment_writer, this, &_flat_dumper);
if (!is_parallel_dump()) {
Universe::heap()->object_iterate(&obj_dumper);
} else {
// == Parallel dump
_poi->object_iterate(&obj_dumper, worker_id);
< prev index next >