1 /*
2 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/classLoaderData.inline.hpp"
26 #include "classfile/classLoaderDataGraph.hpp"
27 #include "classfile/moduleEntry.hpp"
28 #include "classfile/vmClasses.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "logging/log.hpp"
31 #include "logging/logTag.hpp"
32 #include "memory/heapInspection.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "nmt/memTracker.hpp"
36 #include "oops/fieldInfo.hpp"
37 #include "oops/fieldStreams.inline.hpp"
38 #include "oops/inlineKlass.inline.hpp"
39 #include "oops/oop.inline.hpp"
40 #include "runtime/atomicAccess.hpp"
41 #include "runtime/fieldDescriptor.inline.hpp"
42 #include "runtime/os.hpp"
43 #include "utilities/globalDefinitions.hpp"
44 #include "utilities/macros.hpp"
45 #include "utilities/stack.inline.hpp"
46
47 // HeapInspection
48
49 inline KlassInfoEntry::~KlassInfoEntry() {
50 if (_subclasses != nullptr) {
51 delete _subclasses;
52 }
53 }
54
55 inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
56 if (_subclasses == nullptr) {
57 _subclasses = new (mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
58 }
59 _subclasses->append(cie);
60 }
61
62 int KlassInfoEntry::compare(KlassInfoEntry* e1, KlassInfoEntry* e2) {
63 if(e1->_instance_words > e2->_instance_words) {
64 return -1;
65 } else if(e1->_instance_words < e2->_instance_words) {
66 return 1;
67 }
68 // Sort alphabetically, note 'Z' < '[' < 'a', but it's better to group
69 // the array classes before all the instance classes.
70 ResourceMark rm;
71 const char* name1 = e1->klass()->external_name();
72 const char* name2 = e2->klass()->external_name();
73 bool d1 = (name1[0] == JVM_SIGNATURE_ARRAY);
74 bool d2 = (name2[0] == JVM_SIGNATURE_ARRAY);
75 if (d1 && !d2) {
76 return -1;
77 } else if (d2 && !d1) {
78 return 1;
79 } else {
80 return strcmp(name1, name2);
81 }
82 }
83
84 const char* KlassInfoEntry::name() const {
85 const char* name;
86 if (_klass->name() != nullptr) {
87 name = _klass->external_name();
88 } else {
89 if (_klass == Universe::boolArrayKlass()) name = "<boolArrayKlass>"; else
90 if (_klass == Universe::charArrayKlass()) name = "<charArrayKlass>"; else
91 if (_klass == Universe::floatArrayKlass()) name = "<floatArrayKlass>"; else
92 if (_klass == Universe::doubleArrayKlass()) name = "<doubleArrayKlass>"; else
93 if (_klass == Universe::byteArrayKlass()) name = "<byteArrayKlass>"; else
94 if (_klass == Universe::shortArrayKlass()) name = "<shortArrayKlass>"; else
95 if (_klass == Universe::intArrayKlass()) name = "<intArrayKlass>"; else
96 if (_klass == Universe::longArrayKlass()) name = "<longArrayKlass>"; else
97 name = "<no name>";
98 }
99 return name;
100 }
101
// Prints one histogram row: instance count, total byte size, class name,
// and (for classes in a named module) "(module@version)".
void KlassInfoEntry::print_on(outputStream* st) const {
  ResourceMark rm;

  // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
  ModuleEntry* module = _klass->module();
  if (module->is_named()) {
    st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s (%s%s%s)",
                 (int64_t)_instance_count,
                 (uint64_t)_instance_words * HeapWordSize,
                 name(),
                 module->name()->as_C_string(),
                 // The "@version" suffix is emitted only when a version is recorded.
                 module->version() != nullptr ? "@" : "",
                 module->version() != nullptr ? module->version()->as_C_string() : "");
  } else {
    st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s",
                 (int64_t)_instance_count,
                 (uint64_t)_instance_words * HeapWordSize,
                 name());
  }
}
122
123 KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
124 // Can happen if k is an archived class that we haven't loaded yet.
125 if (k->java_mirror_no_keepalive() == nullptr) {
126 return nullptr;
127 }
128
129 KlassInfoEntry* elt = _list;
130 while (elt != nullptr) {
131 if (elt->is_equal(k)) {
132 return elt;
133 }
134 elt = elt->next();
135 }
136 elt = new (std::nothrow) KlassInfoEntry(k, list());
137 // We may be out of space to allocate the new entry.
138 if (elt != nullptr) {
139 set_list(elt);
140 }
141 return elt;
142 }
143
144 void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
145 KlassInfoEntry* elt = _list;
146 while (elt != nullptr) {
147 cic->do_cinfo(elt);
148 elt = elt->next();
149 }
150 }
151
152 void KlassInfoBucket::empty() {
153 KlassInfoEntry* elt = _list;
154 _list = nullptr;
155 while (elt != nullptr) {
156 KlassInfoEntry* next = elt->next();
157 delete elt;
158 elt = next;
159 }
160 }
161
// Closure handed to ClassLoaderDataGraph::classes_do() so that every loaded
// class gets an entry in the table (lookup() creates entries on demand).
class KlassInfoTable::AllClassesFinder : public LockedClassesDo {
  KlassInfoTable *_table;
public:
  AllClassesFinder(KlassInfoTable* table) : _table(table) {}
  virtual void do_klass(Klass* k) {
    // This has the SIDE EFFECT of creating a KlassInfoEntry
    // for <k>, if one doesn't exist yet.
    _table->lookup(k);
  }
};
172
173
// Allocates the bucket array from C-heap. On allocation failure _buckets
// stays null (callers check via allocation_failed()). When 'add_all_classes'
// is set, an entry is eagerly created for every loaded class.
KlassInfoTable::KlassInfoTable(bool add_all_classes) {
  _size_of_instances_in_words = 0;
  // A well-known klass address serves as the base for the hash function,
  // since Klass addresses cluster near it.
  _ref = (uintptr_t) Universe::boolArrayKlass();
  _buckets =
    (KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
                                    mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_buckets != nullptr) {
    for (int index = 0; index < _num_buckets; index++) {
      _buckets[index].initialize();
    }
    if (add_all_classes) {
      AllClassesFinder finder(this);
      ClassLoaderDataGraph::classes_do(&finder);
    }
  }
}
190
// Frees all entries in every bucket, then the bucket array itself.
KlassInfoTable::~KlassInfoTable() {
  if (_buckets != nullptr) {
    for (int index = 0; index < _num_buckets; index++) {
      _buckets[index].empty();
    }
    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
    _buckets = nullptr;
  }
}
200
// Hashes a Klass by its distance from the _ref base klass, dropping the
// low bits (Klass addresses are aligned, so they carry no information).
uint KlassInfoTable::hash(const Klass* p) {
  return (uint)(((uintptr_t)p - _ref) >> 2);
}
204
// Returns the entry for 'k', creating one on first lookup (see bucket
// lookup). May return nullptr — see comment below.
KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
  uint idx = hash(k) % _num_buckets;
  assert(_buckets != nullptr, "Allocation failure should have been caught");
  KlassInfoEntry* e = _buckets[idx].lookup(k);
  // Lookup may fail if this is a new klass for which we
  // could not allocate space for a new entry, or if it's
  // an archived class that we haven't loaded yet.
  assert(e == nullptr || k == e->klass(), "must be equal");
  return e;
}
215
216 // Return false if the entry could not be recorded on account
217 // of running out of space required to create a new entry.
218 bool KlassInfoTable::record_instance(const oop obj) {
219 Klass* k = obj->klass();
220 KlassInfoEntry* elt = lookup(k);
221 // elt may be null if it's a new klass for which we
222 // could not allocate space for a new entry in the hashtable.
223 if (elt != nullptr) {
224 elt->set_count(elt->count() + 1);
225 elt->set_words(elt->words() + obj->size());
226 _size_of_instances_in_words += obj->size();
227 return true;
228 } else {
229 return false;
230 }
231 }
232
233 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
234 assert(_buckets != nullptr, "Allocation failure should have been caught");
235 for (int index = 0; index < _num_buckets; index++) {
236 _buckets[index].iterate(cic);
237 }
238 }
239
// Total size, in HeapWords, of all instances recorded so far.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
243
244 // Return false if the entry could not be recorded on account
245 // of running out of space required to create a new entry.
246 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
247 Klass* k = cie->klass();
248 KlassInfoEntry* elt = lookup(k);
249 // elt may be null if it's a new klass for which we
250 // could not allocate space for a new entry in the hashtable.
251 if (elt != nullptr) {
252 elt->set_count(elt->count() + cie->count());
253 elt->set_words(elt->words() + cie->words());
254 _size_of_instances_in_words += cie->words();
255 return true;
256 }
257 return false;
258 }
259
260 class KlassInfoTableMergeClosure : public KlassInfoClosure {
261 private:
262 KlassInfoTable* _dest;
263 bool _success;
264 public:
265 KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
266 void do_cinfo(KlassInfoEntry* cie) {
267 _success &= _dest->merge_entry(cie);
268 }
269 bool success() { return _success; }
270 };
271
// Merges all entries from 'table' into this table. Returns false if any
// entry could not be merged (out of C-heap for a new entry).
bool KlassInfoTable::merge(KlassInfoTable* table) {
  KlassInfoTableMergeClosure closure(this);
  table->iterate(&closure);
  return closure.success();
}
278
279 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
280 return (*e1)->compare(*e1,*e2);
281 }
282
// Creates an (initially empty) histogram over the entries of 'cit'.
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}
287
// Frees only the element array; the KlassInfoEntry objects it points to
// are owned by the table's buckets (freed in KlassInfoBucket::empty()).
KlassInfoHisto::~KlassInfoHisto() {
  delete _elements;
}
291
// Appends one entry to the histogram (unsorted until sort() is called).
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
295
// Orders the histogram rows: biggest total size first (see KlassInfoEntry::compare).
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
299
// Prints each row prefixed with its 1-based rank, followed by a totals line.
void KlassInfoHisto::print_elements(outputStream* st) const {
  // simplify the formatting (ILP32 vs LP64) - store the sum in 64-bit
  int64_t total = 0;
  uint64_t totalw = 0;
  for(int i=0; i < elements()->length(); i++) {
    st->print("%4d: ", i+1);
    elements()->at(i)->print_on(st);
    total += elements()->at(i)->count();
    totalw += elements()->at(i)->words();
  }
  // Grand totals: instance count and byte size (words scaled to bytes).
  st->print_cr("Total " INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13),
               total, totalw * HeapWordSize);
}
313
314 class HierarchyClosure : public KlassInfoClosure {
315 private:
316 GrowableArray<KlassInfoEntry*> *_elements;
317 public:
318 HierarchyClosure(GrowableArray<KlassInfoEntry*> *_elements) : _elements(_elements) {}
319
320 void do_cinfo(KlassInfoEntry* cie) {
321 // ignore array classes
322 if (cie->klass()->is_instance_klass()) {
323 _elements->append(cie);
324 }
325 }
326 };
327
// Prints the class hierarchy rooted at java.lang.Object. When 'classname'
// is non-null, printing is restricted to that class's hierarchy: its
// superclasses always, and its subclasses too when 'print_subclasses' is
// set. 'print_interfaces' additionally lists implemented interfaces.
void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfaces,
                                           bool print_subclasses, char* classname) {
  ResourceMark rm;
  Stack <KlassInfoEntry*, mtClass> class_stack;
  GrowableArray<KlassInfoEntry*> elements;

  // Add all classes to the KlassInfoTable, which allows for quick lookup.
  // A KlassInfoEntry will be created for each class.
  KlassInfoTable cit(true);
  if (cit.allocation_failed()) {
    st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
    return;
  }

  // Add all created KlassInfoEntry instances to the elements array for easy
  // iteration, and to allow each KlassInfoEntry instance to have a unique index.
  HierarchyClosure hc(&elements);
  cit.iterate(&hc);

  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    Klass* super = cie->klass()->super();

    // Set the index for the class.
    cie->set_index(i + 1);

    // Add the class to the subclass array of its superclass.
    if (super != nullptr) {
      KlassInfoEntry* super_cie = cit.lookup(super);
      assert(super_cie != nullptr, "could not lookup superclass");
      super_cie->add_subclass(cie);
    }
  }

  // Set the do_print flag for each class that should be printed.
  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    if (classname == nullptr) {
      // We are printing all classes.
      cie->set_do_print(true);
    } else {
      // We are only printing the hierarchy of a specific class.
      if (strcmp(classname, cie->klass()->external_name()) == 0) {
        assert(cie->klass()->is_instance_klass(), "elements array contains only instance klasses");
        KlassHierarchy::set_do_print_for_class_hierarchy(cie, &cit, print_subclasses);
      }
    }
  }

  // Now we do a depth first traversal of the class hierarchy. The class_stack will
  // maintain the list of classes we still need to process. Start things off
  // by priming it with java.lang.Object.
  KlassInfoEntry* jlo_cie = cit.lookup(vmClasses::Object_klass());
  assert(jlo_cie != nullptr, "could not lookup java.lang.Object");
  class_stack.push(jlo_cie);

  // Repeatedly pop the top item off the stack, print its class info,
  // and push all of its subclasses on to the stack. Do this until there
  // are no classes left on the stack.
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    if (curr_cie->do_print()) {
      print_class(st, curr_cie, print_interfaces);
      if (curr_cie->subclasses() != nullptr) {
        // Current class has subclasses, so push all of them onto the stack.
        for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
          KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
          if (cie->do_print()) {
            class_stack.push(cie);
          }
        }
      }
    }
  }

  st->flush();
}
405
// Sets the do_print flag for every superclass of the specified class, for
// the class itself, and (when 'print_subclasses' is set) recursively for
// all of its subclasses.
void KlassHierarchy::set_do_print_for_class_hierarchy(KlassInfoEntry* cie, KlassInfoTable* cit,
                                                      bool print_subclasses) {
  // Set do_print for all superclasses of this class.
  InstanceKlass* super = InstanceKlass::cast(cie->klass())->super();
  while (super != nullptr) {
    KlassInfoEntry* super_cie = cit->lookup(super);
    super_cie->set_do_print(true);
    super = super->super();
  }

  // Set do_print for this class and all of its subclasses,
  // via an explicit-stack depth-first walk.
  Stack <KlassInfoEntry*, mtClass> class_stack;
  class_stack.push(cie);
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    curr_cie->set_do_print(true);
    if (print_subclasses && curr_cie->subclasses() != nullptr) {
      // Current class has subclasses, so push all of them onto the stack.
      for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
        KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
        class_stack.push(cie);
      }
    }
  }
}
432
433 static void print_indent(outputStream* st, int indent) {
434 while (indent != 0) {
435 st->print("|");
436 indent--;
437 if (indent != 0) {
438 st->print(" ");
439 }
440 }
441 }
442
// Print the class name and its unique ClassLoader identifier, formatted
// as "<name>/<loader-id>".
static void print_classname(outputStream* st, Klass* klass) {
  oop loader_oop = klass->class_loader_data()->class_loader();
  st->print("%s/", klass->external_name());
  if (loader_oop == nullptr) {
    // The bootstrap loader has no loader object; print a literal "null".
    st->print("null");
  } else {
    // The ClassLoaderData address serves as the loader's unique id.
    st->print(PTR_FORMAT, p2i(klass->class_loader_data()));
  }
}
453
// Prints one "implements" line for an interface at the given indent depth;
// intf_type distinguishes "declared" from "inherited" interfaces.
static void print_interface(outputStream* st, InstanceKlass* intf_klass, const char* intf_type, int indent) {
  print_indent(st, indent);
  st->print(" implements ");
  print_classname(st, intf_klass);
  st->print(" (%s intf)\n", intf_type);
}
460
// Prints one class of the hierarchy: indentation proportional to its depth
// in the superclass chain, its name and loader id, an "(intf)" marker for
// interfaces, and optionally its declared and inherited interfaces.
void KlassHierarchy::print_class(outputStream* st, KlassInfoEntry* cie, bool print_interfaces) {
  ResourceMark rm;
  InstanceKlass* klass = (InstanceKlass*)cie->klass();
  int indent = 0;

  // Print indentation with proper indicators of superclass:
  // depth equals the length of the superclass chain.
  Klass* super = klass->super();
  while (super != nullptr) {
    super = super->super();
    indent++;
  }
  print_indent(st, indent);
  if (indent != 0) st->print("--");

  // Print the class name, its unique ClassLoader identifier, and if it is an interface.
  print_classname(st, klass);
  if (klass->is_interface()) {
    st->print(" (intf)");
  }
  st->print("\n");

  // Print any interfaces the class has.
  if (print_interfaces) {
    Array<InstanceKlass*>* local_intfs = klass->local_interfaces();
    Array<InstanceKlass*>* trans_intfs = klass->transitive_interfaces();
    for (int i = 0; i < local_intfs->length(); i++) {
      print_interface(st, local_intfs->at(i), "declared", indent);
    }
    for (int i = 0; i < trans_intfs->length(); i++) {
      InstanceKlass* trans_interface = trans_intfs->at(i);
      // Only print transitive interfaces if they are not also declared.
      if (!local_intfs->contains(trans_interface)) {
        print_interface(st, trans_interface, "inherited", indent);
      }
    }
  }
}
498
// Prints the histogram column header followed by all (previously sorted) rows.
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num #instances #bytes class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}
504
505 class HistoClosure : public KlassInfoClosure {
506 private:
507 KlassInfoHisto* _cih;
508 public:
509 HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
510
511 void do_cinfo(KlassInfoEntry* cie) {
512 _cih->add(cie);
513 }
514 };
515
516
517 class FindClassByNameClosure : public KlassInfoClosure {
518 private:
519 GrowableArray<Klass*>* _klasses;
520 Symbol* _classname;
521 public:
522 FindClassByNameClosure(GrowableArray<Klass*>* klasses, Symbol* classname) :
523 _klasses(klasses), _classname(classname) { }
524
525 void do_cinfo(KlassInfoEntry* cie) {
526 if (cie->klass()->name() == _classname) {
527 _klasses->append(cie->klass());
528 }
529 }
530 };
531
532 class FieldDesc {
533 private:
534 Symbol* _name;
535 Symbol* _signature;
536 int _offset;
537 int _index;
538 InstanceKlass* _holder;
539 AccessFlags _access_flags;
540 FieldInfo::FieldFlags _field_flags;
541 public:
542 FieldDesc() : _name(nullptr), _signature(nullptr), _offset(-1), _index(-1), _holder(nullptr),
543 _access_flags(AccessFlags()), _field_flags(FieldInfo::FieldFlags((u4)0)) { }
544
545 FieldDesc(fieldDescriptor& fd) : _name(fd.name()), _signature(fd.signature()), _offset(fd.offset()),
546 _index(fd.index()), _holder(fd.field_holder()),
547 _access_flags(fd.access_flags()), _field_flags(fd.field_flags()) { }
548
549 const Symbol* name() { return _name;}
550 const Symbol* signature() { return _signature; }
551 int offset() const { return _offset; }
552 int index() const { return _index; }
553 const InstanceKlass* holder() { return _holder; }
554 const AccessFlags& access_flags() { return _access_flags; }
555 bool is_null_free_inline_type() const { return _field_flags.is_null_free_inline_type(); }
556 };
557
558 static int compare_offset(FieldDesc* f1, FieldDesc* f2) {
559 return f1->offset() > f2->offset() ? 1 : -1;
560 }
561
562 static void print_field(outputStream* st, int level, int offset, FieldDesc& fd, bool is_inline_type, bool is_flat ) {
563 const char* flat_field_msg = "";
564 if (is_flat) {
565 flat_field_msg = is_flat ? "flat" : "not flat";
566 }
567 st->print_cr(" @ %d %*s \"%s\" %s %s %s",
568 offset, level * 3, "",
569 fd.name()->as_C_string(),
570 fd.signature()->as_C_string(),
571 is_inline_type ? " // inline type " : "",
572 flat_field_msg);
573 }
574
// Recursively prints the layout of a flat (inlined) field of type 'klass'.
// 'offset' is the flat field's position in the enclosing holder; each
// contained field's offset is rebased by subtracting the inline klass's
// payload offset so printed offsets stay holder-relative.
static void print_flat_field(outputStream* st, int level, int offset, InstanceKlass* klass) {
  assert(klass->is_inline_klass(), "Only inline types can be flat");
  InlineKlass* vklass = InlineKlass::cast(klass);
  // Collect the non-static fields, then sort them by offset.
  GrowableArray<FieldDesc>* fields = new (mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
  for (AllFieldStream fd(klass); !fd.done(); fd.next()) {
    if (!fd.access_flags().is_static()) {
      fields->append(FieldDesc(fd.field_descriptor()));
    }
  }
  fields->sort(compare_offset);
  for(int i = 0; i < fields->length(); i++) {
    FieldDesc fd = fields->at(i);
    int offset2 = offset + fd.offset() - vklass->payload_offset();
    print_field(st, level, offset2, fd,
        fd.is_null_free_inline_type(), fd.holder()->field_is_flat(fd.index()));
    // Flat fields nested inside this flat field are expanded recursively.
    if (fd.holder()->field_is_flat(fd.index())) {
      print_flat_field(st, level + 1, offset2 ,
          InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
    }
  }
}
596
// Prints the field layout (offset, name, signature, flatness) of every
// loaded instance class whose name matches 'class_name'.
void PrintClassLayout::print_class_layout(outputStream* st, char* class_name) {
  KlassInfoTable cit(true);
  if (cit.allocation_failed()) {
    st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
    return;
  }

  Thread* THREAD = Thread::current();

  // probe() only finds an already-interned symbol; if the class was never
  // loaded there is no matching Symbol and nothing will match below.
  Symbol* classname = SymbolTable::probe(class_name, (int)strlen(class_name));

  GrowableArray<Klass*>* klasses = new (mtServiceability) GrowableArray<Klass*>(100, mtServiceability);

  FindClassByNameClosure fbnc(klasses, classname);
  cit.iterate(&fbnc);

  for(int i = 0; i < klasses->length(); i++) {
    Klass* klass = klasses->at(i);
    if (!klass->is_instance_klass()) continue; // Skip
    InstanceKlass* ik = InstanceKlass::cast(klass);
    int tab = 1;
    st->print_cr("Class %s [@%s]:", klass->name()->as_C_string(),
        klass->class_loader_data()->loader_name());
    ResourceMark rm;
    // Collect the non-static fields of this class and sort them by offset.
    GrowableArray<FieldDesc>* fields = new (mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
    for (AllFieldStream fd(ik); !fd.done(); fd.next()) {
      if (!fd.access_flags().is_static()) {
        fields->append(FieldDesc(fd.field_descriptor()));
      }
    }
    fields->sort(compare_offset);
    // NOTE(review): the inner loop reuses index name 'i', shadowing the
    // outer klass index; legal, but easy to misread.
    for(int i = 0; i < fields->length(); i++) {
      FieldDesc fd = fields->at(i);
      print_field(st, 0, fd.offset(), fd, fd.is_null_free_inline_type(), fd.holder()->field_is_flat(fd.index()));
      // Flat fields are expanded recursively at the next nesting level.
      if (fd.holder()->field_is_flat(fd.index())) {
        print_flat_field(st, 1, fd.offset(),
            InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
      }
    }
  }
  st->cr();
}
639
640 class RecordInstanceClosure : public ObjectClosure {
641 private:
642 KlassInfoTable* _cit;
643 uintx _missed_count;
644 BoolObjectClosure* _filter;
645 public:
646 RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
647 _cit(cit), _missed_count(0), _filter(filter) {}
648
649 void do_object(oop obj) {
650 if (should_visit(obj)) {
651 if (!_cit->record_instance(obj)) {
652 _missed_count++;
653 }
654 }
655 }
656
657 uintx missed_count() { return _missed_count; }
658
659 private:
660 bool should_visit(oop obj) {
661 return _filter == nullptr || _filter->do_object_b(obj);
662 }
663 };
664
// Heap inspection for every worker.
// When native OOM happens for KlassInfoTable, set _success to false.
void ParHeapInspectTask::work(uint worker_id) {
  uintx missed_count = 0;
  bool merge_success = true;
  if (!AtomicAccess::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  // Each worker builds its own private table to avoid contention...
  KlassInfoTable cit(false);
  if (cit.allocation_failed()) {
    // fail to allocate memory, stop parallel mode
    AtomicAccess::store(&_success, false);
    return;
  }
  RecordInstanceClosure ric(&cit, _filter);
  _poi->object_iterate(&ric, worker_id);
  missed_count = ric.missed_count();
  {
    // ...and then merges it into the shared table under the mutex.
    MutexLocker x(&_mutex, Mutex::_no_safepoint_check_flag);
    merge_success = _shared_cit->merge(&cit);
  }
  if (merge_success) {
    AtomicAccess::add(&_missed_count, missed_count);
  } else {
    AtomicAccess::store(&_success, false);
  }
}
694
// Fills 'cit' with per-class instance statistics for all heap objects
// passing 'filter' (null filter means all objects), running in parallel
// when 'workers' is available and falling back to a serial walk otherwise.
// Returns the number of objects that could not be recorded.
uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, WorkerThreads* workers) {
  // Try parallel first.
  if (workers != nullptr) {
    ResourceMark rm;
    ParallelObjectIterator poi(workers->active_workers());
    ParHeapInspectTask task(&poi, cit, filter);
    // Run task with the active workers.
    workers->run_task(&task);
    if (task.success()) {
      return task.missed_count();
    }
    // Parallel iteration failed (e.g. native OOM in a worker's table);
    // fall through and retry serially.
  }

  ResourceMark rm;
  // If no parallel iteration available, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
714
// Produces the class histogram (jmap -histo / GC.class_histogram style) on
// 'st': walks the heap, sorts classes by total instance size, and prints.
void HeapInspection::heap_inspection(outputStream* st, WorkerThreads* workers) {
  ResourceMark rm;

  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    // populate table with object allocation info
    uintx missed_count = populate_table(&cit, nullptr, workers);
    if (missed_count != 0) {
      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted %zu"
                               " total instances in data below",
                               missed_count);
    }

    // Sort and print klass instance info
    KlassInfoHisto histo(&cit);
    HistoClosure hc(&histo);

    cit.iterate(&hc);

    histo.sort();
    histo.print_histo_on(st);
  } else {
    st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
  }
  st->flush();
}
741
742 class FindInstanceClosure : public ObjectClosure {
743 private:
744 Klass* _klass;
745 GrowableArray<oop>* _result;
746
747 public:
748 FindInstanceClosure(Klass* k, GrowableArray<oop>* result) : _klass(k), _result(result) {};
749
750 void do_object(oop obj) {
751 if (obj->is_a(_klass)) {
752 // obj was read with AS_NO_KEEPALIVE, or equivalent.
753 // The object needs to be kept alive when it is published.
754 Universe::heap()->keep_alive(obj);
755
756 _result->append(obj);
757 }
758 }
759 };
760
// Collects all heap instances of klass 'k' (including subtypes) into
// 'result'. Must be called at a safepoint with the Heap_lock held.
void HeapInspection::find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
  assert(Heap_lock->is_locked(), "should have the Heap_lock");

  // Ensure that the heap is parsable
  Universe::heap()->ensure_parsability(false); // no need to retire TLABs

  // Iterate over objects in the heap
  FindInstanceClosure fic(k, result);
  Universe::heap()->object_iterate(&fic);
}