1 /* 2 * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/heapInspection.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/fieldInfo.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"

// HeapInspection

// Frees the lazily-allocated subclass list (see add_subclass).
inline KlassInfoEntry::~KlassInfoEntry() {
  if (_subclasses != nullptr) {
    delete _subclasses;
  }
}

// Registers 'cie' as a direct subclass of this entry's class. The backing
// array is allocated on first use so entries without subclasses pay nothing.
inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
  if (_subclasses == nullptr) {
    _subclasses = new (mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
  }
  _subclasses->append(cie);
}

// Histogram sort order: descending by total instance size in words,
// with ties broken by class name (see comment below on array grouping).
int KlassInfoEntry::compare(KlassInfoEntry* e1, KlassInfoEntry* e2) {
  if(e1->_instance_words > e2->_instance_words) {
    return -1;
  } else if(e1->_instance_words < e2->_instance_words) {
    return 1;
  }
  // Sort alphabetically, note 'Z' < '[' < 'a', but it's better to group
  // the array classes before all the instance classes.
  ResourceMark rm;
  const char* name1 = e1->klass()->external_name();
  const char* name2 = e2->klass()->external_name();
  bool d1 = (name1[0] == JVM_SIGNATURE_ARRAY);
  bool d2 = (name2[0] == JVM_SIGNATURE_ARRAY);
  if (d1 && !d2) {
    return -1;
  } else if (d2 && !d1) {
    return 1;
  } else {
    return strcmp(name1, name2);
  }
}

// Returns a printable name for this entry's class. Falls back to fixed
// placeholder strings for the primitive array klasses, whose name() Symbol
// can be null.
const char* KlassInfoEntry::name() const {
  const char* name;
  if (_klass->name() != nullptr) {
    name = _klass->external_name();
  } else {
    if (_klass == Universe::boolArrayKlass())   name = "<boolArrayKlass>";   else
    if (_klass == Universe::charArrayKlass())   name = "<charArrayKlass>";   else
    if (_klass == Universe::floatArrayKlass())  name = "<floatArrayKlass>";  else
    if (_klass == Universe::doubleArrayKlass()) name = "<doubleArrayKlass>"; else
    if (_klass == Universe::byteArrayKlass())   name = "<byteArrayKlass>";   else
    if (_klass == Universe::shortArrayKlass())  name = "<shortArrayKlass>";  else
    if (_klass == Universe::intArrayKlass())    name = "<intArrayKlass>";    else
    if (_klass == Universe::longArrayKlass())   name = "<longArrayKlass>";   else
      name = "<no name>";
  }
  return name;
}

// Prints one histogram row: instance count, total bytes, class name, and
// (for classes in named modules) the module name and optional version.
void KlassInfoEntry::print_on(outputStream* st) const {
  ResourceMark rm;

  // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
  ModuleEntry* module = _klass->module();
  if (module->is_named()) {
    st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s (%s%s%s)",
                 (int64_t)_instance_count,
                 (uint64_t)_instance_words * HeapWordSize,
                 name(),
                 module->name()->as_C_string(),
                 module->version() != nullptr ? "@" : "",
                 module->version() != nullptr ?
                   module->version()->as_C_string() : "");
  } else {
    st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s",
                 (int64_t)_instance_count,
                 (uint64_t)_instance_words * HeapWordSize,
                 name());
  }
}

// Finds the entry for klass 'k' in this bucket's linked list, creating a
// new entry at the list head if none exists. Returns null if 'k' has no
// java mirror yet, or if C-heap allocation of a new entry fails.
KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
  // Can happen if k is an archived class that we haven't loaded yet.
  if (k->java_mirror_no_keepalive() == nullptr) {
    return nullptr;
  }

  KlassInfoEntry* elt = _list;
  while (elt != nullptr) {
    if (elt->is_equal(k)) {
      return elt;
    }
    elt = elt->next();
  }
  elt = new (std::nothrow) KlassInfoEntry(k, list());
  // We may be out of space to allocate the new entry.
  if (elt != nullptr) {
    set_list(elt);
  }
  return elt;
}

// Applies the closure to every entry in this bucket.
void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
  KlassInfoEntry* elt = _list;
  while (elt != nullptr) {
    cic->do_cinfo(elt);
    elt = elt->next();
  }
}

// Deletes all entries and resets the bucket to empty.
void KlassInfoBucket::empty() {
  KlassInfoEntry* elt = _list;
  _list = nullptr;
  while (elt != nullptr) {
    KlassInfoEntry* next = elt->next();
    delete elt;
    elt = next;
  }
}

// Closure that pre-populates the table with an entry for every loaded class.
class KlassInfoTable::AllClassesFinder : public LockedClassesDo {
  KlassInfoTable *_table;
 public:
  AllClassesFinder(KlassInfoTable* table) : _table(table) {}
  virtual void do_klass(Klass* k) {
    // This has the SIDE EFFECT of creating a KlassInfoEntry
    // for <k>, if one doesn't exist yet.
    _table->lookup(k);
  }
};


// Builds the hash table of KlassInfoEntry buckets. The bucket array is
// allocated with RETURN_NULL so callers must check allocation_failed().
// If 'add_all_classes' is set, an entry is eagerly created for every
// class in the ClassLoaderDataGraph.
KlassInfoTable::KlassInfoTable(bool add_all_classes) {
  _size_of_instances_in_words = 0;
  // _ref anchors the hash function; see hash() below.
  _ref = (uintptr_t) Universe::boolArrayKlass();
  _buckets =
    (KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
                                    mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_buckets != nullptr) {
    for (int index = 0; index < _num_buckets; index++) {
      _buckets[index].initialize();
    }
    if (add_all_classes) {
      AllClassesFinder finder(this);
      ClassLoaderDataGraph::classes_do(&finder);
    }
  }
}

// Empties every bucket and releases the bucket array.
KlassInfoTable::~KlassInfoTable() {
  if (_buckets != nullptr) {
    for (int index = 0; index < _num_buckets; index++) {
      _buckets[index].empty();
    }
    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
    _buckets = nullptr;
  }
}

// Hashes a Klass pointer by its distance from the _ref anchor; the >> 2
// discards low bits that are identical due to pointer alignment.
uint KlassInfoTable::hash(const Klass* p) {
  return (uint)(((uintptr_t)p - _ref) >> 2);
}

// Finds or creates the entry for 'k'; may return null (see bucket lookup).
KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
  uint idx = hash(k) % _num_buckets;
  assert(_buckets != nullptr, "Allocation failure should have been caught");
  KlassInfoEntry* e = _buckets[idx].lookup(k);
  // Lookup may fail if this is a new klass for which we
  // could not allocate space for an new entry, or if it's
  // an archived class that we haven't loaded yet.
  assert(e == nullptr || k == e->klass(), "must be equal");
  return e;
}

// Return false if the entry could not be recorded on account
// of running out of space required to create a new entry.
bool KlassInfoTable::record_instance(const oop obj) {
  Klass*        k = obj->klass();
  KlassInfoEntry* elt = lookup(k);
  // elt may be null if it's a new klass for which we
  // could not allocate space for a new entry in the hashtable.
  if (elt != nullptr) {
    elt->set_count(elt->count() + 1);
    elt->set_words(elt->words() + obj->size());
    _size_of_instances_in_words += obj->size();
    return true;
  } else {
    return false;
  }
}

// Applies the closure to every entry in every bucket.
void KlassInfoTable::iterate(KlassInfoClosure* cic) {
  assert(_buckets != nullptr, "Allocation failure should have been caught");
  for (int index = 0; index < _num_buckets; index++) {
    _buckets[index].iterate(cic);
  }
}

// Running total of all recorded instance sizes, in HeapWords.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}

// Return false if the entry could not be recorded on account
// of running out of space required to create a new entry.
bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
  Klass*          k = cie->klass();
  KlassInfoEntry* elt = lookup(k);
  // elt may be null if it's a new klass for which we
  // could not allocate space for a new entry in the hashtable.
  if (elt != nullptr) {
    elt->set_count(elt->count() + cie->count());
    elt->set_words(elt->words() + cie->words());
    _size_of_instances_in_words += cie->words();
    return true;
  }
  return false;
}

// Closure that merges every entry of a source table into a destination
// table, remembering whether any merge failed (C-heap exhaustion).
class KlassInfoTableMergeClosure : public KlassInfoClosure {
 private:
  KlassInfoTable* _dest;
  bool _success;
 public:
  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
  void do_cinfo(KlassInfoEntry* cie) {
    _success &= _dest->merge_entry(cie);
  }
  bool success() { return _success; }
};

// merge from table
bool KlassInfoTable::merge(KlassInfoTable* table) {
  KlassInfoTableMergeClosure closure(this);
  table->iterate(&closure);
  return closure.success();
}

// Adapter so GrowableArray::sort can use KlassInfoEntry::compare.
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}

KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}

KlassInfoHisto::~KlassInfoHisto() {
  delete _elements;
}

void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}

void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}

// Prints every histogram row followed by a totals line.
void KlassInfoHisto::print_elements(outputStream* st) const {
  // simplify the formatting (ILP32 vs LP64) - store the sum in 64-bit
  int64_t total = 0;
  uint64_t totalw = 0;
  for(int i=0; i < elements()->length(); i++) {
    st->print("%4d: ", i+1);
    elements()->at(i)->print_on(st);
    total += elements()->at(i)->count();
    totalw += elements()->at(i)->words();
  }
  st->print_cr("Total " INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13),
               total, totalw * HeapWordSize);
}

// Collects the entries of all instance classes (array classes are ignored
// since they do not participate in the printed class hierarchy).
class HierarchyClosure : public KlassInfoClosure {
 private:
  GrowableArray<KlassInfoEntry*> *_elements;
 public:
  HierarchyClosure(GrowableArray<KlassInfoEntry*> *_elements) : _elements(_elements) {}

  void do_cinfo(KlassInfoEntry* cie) {
    // ignore array classes
    if (cie->klass()->is_instance_klass()) {
      _elements->append(cie);
    }
  }
};

// Prints the loaded-class hierarchy rooted at java.lang.Object, optionally
// restricted to the super/sub hierarchy of 'classname', optionally with
// each class's implemented interfaces and subclasses.
void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfaces,
                                           bool print_subclasses, char* classname) {
  ResourceMark rm;
  Stack <KlassInfoEntry*, mtClass> class_stack;
  GrowableArray<KlassInfoEntry*> elements;

  // Add all classes to the KlassInfoTable, which allows for quick lookup.
  // A KlassInfoEntry will be created for each class.
  KlassInfoTable cit(true);
  if (cit.allocation_failed()) {
    st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
    return;
  }

  // Add all created KlassInfoEntry instances to the elements array for easy
  // iteration, and to allow each KlassInfoEntry instance to have a unique index.
  HierarchyClosure hc(&elements);
  cit.iterate(&hc);

  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    Klass* super = cie->klass()->super();

    // Set the index for the class.
    cie->set_index(i + 1);

    // Add the class to the subclass array of its superclass.
    if (super != nullptr) {
      KlassInfoEntry* super_cie = cit.lookup(super);
      assert(super_cie != nullptr, "could not lookup superclass");
      super_cie->add_subclass(cie);
    }
  }

  // Set the do_print flag for each class that should be printed.
  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    if (classname == nullptr) {
      // We are printing all classes.
      cie->set_do_print(true);
    } else {
      // We are only printing the hierarchy of a specific class.
      if (strcmp(classname, cie->klass()->external_name()) == 0) {
        KlassHierarchy::set_do_print_for_class_hierarchy(cie, &cit, print_subclasses);
      }
    }
  }

  // Now we do a depth first traversal of the class hierarchy. The class_stack will
  // maintain the list of classes we still need to process. Start things off
  // by priming it with java.lang.Object.
  KlassInfoEntry* jlo_cie = cit.lookup(vmClasses::Object_klass());
  assert(jlo_cie != nullptr, "could not lookup java.lang.Object");
  class_stack.push(jlo_cie);

  // Repeatedly pop the top item off the stack, print its class info,
  // and push all of its subclasses on to the stack. Do this until there
  // are no classes left on the stack.
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    if (curr_cie->do_print()) {
      print_class(st, curr_cie, print_interfaces);
      if (curr_cie->subclasses() != nullptr) {
        // Current class has subclasses, so push all of them onto the stack.
        for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
          KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
          if (cie->do_print()) {
            class_stack.push(cie);
          }
        }
      }
    }
  }

  st->flush();
}

// Sets the do_print flag for every superclass and subclass of the specified class.
void KlassHierarchy::set_do_print_for_class_hierarchy(KlassInfoEntry* cie, KlassInfoTable* cit,
                                                      bool print_subclasses) {
  // Set do_print for all superclasses of this class.
  Klass* super = ((InstanceKlass*)cie->klass())->java_super();
  while (super != nullptr) {
    KlassInfoEntry* super_cie = cit->lookup(super);
    super_cie->set_do_print(true);
    super = super->super();
  }

  // Set do_print for this class and all of its subclasses.
  Stack <KlassInfoEntry*, mtClass> class_stack;
  class_stack.push(cie);
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    curr_cie->set_do_print(true);
    if (print_subclasses && curr_cie->subclasses() != nullptr) {
      // Current class has subclasses, so push all of them onto the stack.
      for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
        KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
        class_stack.push(cie);
      }
    }
  }
}

// Prints 'indent' levels of "|" hierarchy guides, separated by spaces.
static void print_indent(outputStream* st, int indent) {
  while (indent != 0) {
    st->print("|");
    indent--;
    if (indent != 0) {
      st->print(" ");
    }
  }
}

// Print the class name and its unique ClassLoader identifier.
// The bootstrap loader prints as "null"; any other loader is identified by
// the address of the class's ClassLoaderData.
static void print_classname(outputStream* st, Klass* klass) {
  oop loader_oop = klass->class_loader_data()->class_loader();
  st->print("%s/", klass->external_name());
  if (loader_oop == nullptr) {
    st->print("null");
  } else {
    st->print(PTR_FORMAT, p2i(klass->class_loader_data()));
  }
}

// Prints one "implements" line for an interface at the given indent level;
// 'intf_type' distinguishes declared from inherited interfaces.
static void print_interface(outputStream* st, InstanceKlass* intf_klass, const char* intf_type, int indent) {
  print_indent(st, indent);
  st->print("  implements ");
  print_classname(st, intf_klass);
  st->print(" (%s intf)\n", intf_type);
}

// Prints one class of the hierarchy: indentation showing its depth, its
// name and loader, an "(intf)" tag for interfaces, and (optionally) its
// declared and inherited interfaces.
void KlassHierarchy::print_class(outputStream* st, KlassInfoEntry* cie, bool print_interfaces) {
  ResourceMark rm;
  InstanceKlass* klass = (InstanceKlass*)cie->klass();
  int indent = 0;

  // Print indentation with proper indicators of superclass.
  Klass* super = klass->super();
  while (super != nullptr) {
    super = super->super();
    indent++;
  }
  print_indent(st, indent);
  if (indent != 0)  st->print("--");

  // Print the class name, its unique ClassLoader identifier, and if it is an interface.
  print_classname(st, klass);
  if (klass->is_interface()) {
    st->print(" (intf)");
  }
  st->print("\n");

  // Print any interfaces the class has.
  if (print_interfaces) {
    Array<InstanceKlass*>* local_intfs = klass->local_interfaces();
    Array<InstanceKlass*>* trans_intfs = klass->transitive_interfaces();
    for (int i = 0; i < local_intfs->length(); i++) {
      print_interface(st, local_intfs->at(i), "declared", indent);
    }
    for (int i = 0; i < trans_intfs->length(); i++) {
      InstanceKlass* trans_interface = trans_intfs->at(i);
      // Only print transitive interfaces if they are not also declared.
      if (!local_intfs->contains(trans_interface)) {
        print_interface(st, trans_interface, "inherited", indent);
      }
    }
  }
}

// Prints the histogram column headers followed by all rows and the total.
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num     #instances         #bytes  class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}

// Closure that feeds each table entry into a KlassInfoHisto.
class HistoClosure : public KlassInfoClosure {
 private:
  KlassInfoHisto* _cih;
 public:
  HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}

  void do_cinfo(KlassInfoEntry* cie) {
    _cih->add(cie);
  }
};


// Collects every Klass in the table whose name Symbol matches _classname.
// Can match more than one Klass (same name, different class loaders).
class FindClassByNameClosure : public KlassInfoClosure {
 private:
  GrowableArray<Klass*>* _klasses;
  Symbol* _classname;
 public:
  FindClassByNameClosure(GrowableArray<Klass*>* klasses, Symbol* classname) :
    _klasses(klasses), _classname(classname) { }

  void do_cinfo(KlassInfoEntry* cie) {
    if (cie->klass()->name() == _classname) {
      _klasses->append(cie->klass());
    }
  }
};

// Value-copyable snapshot of a fieldDescriptor, so field metadata can be
// collected into a GrowableArray and sorted by offset for layout printing.
class FieldDesc {
 private:
  Symbol* _name;
  Symbol* _signature;
  int _offset;
  int _index;
  InstanceKlass* _holder;
  AccessFlags _access_flags;
  FieldInfo::FieldFlags _field_flags;
 public:
  FieldDesc() : _name(nullptr), _signature(nullptr), _offset(-1), _index(-1), _holder(nullptr),
                _access_flags(AccessFlags()), _field_flags(FieldInfo::FieldFlags((u4)0)) { }

  FieldDesc(fieldDescriptor& fd) : _name(fd.name()), _signature(fd.signature()), _offset(fd.offset()),
                                   _index(fd.index()), _holder(fd.field_holder()),
                                   _access_flags(fd.access_flags()), _field_flags(fd.field_flags()) { }

  const Symbol* name() { return _name;}
  const Symbol* signature() { return _signature; }
  int offset() const { return _offset; }
  int index() const { return _index; }
  const InstanceKlass* holder() { return _holder; }
  const AccessFlags& access_flags() { return _access_flags; }
  bool is_null_free_inline_type() const { return _field_flags.is_null_free_inline_type(); }
};

// Orders fields by ascending offset (never returns 0, even for equal offsets).
static int compare_offset(FieldDesc* f1, FieldDesc* f2) {
   return f1->offset() > f2->offset() ? 1 : -1;
}

// Prints one field line: offset, indentation by nesting level, name,
// signature, and inline-type/flat markers.
static void print_field(outputStream* st, int level, int offset, FieldDesc& fd, bool is_inline_type, bool is_flat ) {
  const char* flat_field_msg = "";
  if (is_flat) {
    // NOTE(review): is_flat is already known true here, so this ternary
    // always yields "flat"; the "not flat" arm is unreachable.
    flat_field_msg = is_flat ? "flat" : "not flat";
  }
  st->print_cr("  @ %d %*s \"%s\" %s %s %s",
      offset, level * 3, "",
      fd.name()->as_C_string(),
      fd.signature()->as_C_string(),
      is_inline_type ? " // inline type " : "",
      flat_field_msg);
}

// Recursively prints the layout of a flat (inlined) field: each of the
// inline class's non-static fields at its absolute offset within the
// enclosing container, descending into nested flat fields.
static void print_flat_field(outputStream* st, int level, int offset, InstanceKlass* klass) {
  assert(klass->is_inline_klass(), "Only inline types can be flat");
  InlineKlass* vklass = InlineKlass::cast(klass);
  GrowableArray<FieldDesc>* fields = new (mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
  for (AllFieldStream fd(klass); !fd.done(); fd.next()) {
    if (!fd.access_flags().is_static()) {
      fields->append(FieldDesc(fd.field_descriptor()));
    }
  }
  fields->sort(compare_offset);
  for(int i = 0; i < fields->length(); i++) {
    FieldDesc fd = fields->at(i);
    // Rebase the field offset from the inline klass's own layout to the
    // enclosing container's layout.
    int offset2 = offset + fd.offset() - vklass->first_field_offset();
    print_field(st, level, offset2, fd,
        fd.is_null_free_inline_type(), fd.holder()->field_is_flat(fd.index()));
    if (fd.holder()->field_is_flat(fd.index())) {
      print_flat_field(st, level + 1, offset2,
          InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
    }
  }
}

// Prints the field layout (offsets, names, signatures, flat fields) of every
// loaded class whose name matches 'class_name'.
void PrintClassLayout::print_class_layout(outputStream* st, char* class_name) {
  KlassInfoTable cit(true);
  if (cit.allocation_failed()) {
    st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
    return;
  }

  Thread* THREAD = Thread::current();

  // probe() only finds an existing Symbol; a class never loaded yields null
  // and then no Klass will match below.
  Symbol* classname = SymbolTable::probe(class_name, (int)strlen(class_name));

  GrowableArray<Klass*>* klasses = new (mtServiceability) GrowableArray<Klass*>(100, mtServiceability);

  FindClassByNameClosure fbnc(klasses, classname);
  cit.iterate(&fbnc);

  for(int i = 0; i < klasses->length(); i++) {
    Klass* klass = klasses->at(i);
    if (!klass->is_instance_klass()) continue; // Skip
    InstanceKlass* ik = InstanceKlass::cast(klass);
    int tab = 1;
    st->print_cr("Class %s [@%s]:", klass->name()->as_C_string(),
        klass->class_loader_data()->loader_name());
    ResourceMark rm;
    GrowableArray<FieldDesc>* fields = new (mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
    for (AllFieldStream fd(ik); !fd.done(); fd.next()) {
      if (!fd.access_flags().is_static()) {
        fields->append(FieldDesc(fd.field_descriptor()));
      }
    }
    fields->sort(compare_offset);
    for(int i = 0; i < fields->length(); i++) {
      FieldDesc fd = fields->at(i);
      print_field(st, 0, fd.offset(), fd, fd.is_null_free_inline_type(), fd.holder()->field_is_flat(fd.index()));
      if (fd.holder()->field_is_flat(fd.index())) {
        print_flat_field(st, 1, fd.offset(),
            InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
      }
    }
  }
  st->cr();
}

// Records each visited object (passing the optional filter) in a
// KlassInfoTable, counting the objects that could not be recorded
// because the table ran out of C-heap.
class RecordInstanceClosure : public ObjectClosure {
 private:
  KlassInfoTable* _cit;
  uintx _missed_count;
  BoolObjectClosure* _filter;
 public:
  RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
    _cit(cit), _missed_count(0), _filter(filter) {}

  void do_object(oop obj) {
    if (should_visit(obj)) {
      if (!_cit->record_instance(obj)) {
        _missed_count++;
      }
    }
  }

  uintx missed_count() { return _missed_count; }

 private:
  bool should_visit(oop obj) {
    return _filter == nullptr || _filter->do_object_b(obj);
  }
};

// Heap inspection for every worker.
// When native OOM happens for KlassInfoTable, set _success to false.
// Each worker records instances into a private KlassInfoTable, then merges
// it into the shared table under _mutex.
void ParHeapInspectTask::work(uint worker_id) {
  uintx missed_count = 0;
  bool merge_success = true;
  if (!Atomic::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  KlassInfoTable cit(false);
  if (cit.allocation_failed()) {
    // fail to allocate memory, stop parallel mode
    Atomic::store(&_success, false);
    return;
  }
  RecordInstanceClosure ric(&cit, _filter);
  _poi->object_iterate(&ric, worker_id);
  missed_count = ric.missed_count();
  {
    // Only the merge into the shared table needs mutual exclusion.
    MutexLocker x(&_mutex, Mutex::_no_safepoint_check_flag);
    merge_success = _shared_cit->merge(&cit);
  }
  if (merge_success) {
    Atomic::add(&_missed_count, missed_count);
  } else {
    Atomic::store(&_success, false);
  }
}

// Fills 'cit' with per-class instance counts/sizes for all heap objects
// passing 'filter'. Uses a parallel iteration when 'workers' is supplied
// and it succeeds; otherwise falls back to a serial heap walk. Returns
// the number of objects that could not be recorded (C-heap exhaustion).
uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, WorkerThreads* workers) {
  // Try parallel first.
  if (workers != nullptr) {
    ResourceMark rm;
    ParallelObjectIterator poi(workers->active_workers());
    ParHeapInspectTask task(&poi, cit, filter);
    // Run task with the active workers.
    workers->run_task(&task);
    if (task.success()) {
      return task.missed_count();
    }
  }

  ResourceMark rm;
  // If no parallel iteration available, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}

// Produces the class histogram (jmap -histo style) on 'st': populates a
// KlassInfoTable from the heap, sorts the entries, and prints them,
// warning if any instances were undercounted.
void HeapInspection::heap_inspection(outputStream* st, WorkerThreads* workers) {
  ResourceMark rm;

  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    // populate table with object allocation info
    uintx missed_count = populate_table(&cit, nullptr, workers);
    if (missed_count != 0) {
      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                               " total instances in data below",
                               missed_count);
    }

    // Sort and print klass instance info
    KlassInfoHisto histo(&cit);
    HistoClosure hc(&histo);

    cit.iterate(&hc);

    histo.sort();
    histo.print_histo_on(st);
  } else {
    st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
  }
  st->flush();
}

// Collects every heap object that is an instance of _klass (or a subtype,
// per oop::is_a) into _result, keeping each collected object alive.
class FindInstanceClosure : public ObjectClosure {
 private:
  Klass* _klass;
  GrowableArray<oop>* _result;

 public:
  FindInstanceClosure(Klass* k, GrowableArray<oop>* result) : _klass(k), _result(result) {};

  void do_object(oop obj) {
    if (obj->is_a(_klass)) {
      // obj was read with AS_NO_KEEPALIVE, or equivalent.
      // The object needs to be kept alive when it is published.
      Universe::heap()->keep_alive(obj);

      _result->append(obj);
    }
  }
};

// Walks the whole heap at a safepoint and appends every instance of 'k'
// to 'result'. Caller must hold the Heap_lock.
void HeapInspection::find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
  assert(Heap_lock->is_locked(), "should have the Heap_lock");

  // Ensure that the heap is parsable
  Universe::heap()->ensure_parsability(false);  // no need to retire TLABs

  // Iterate over objects in the heap
  FindInstanceClosure fic(k, result);
  Universe::heap()->object_iterate(&fic);
}