1 /*
  2  * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/classLoaderData.inline.hpp"
 27 #include "classfile/classLoaderDataGraph.hpp"
 28 #include "classfile/moduleEntry.hpp"
 29 #include "classfile/vmClasses.hpp"
 30 #include "gc/shared/collectedHeap.hpp"
 31 #include "logging/log.hpp"
 32 #include "logging/logTag.hpp"
 33 #include "memory/heapInspection.hpp"
 34 #include "memory/resourceArea.hpp"
 35 #include "memory/universe.hpp"
 36 #include "oops/oop.inline.hpp"
 37 #include "oops/reflectionAccessorImplKlassHelper.hpp"
 38 #include "oops/inlineKlass.inline.hpp"
 39 #include "runtime/reflectionUtils.hpp"
 40 #include "runtime/atomic.hpp"
 41 #include "runtime/os.hpp"
 42 #include "runtime/fieldDescriptor.inline.hpp"
 43 #include "services/memTracker.hpp"
 44 #include "utilities/globalDefinitions.hpp"
 45 #include "utilities/macros.hpp"
 46 #include "utilities/stack.inline.hpp"
 47 
 48 // HeapInspection
 49 
 50 inline KlassInfoEntry::~KlassInfoEntry() {
 51   if (_subclasses != NULL) {
 52     delete _subclasses;
 53   }
 54 }
 55 
 56 inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
 57   if (_subclasses == NULL) {
 58     _subclasses = new  (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
 59   }
 60   _subclasses->append(cie);
 61 }
 62 
 63 int KlassInfoEntry::compare(KlassInfoEntry* e1, KlassInfoEntry* e2) {
 64   if(e1->_instance_words > e2->_instance_words) {
 65     return -1;
 66   } else if(e1->_instance_words < e2->_instance_words) {
 67     return 1;
 68   }
 69   // Sort alphabetically, note 'Z' < '[' < 'a', but it's better to group
 70   // the array classes before all the instance classes.
 71   ResourceMark rm;
 72   const char* name1 = e1->klass()->external_name();
 73   const char* name2 = e2->klass()->external_name();
 74   bool d1 = (name1[0] == JVM_SIGNATURE_ARRAY);
 75   bool d2 = (name2[0] == JVM_SIGNATURE_ARRAY);
 76   if (d1 && !d2) {
 77     return -1;
 78   } else if (d2 && !d1) {
 79     return 1;
 80   } else {
 81     return strcmp(name1, name2);
 82   }
 83 }
 84 
 85 const char* KlassInfoEntry::name() const {
 86   const char* name;
 87   if (_klass->name() != NULL) {
 88     name = _klass->external_name();
 89   } else {
 90     if (_klass == Universe::boolArrayKlassObj())         name = "<boolArrayKlass>";         else
 91     if (_klass == Universe::charArrayKlassObj())         name = "<charArrayKlass>";         else
 92     if (_klass == Universe::floatArrayKlassObj())        name = "<floatArrayKlass>";        else
 93     if (_klass == Universe::doubleArrayKlassObj())       name = "<doubleArrayKlass>";       else
 94     if (_klass == Universe::byteArrayKlassObj())         name = "<byteArrayKlass>";         else
 95     if (_klass == Universe::shortArrayKlassObj())        name = "<shortArrayKlass>";        else
 96     if (_klass == Universe::intArrayKlassObj())          name = "<intArrayKlass>";          else
 97     if (_klass == Universe::longArrayKlassObj())         name = "<longArrayKlass>";         else
 98       name = "<no name>";
 99   }
100   return name;
101 }
102 
103 void KlassInfoEntry::print_on(outputStream* st) const {
104   ResourceMark rm;
105 
106   // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
107   ModuleEntry* module = _klass->module();
108   if (module->is_named()) {
109     st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s (%s%s%s)",
110                  (int64_t)_instance_count,
111                  (uint64_t)_instance_words * HeapWordSize,
112                  name(),
113                  module->name()->as_C_string(),
114                  module->version() != NULL ? "@" : "",
115                  module->version() != NULL ? module->version()->as_C_string() : "");
116   } else {
117     st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s",
118                  (int64_t)_instance_count,
119                  (uint64_t)_instance_words * HeapWordSize,
120                  name());
121   }
122 }
123 
124 KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
125   // Can happen if k is an archived class that we haven't loaded yet.
126   if (k->java_mirror_no_keepalive() == NULL) {
127     return NULL;
128   }
129 
130   KlassInfoEntry* elt = _list;
131   while (elt != NULL) {
132     if (elt->is_equal(k)) {
133       return elt;
134     }
135     elt = elt->next();
136   }
137   elt = new (std::nothrow) KlassInfoEntry(k, list());
138   // We may be out of space to allocate the new entry.
139   if (elt != NULL) {
140     set_list(elt);
141   }
142   return elt;
143 }
144 
145 void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
146   KlassInfoEntry* elt = _list;
147   while (elt != NULL) {
148     cic->do_cinfo(elt);
149     elt = elt->next();
150   }
151 }
152 
153 void KlassInfoBucket::empty() {
154   KlassInfoEntry* elt = _list;
155   _list = NULL;
156   while (elt != NULL) {
157     KlassInfoEntry* next = elt->next();
158     delete elt;
159     elt = next;
160   }
161 }
162 
163 class KlassInfoTable::AllClassesFinder : public LockedClassesDo {
164   KlassInfoTable *_table;
165 public:
166   AllClassesFinder(KlassInfoTable* table) : _table(table) {}
167   virtual void do_klass(Klass* k) {
168     // This has the SIDE EFFECT of creating a KlassInfoEntry
169     // for <k>, if one doesn't exist yet.
170     _table->lookup(k);
171   }
172 };
173 
174 
175 KlassInfoTable::KlassInfoTable(bool add_all_classes) {
176   _size_of_instances_in_words = 0;
177   _ref = (HeapWord*) Universe::boolArrayKlassObj();
178   _buckets =
179     (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
180        mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
181   if (_buckets != NULL) {
182     for (int index = 0; index < _num_buckets; index++) {
183       _buckets[index].initialize();
184     }
185     if (add_all_classes) {
186       AllClassesFinder finder(this);
187       ClassLoaderDataGraph::classes_do(&finder);
188     }
189   }
190 }
191 
192 KlassInfoTable::~KlassInfoTable() {
193   if (_buckets != NULL) {
194     for (int index = 0; index < _num_buckets; index++) {
195       _buckets[index].empty();
196     }
197     FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
198     _buckets = NULL;
199   }
200 }
201 
202 uint KlassInfoTable::hash(const Klass* p) {
203   return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
204 }
205 
206 KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
207   uint         idx = hash(k) % _num_buckets;
208   assert(_buckets != NULL, "Allocation failure should have been caught");
209   KlassInfoEntry*  e   = _buckets[idx].lookup(k);
210   // Lookup may fail if this is a new klass for which we
211   // could not allocate space for an new entry, or if it's
212   // an archived class that we haven't loaded yet.
213   assert(e == NULL || k == e->klass(), "must be equal");
214   return e;
215 }
216 
217 // Return false if the entry could not be recorded on account
218 // of running out of space required to create a new entry.
219 bool KlassInfoTable::record_instance(const oop obj) {
220   Klass*        k = obj->klass();
221   KlassInfoEntry* elt = lookup(k);
222   // elt may be NULL if it's a new klass for which we
223   // could not allocate space for a new entry in the hashtable.
224   if (elt != NULL) {
225     elt->set_count(elt->count() + 1);
226     elt->set_words(elt->words() + obj->size());
227     _size_of_instances_in_words += obj->size();
228     return true;
229   } else {
230     return false;
231   }
232 }
233 
234 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
235   assert(_buckets != NULL, "Allocation failure should have been caught");
236   for (int index = 0; index < _num_buckets; index++) {
237     _buckets[index].iterate(cic);
238   }
239 }
240 
// Total size, in HeapWords, of all instances recorded via record_instance()
// and merge_entry().
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
244 
245 // Return false if the entry could not be recorded on account
246 // of running out of space required to create a new entry.
247 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
248   Klass*          k = cie->klass();
249   KlassInfoEntry* elt = lookup(k);
250   // elt may be NULL if it's a new klass for which we
251   // could not allocate space for a new entry in the hashtable.
252   if (elt != NULL) {
253     elt->set_count(elt->count() + cie->count());
254     elt->set_words(elt->words() + cie->words());
255     _size_of_instances_in_words += cie->words();
256     return true;
257   }
258   return false;
259 }
260 
261 class KlassInfoTableMergeClosure : public KlassInfoClosure {
262 private:
263   KlassInfoTable* _dest;
264   bool _success;
265 public:
266   KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
267   void do_cinfo(KlassInfoEntry* cie) {
268     _success &= _dest->merge_entry(cie);
269   }
270   bool success() { return _success; }
271 };
272 
273 // merge from table
274 bool KlassInfoTable::merge(KlassInfoTable* table) {
275   KlassInfoTableMergeClosure closure(this);
276   table->iterate(&closure);
277   return closure.success();
278 }
279 
// Comparator adapter matching GrowableArray::sort's signature; delegates to
// KlassInfoEntry::compare (descending size, then name).
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
283 
// A histogram view over 'cit'; entries are appended via add() (typically
// through HistoClosure) and ordered with sort().
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}
288 
KlassInfoHisto::~KlassInfoHisto() {
  // Frees only the element array; the entries it points to are owned by
  // the KlassInfoTable.
  delete _elements;
}
292 
// Append a (table-owned) entry pointer to the histogram.
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
296 
// Order entries via sort_helper / KlassInfoEntry::compare.
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
300 
301 void KlassInfoHisto::print_elements(outputStream* st) const {
302   // simplify the formatting (ILP32 vs LP64) - store the sum in 64-bit
303   int64_t total = 0;
304   uint64_t totalw = 0;
305   for(int i=0; i < elements()->length(); i++) {
306     st->print("%4d: ", i+1);
307     elements()->at(i)->print_on(st);
308     total += elements()->at(i)->count();
309     totalw += elements()->at(i)->words();
310   }
311   st->print_cr("Total " INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13),
312                total, totalw * HeapWordSize);
313 }
314 
315 class HierarchyClosure : public KlassInfoClosure {
316 private:
317   GrowableArray<KlassInfoEntry*> *_elements;
318 public:
319   HierarchyClosure(GrowableArray<KlassInfoEntry*> *_elements) : _elements(_elements) {}
320 
321   void do_cinfo(KlassInfoEntry* cie) {
322     // ignore array classes
323     if (cie->klass()->is_instance_klass()) {
324       _elements->append(cie);
325     }
326   }
327 };
328 
// Print the class hierarchy, rooted at java.lang.Object, to 'st'. When
// 'classname' is non-NULL, only that class, its superclasses, and (when
// 'print_subclasses' is set) its subclasses are printed. Interfaces are
// listed per class when 'print_interfaces' is set.
void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfaces,
                                           bool print_subclasses, char* classname) {
  ResourceMark rm;
  Stack <KlassInfoEntry*, mtClass> class_stack;
  GrowableArray<KlassInfoEntry*> elements;

  // Add all classes to the KlassInfoTable, which allows for quick lookup.
  // A KlassInfoEntry will be created for each class.
  KlassInfoTable cit(true);
  if (cit.allocation_failed()) {
    st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
    return;
  }

  // Add all created KlassInfoEntry instances to the elements array for easy
  // iteration, and to allow each KlassInfoEntry instance to have a unique index.
  HierarchyClosure hc(&elements);
  cit.iterate(&hc);

  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    Klass* super = cie->klass()->super();

    // Set the index for the class.
    cie->set_index(i + 1);

    // Add the class to the subclass array of its superclass.
    if (super != NULL) {
      KlassInfoEntry* super_cie = cit.lookup(super);
      assert(super_cie != NULL, "could not lookup superclass");
      super_cie->add_subclass(cie);
    }
  }

  // Set the do_print flag for each class that should be printed.
  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    if (classname == NULL) {
      // We are printing all classes.
      cie->set_do_print(true);
    } else {
      // We are only printing the hierarchy of a specific class.
      if (strcmp(classname, cie->klass()->external_name()) == 0) {
        KlassHierarchy::set_do_print_for_class_hierarchy(cie, &cit, print_subclasses);
      }
    }
  }

  // Now we do a depth first traversal of the class hierarchy. The class_stack will
  // maintain the list of classes we still need to process. Start things off
  // by priming it with java.lang.Object.
  KlassInfoEntry* jlo_cie = cit.lookup(vmClasses::Object_klass());
  assert(jlo_cie != NULL, "could not lookup java.lang.Object");
  class_stack.push(jlo_cie);

  // Repeatedly pop the top item off the stack, print its class info,
  // and push all of its subclasses on to the stack. Do this until there
  // are no classes left on the stack.
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    if (curr_cie->do_print()) {
      print_class(st, curr_cie, print_interfaces);
      if (curr_cie->subclasses() != NULL) {
        // Current class has subclasses, so push all of them onto the stack.
        for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
          KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
          if (cie->do_print()) {
            class_stack.push(cie);
          }
        }
      }
    }
  }

  st->flush();
}
405 
// Sets the do_print flag for every superclass and subclass of the specified class.
// Subclasses are only marked when 'print_subclasses' is set.
void KlassHierarchy::set_do_print_for_class_hierarchy(KlassInfoEntry* cie, KlassInfoTable* cit,
                                                      bool print_subclasses) {
  // Set do_print for all superclasses of this class.
  Klass* super = ((InstanceKlass*)cie->klass())->java_super();
  while (super != NULL) {
    KlassInfoEntry* super_cie = cit->lookup(super);
    super_cie->set_do_print(true);
    super = super->super();
  }

  // Set do_print for this class and all of its subclasses.
  // Iterative DFS: the stack holds classes whose subclasses still need marking.
  Stack <KlassInfoEntry*, mtClass> class_stack;
  class_stack.push(cie);
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    curr_cie->set_do_print(true);
    if (print_subclasses && curr_cie->subclasses() != NULL) {
      // Current class has subclasses, so push all of them onto the stack.
      for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
        KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
        class_stack.push(cie);
      }
    }
  }
}
432 
433 static void print_indent(outputStream* st, int indent) {
434   while (indent != 0) {
435     st->print("|");
436     indent--;
437     if (indent != 0) {
438       st->print("  ");
439     }
440   }
441 }
442 
443 // Print the class name and its unique ClassLoader identifer.
444 static void print_classname(outputStream* st, Klass* klass) {
445   oop loader_oop = klass->class_loader_data()->class_loader();
446   st->print("%s/", klass->external_name());
447   if (loader_oop == NULL) {
448     st->print("null");
449   } else {
450     st->print(INTPTR_FORMAT, p2i(klass->class_loader_data()));
451   }
452 }
453 
// Print one "implements" line for an interface, indented to match the
// implementing class; intf_type is "declared" or "inherited".
static void print_interface(outputStream* st, InstanceKlass* intf_klass, const char* intf_type, int indent) {
  print_indent(st, indent);
  st->print("  implements ");
  print_classname(st, intf_klass);
  st->print(" (%s intf)\n", intf_type);
}
460 
// Print one line for the class (indented by its depth in the hierarchy),
// optionally followed by its declared and inherited interfaces.
void KlassHierarchy::print_class(outputStream* st, KlassInfoEntry* cie, bool print_interfaces) {
  ResourceMark rm;
  InstanceKlass* klass = (InstanceKlass*)cie->klass();
  int indent = 0;

  // Print indentation with proper indicators of superclass.
  // Depth equals the number of superclasses above this class.
  Klass* super = klass->super();
  while (super != NULL) {
    super = super->super();
    indent++;
  }
  print_indent(st, indent);
  if (indent != 0) st->print("--");

  // Print the class name, its unique ClassLoader identifier, and if it is an interface.
  print_classname(st, klass);
  if (klass->is_interface()) {
    st->print(" (intf)");
  }
  // Special treatment for generated core reflection accessor classes: print invocation target.
  if (ReflectionAccessorImplKlassHelper::is_generated_accessor(klass)) {
    st->print(" (invokes: ");
    ReflectionAccessorImplKlassHelper::print_invocation_target(st, klass);
    st->print(")");
  }
  st->print("\n");

  // Print any interfaces the class has.
  if (print_interfaces) {
    Array<InstanceKlass*>* local_intfs = klass->local_interfaces();
    Array<InstanceKlass*>* trans_intfs = klass->transitive_interfaces();
    for (int i = 0; i < local_intfs->length(); i++) {
      print_interface(st, local_intfs->at(i), "declared", indent);
    }
    for (int i = 0; i < trans_intfs->length(); i++) {
      InstanceKlass* trans_interface = trans_intfs->at(i);
      // Only print transitive interfaces if they are not also declared.
      if (!local_intfs->contains(trans_interface)) {
        print_interface(st, trans_interface, "inherited", indent);
      }
    }
  }
}
504 
// Print the histogram header followed by the (already sorted) rows.
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num     #instances         #bytes  class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}
510 
511 class HistoClosure : public KlassInfoClosure {
512  private:
513   KlassInfoHisto* _cih;
514  public:
515   HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
516 
517   void do_cinfo(KlassInfoEntry* cie) {
518     _cih->add(cie);
519   }
520 };
521 
522 
523 class FindClassByNameClosure : public KlassInfoClosure {
524  private:
525   GrowableArray<Klass*>* _klasses;
526   Symbol* _classname;
527  public:
528   FindClassByNameClosure(GrowableArray<Klass*>* klasses, Symbol* classname) :
529     _klasses(klasses), _classname(classname) { }
530 
531   void do_cinfo(KlassInfoEntry* cie) {
532     if (cie->klass()->name() == _classname) {
533       _klasses->append(cie->klass());
534     }
535   }
536 };
537 
538 class FieldDesc {
539 private:
540   Symbol* _name;
541   Symbol* _signature;
542   int _offset;
543   int _index;
544   InstanceKlass* _holder;
545   AccessFlags _access_flags;
546  public:
547   FieldDesc() {
548     _name = NULL;
549     _signature = NULL;
550     _offset = -1;
551     _index = -1;
552     _holder = NULL;
553     _access_flags = AccessFlags();
554   }
555   FieldDesc(fieldDescriptor& fd) {
556     _name = fd.name();
557     _signature = fd.signature();
558     _offset = fd.offset();
559     _index = fd.index();
560     _holder = fd.field_holder();
561     _access_flags = fd.access_flags();
562   }
563   const Symbol* name() { return _name;}
564   const Symbol* signature() { return _signature; }
565   const int offset() { return _offset; }
566   const int index() { return _index; }
567   const InstanceKlass* holder() { return _holder; }
568   const AccessFlags& access_flags() { return _access_flags; }
569   const bool is_inline_type() { return Signature::basic_type(_signature) == T_INLINE_TYPE; }
570 };
571 
572 static int compare_offset(FieldDesc* f1, FieldDesc* f2) {
573    return f1->offset() > f2->offset() ? 1 : -1;
574 }
575 
576 static void print_field(outputStream* st, int level, int offset, FieldDesc& fd, bool is_inline_type, bool is_inlined ) {
577   const char* inlined_msg = "";
578   if (is_inline_type) {
579     inlined_msg = is_inlined ? "inlined" : "not inlined";
580   }
581   st->print_cr("  @ %d %*s \"%s\" %s %s %s",
582       offset, level * 3, "",
583       fd.name()->as_C_string(),
584       fd.signature()->as_C_string(),
585       is_inline_type ? " // inline type " : "",
586       inlined_msg);
587 }
588 
589 static void print_inlined_field(outputStream* st, int level, int offset, InstanceKlass* klass) {
590   assert(klass->is_inline_klass(), "Only inline types can be inlined");
591   InlineKlass* vklass = InlineKlass::cast(klass);
592   GrowableArray<FieldDesc>* fields = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
593   for (FieldStream fd(klass, false, false); !fd.eos(); fd.next()) {
594     if (!fd.access_flags().is_static()) {
595       fields->append(FieldDesc(fd.field_descriptor()));
596     }
597   }
598   fields->sort(compare_offset);
599   for(int i = 0; i < fields->length(); i++) {
600     FieldDesc fd = fields->at(i);
601     int offset2 = offset + fd.offset() - vklass->first_field_offset();
602     print_field(st, level, offset2, fd,
603         fd.is_inline_type(), fd.holder()->field_is_inlined(fd.index()));
604     if (fd.holder()->field_is_inlined(fd.index())) {
605       print_inlined_field(st, level + 1, offset2 ,
606           InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
607     }
608   }
609 }
610 
611 void PrintClassLayout::print_class_layout(outputStream* st, char* class_name) {
612   KlassInfoTable cit(true);
613   if (cit.allocation_failed()) {
614     st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
615     return;
616   }
617 
618   Thread* THREAD = Thread::current();
619 
620   Symbol* classname = SymbolTable::probe(class_name, (int)strlen(class_name));
621 
622   GrowableArray<Klass*>* klasses = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<Klass*>(100, mtServiceability);
623 
624   FindClassByNameClosure fbnc(klasses, classname);
625   cit.iterate(&fbnc);
626 
627   for(int i = 0; i < klasses->length(); i++) {
628     Klass* klass = klasses->at(i);
629     if (!klass->is_instance_klass()) continue;  // Skip
630     InstanceKlass* ik = InstanceKlass::cast(klass);
631     int tab = 1;
632     st->print_cr("Class %s [@%s]:", klass->name()->as_C_string(),
633         klass->class_loader_data()->loader_name());
634     ResourceMark rm;
635     GrowableArray<FieldDesc>* fields = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
636     for (FieldStream fd(ik, false, false); !fd.eos(); fd.next()) {
637       if (!fd.access_flags().is_static()) {
638         fields->append(FieldDesc(fd.field_descriptor()));
639       }
640     }
641     fields->sort(compare_offset);
642     for(int i = 0; i < fields->length(); i++) {
643       FieldDesc fd = fields->at(i);
644       print_field(st, 0, fd.offset(), fd, fd.is_inline_type(), fd.holder()->field_is_inlined(fd.index()));
645       if (fd.holder()->field_is_inlined(fd.index())) {
646         print_inlined_field(st, 1, fd.offset(),
647             InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
648       }
649     }
650   }
651   st->cr();
652 }
653 
654 class RecordInstanceClosure : public ObjectClosure {
655  private:
656   KlassInfoTable* _cit;
657   uintx _missed_count;
658   BoolObjectClosure* _filter;
659  public:
660   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
661     _cit(cit), _missed_count(0), _filter(filter) {}
662 
663   void do_object(oop obj) {
664     if (should_visit(obj)) {
665       if (!_cit->record_instance(obj)) {
666         _missed_count++;
667       }
668     }
669   }
670 
671   uintx missed_count() { return _missed_count; }
672 
673  private:
674   bool should_visit(oop obj) {
675     return _filter == NULL || _filter->do_object_b(obj);
676   }
677 };
678 
679 // Heap inspection for every worker.
680 // When native OOM happens for KlassInfoTable, set _success to false.
void ParHeapInspectTask::work(uint worker_id) {
  uintx missed_count = 0;
  bool merge_success = true;
  if (!Atomic::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  // Each worker builds its own private table, then merges it into the
  // shared one; a failed table allocation aborts the whole parallel pass.
  KlassInfoTable cit(false);
  if (cit.allocation_failed()) {
    // fail to allocate memory, stop parallel mode
    Atomic::store(&_success, false);
    return;
  }
  RecordInstanceClosure ric(&cit, _filter);
  _poi->object_iterate(&ric, worker_id);
  missed_count = ric.missed_count();
  {
    // Merging into the shared table must be serialized across workers.
    MutexLocker x(&_mutex, Mutex::_no_safepoint_check_flag);
    merge_success = _shared_cit->merge(&cit);
  }
  if (merge_success) {
    Atomic::add(&_missed_count, missed_count);
  } else {
    Atomic::store(&_success, false);
  }
}
708 
// Fill 'cit' with per-class instance counts/sizes for all heap objects
// passing 'filter'. Uses the GC's safepoint workers for parallel iteration
// when available; falls back to serial iteration otherwise. Returns the
// number of objects that could not be recorded (C-heap exhaustion).
uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {

  // Try parallel first.
  if (parallel_thread_num > 1) {
    ResourceMark rm;

    WorkGang* gang = Universe::heap()->safepoint_workers();
    if (gang != NULL) {
      // The GC provided a WorkGang to be used during a safepoint.

      // Can't run with more threads than provided by the WorkGang.
      WithUpdatedActiveWorkers update_and_restore(gang, parallel_thread_num);

      ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(gang->active_workers());
      if (poi != NULL) {
        // The GC supports parallel object iteration.

        ParHeapInspectTask task(poi, cit, filter);
        // Run task with the active workers.
        gang->run_task(&task);

        delete poi;
        if (task.success()) {
          return task.missed_count();
        }
        // Parallel pass failed (native OOM in a worker); fall through to
        // the serial path below.
      }
    }
  }

  ResourceMark rm;
  // If no parallel iteration available, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
744 
745 void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
746   ResourceMark rm;
747 
748   KlassInfoTable cit(false);
749   if (!cit.allocation_failed()) {
750     // populate table with object allocation info
751     uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
752     if (missed_count != 0) {
753       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
754                                " total instances in data below",
755                                missed_count);
756     }
757 
758     // Sort and print klass instance info
759     KlassInfoHisto histo(&cit);
760     HistoClosure hc(&histo);
761 
762     cit.iterate(&hc);
763 
764     histo.sort();
765     histo.print_histo_on(st);
766   } else {
767     st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
768   }
769   st->flush();
770 }
771 
772 class FindInstanceClosure : public ObjectClosure {
773  private:
774   Klass* _klass;
775   GrowableArray<oop>* _result;
776 
777  public:
778   FindInstanceClosure(Klass* k, GrowableArray<oop>* result) : _klass(k), _result(result) {};
779 
780   void do_object(oop obj) {
781     if (obj->is_a(_klass)) {
782       // obj was read with AS_NO_KEEPALIVE, or equivalent.
783       // The object needs to be kept alive when it is published.
784       Universe::heap()->keep_alive(obj);
785 
786       _result->append(obj);
787     }
788   }
789 };
790 
// Collect all live instances of 'k' into 'result'. Must run at a safepoint
// with the Heap_lock held.
void HeapInspection::find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
  assert(Heap_lock->is_locked(), "should have the Heap_lock");

  // Ensure that the heap is parsable
  Universe::heap()->ensure_parsability(false);  // no need to retire TLABs

  // Iterate over objects in the heap
  FindInstanceClosure fic(k, result);
  Universe::heap()->object_iterate(&fic);
}