1 /*
  2  * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/classLoaderData.inline.hpp"
 27 #include "classfile/classLoaderDataGraph.hpp"
 28 #include "classfile/moduleEntry.hpp"
 29 #include "classfile/vmClasses.hpp"
 30 #include "gc/shared/collectedHeap.hpp"
 31 #include "logging/log.hpp"
 32 #include "logging/logTag.hpp"
 33 #include "memory/heapInspection.hpp"
 34 #include "memory/resourceArea.hpp"
 35 #include "memory/universe.hpp"
 36 #include "oops/oop.inline.hpp"
 37 #include "oops/inlineKlass.inline.hpp"
 38 #include "runtime/reflectionUtils.hpp"
 39 #include "runtime/atomic.hpp"
 40 #include "runtime/os.hpp"
 41 #include "runtime/fieldDescriptor.inline.hpp"
 42 #include "services/memTracker.hpp"
 43 #include "utilities/globalDefinitions.hpp"
 44 #include "utilities/macros.hpp"
 45 #include "utilities/stack.inline.hpp"
 46 
 47 // HeapInspection
 48 
 49 inline KlassInfoEntry::~KlassInfoEntry() {
 50   if (_subclasses != NULL) {
 51     delete _subclasses;
 52   }
 53 }
 54 
 55 inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
 56   if (_subclasses == NULL) {
 57     _subclasses = new  (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
 58   }
 59   _subclasses->append(cie);
 60 }
 61 
 62 int KlassInfoEntry::compare(KlassInfoEntry* e1, KlassInfoEntry* e2) {
 63   if(e1->_instance_words > e2->_instance_words) {
 64     return -1;
 65   } else if(e1->_instance_words < e2->_instance_words) {
 66     return 1;
 67   }
 68   // Sort alphabetically, note 'Z' < '[' < 'a', but it's better to group
 69   // the array classes before all the instance classes.
 70   ResourceMark rm;
 71   const char* name1 = e1->klass()->external_name();
 72   const char* name2 = e2->klass()->external_name();
 73   bool d1 = (name1[0] == JVM_SIGNATURE_ARRAY);
 74   bool d2 = (name2[0] == JVM_SIGNATURE_ARRAY);
 75   if (d1 && !d2) {
 76     return -1;
 77   } else if (d2 && !d1) {
 78     return 1;
 79   } else {
 80     return strcmp(name1, name2);
 81   }
 82 }
 83 
 84 const char* KlassInfoEntry::name() const {
 85   const char* name;
 86   if (_klass->name() != NULL) {
 87     name = _klass->external_name();
 88   } else {
 89     if (_klass == Universe::boolArrayKlassObj())         name = "<boolArrayKlass>";         else
 90     if (_klass == Universe::charArrayKlassObj())         name = "<charArrayKlass>";         else
 91     if (_klass == Universe::floatArrayKlassObj())        name = "<floatArrayKlass>";        else
 92     if (_klass == Universe::doubleArrayKlassObj())       name = "<doubleArrayKlass>";       else
 93     if (_klass == Universe::byteArrayKlassObj())         name = "<byteArrayKlass>";         else
 94     if (_klass == Universe::shortArrayKlassObj())        name = "<shortArrayKlass>";        else
 95     if (_klass == Universe::intArrayKlassObj())          name = "<intArrayKlass>";          else
 96     if (_klass == Universe::longArrayKlassObj())         name = "<longArrayKlass>";         else
 97       name = "<no name>";
 98   }
 99   return name;
100 }
101 
// Prints one histogram row: instance count, total bytes, and the class name.
// Classes in a named module get a trailing "(module@version)" suffix.
void KlassInfoEntry::print_on(outputStream* st) const {
  ResourceMark rm;

  // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
  ModuleEntry* module = _klass->module();
  if (module->is_named()) {
    // Named module: include the module name and, when present, "@version".
    st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s (%s%s%s)",
                 (int64_t)_instance_count,
                 (uint64_t)_instance_words * HeapWordSize,
                 name(),
                 module->name()->as_C_string(),
                 module->version() != NULL ? "@" : "",
                 module->version() != NULL ? module->version()->as_C_string() : "");
  } else {
    // Unnamed module: just count, bytes, and name.
    st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s",
                 (int64_t)_instance_count,
                 (uint64_t)_instance_words * HeapWordSize,
                 name());
  }
}
122 
// Finds the entry for klass k in this bucket's chain, creating one on a miss.
// Returns NULL if k has no mirror yet (unloaded archived class) or if the
// C-heap allocation of a new entry fails.
KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
  // Can happen if k is an archived class that we haven't loaded yet.
  if (k->java_mirror_no_keepalive() == NULL) {
    return NULL;
  }

  // Linear scan of the bucket's singly-linked chain.
  KlassInfoEntry* elt = _list;
  while (elt != NULL) {
    if (elt->is_equal(k)) {
      return elt;
    }
    elt = elt->next();
  }
  // Miss: allocate a new entry and link it at the head of the chain.
  elt = new (std::nothrow) KlassInfoEntry(k, list());
  // We may be out of space to allocate the new entry.
  if (elt != NULL) {
    set_list(elt);
  }
  return elt;
}
143 
144 void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
145   KlassInfoEntry* elt = _list;
146   while (elt != NULL) {
147     cic->do_cinfo(elt);
148     elt = elt->next();
149   }
150 }
151 
152 void KlassInfoBucket::empty() {
153   KlassInfoEntry* elt = _list;
154   _list = NULL;
155   while (elt != NULL) {
156     KlassInfoEntry* next = elt->next();
157     delete elt;
158     elt = next;
159   }
160 }
161 
// Closure handed to ClassLoaderDataGraph::classes_do() to pre-populate the
// KlassInfoTable with an entry for every loaded class.
class KlassInfoTable::AllClassesFinder : public LockedClassesDo {
  KlassInfoTable *_table;  // table being populated (not owned)
public:
  AllClassesFinder(KlassInfoTable* table) : _table(table) {}
  virtual void do_klass(Klass* k) {
    // This has the SIDE EFFECT of creating a KlassInfoEntry
    // for <k>, if one doesn't exist yet.
    _table->lookup(k);
  }
};
172 
173 
// Builds the klass hashtable. If add_all_classes is true, an entry is
// eagerly created for every loaded class; otherwise entries are created on
// demand by lookup(). On C-heap exhaustion _buckets stays NULL, which
// callers detect via allocation_failed().
KlassInfoTable::KlassInfoTable(bool add_all_classes) {
  _size_of_instances_in_words = 0;
  // Reference point subtracted in hash(); an arbitrary fixed Klass address.
  _ref = (HeapWord*) Universe::boolArrayKlassObj();
  // RETURN_NULL: a failed allocation yields NULL rather than aborting the VM.
  _buckets =
    (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
       mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_buckets != NULL) {
    for (int index = 0; index < _num_buckets; index++) {
      _buckets[index].initialize();
    }
    if (add_all_classes) {
      AllClassesFinder finder(this);
      ClassLoaderDataGraph::classes_do(&finder);
    }
  }
}
190 
191 KlassInfoTable::~KlassInfoTable() {
192   if (_buckets != NULL) {
193     for (int index = 0; index < _num_buckets; index++) {
194       _buckets[index].empty();
195     }
196     FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
197     _buckets = NULL;
198   }
199 }
200 
// Hashes a Klass pointer by its offset from the fixed reference point _ref.
// The shift drops the low bits, which are presumably always zero due to
// Klass alignment — the caller takes this modulo _num_buckets.
uint KlassInfoTable::hash(const Klass* p) {
  return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
}
204 
// Finds (or creates) the table entry for klass k. Returns NULL when a new
// entry cannot be allocated or k is an unloaded archived class.
KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
  uint         idx = hash(k) % _num_buckets;
  assert(_buckets != NULL, "Allocation failure should have been caught");
  KlassInfoEntry*  e   = _buckets[idx].lookup(k);
  // Lookup may fail if this is a new klass for which we
  // could not allocate space for an new entry, or if it's
  // an archived class that we haven't loaded yet.
  assert(e == NULL || k == e->klass(), "must be equal");
  return e;
}
215 
216 // Return false if the entry could not be recorded on account
217 // of running out of space required to create a new entry.
218 bool KlassInfoTable::record_instance(const oop obj) {
219   Klass*        k = obj->klass();
220   KlassInfoEntry* elt = lookup(k);
221   // elt may be NULL if it's a new klass for which we
222   // could not allocate space for a new entry in the hashtable.
223   if (elt != NULL) {
224     elt->set_count(elt->count() + 1);
225     elt->set_words(elt->words() + obj->size());
226     _size_of_instances_in_words += obj->size();
227     return true;
228   } else {
229     return false;
230   }
231 }
232 
233 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
234   assert(_buckets != NULL, "Allocation failure should have been caught");
235   for (int index = 0; index < _num_buckets; index++) {
236     _buckets[index].iterate(cic);
237   }
238 }
239 
// Total heap words accounted so far via record_instance()/merge_entry().
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
243 
244 // Return false if the entry could not be recorded on account
245 // of running out of space required to create a new entry.
246 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
247   Klass*          k = cie->klass();
248   KlassInfoEntry* elt = lookup(k);
249   // elt may be NULL if it's a new klass for which we
250   // could not allocate space for a new entry in the hashtable.
251   if (elt != NULL) {
252     elt->set_count(elt->count() + cie->count());
253     elt->set_words(elt->words() + cie->words());
254     _size_of_instances_in_words += cie->words();
255     return true;
256   }
257   return false;
258 }
259 
// Folds every visited entry into a destination table; remembers whether
// every merge succeeded (a merge fails on C-heap exhaustion).
class KlassInfoTableMergeClosure : public KlassInfoClosure {
private:
  KlassInfoTable* _dest;  // destination table (not owned)
  bool _success;          // sticky: false once any merge_entry() fails
public:
  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
  void do_cinfo(KlassInfoEntry* cie) {
    _success &= _dest->merge_entry(cie);
  }
  bool success() { return _success; }
};
271 
// Merges all entries of 'table' into this table.
// Returns false if any entry could not be merged (allocation failure).
bool KlassInfoTable::merge(KlassInfoTable* table) {
  KlassInfoTableMergeClosure closure(this);
  table->iterate(&closure);
  return closure.success();
}
278 
// qsort-style adapter for GrowableArray::sort: unwraps the double
// indirection and delegates to KlassInfoEntry::compare.
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
282 
// Creates an empty histogram over the given table; the element array is
// C-heap allocated and released by the destructor.
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}
287 
// Frees the element array (the entries themselves are owned by the table).
KlassInfoHisto::~KlassInfoHisto() {
  delete _elements;
}
291 
// Appends one table entry to the histogram (entry remains table-owned).
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
295 
// Orders the histogram rows using KlassInfoEntry::compare (size-descending,
// then by name) via the sort_helper adapter.
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
299 
// Prints every histogram row (rank-numbered from 1) followed by a totals line.
void KlassInfoHisto::print_elements(outputStream* st) const {
  // simplify the formatting (ILP32 vs LP64) - store the sum in 64-bit
  int64_t total = 0;    // running instance count
  uint64_t totalw = 0;  // running word total
  for(int i=0; i < elements()->length(); i++) {
    st->print("%4d: ", i+1);
    elements()->at(i)->print_on(st);
    total += elements()->at(i)->count();
    totalw += elements()->at(i)->words();
  }
  st->print_cr("Total " INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13),
               total, totalw * HeapWordSize);
}
313 
314 class HierarchyClosure : public KlassInfoClosure {
315 private:
316   GrowableArray<KlassInfoEntry*> *_elements;
317 public:
318   HierarchyClosure(GrowableArray<KlassInfoEntry*> *_elements) : _elements(_elements) {}
319 
320   void do_cinfo(KlassInfoEntry* cie) {
321     // ignore array classes
322     if (cie->klass()->is_instance_klass()) {
323       _elements->append(cie);
324     }
325   }
326 };
327 
// Prints the loaded-class hierarchy rooted at java.lang.Object, optionally
// restricted to the super/subclass chains of 'classname', and optionally
// listing each class's interfaces.
void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfaces,
                                           bool print_subclasses, char* classname) {
  ResourceMark rm;
  Stack <KlassInfoEntry*, mtClass> class_stack;
  GrowableArray<KlassInfoEntry*> elements;

  // Add all classes to the KlassInfoTable, which allows for quick lookup.
  // A KlassInfoEntry will be created for each class.
  KlassInfoTable cit(true);
  if (cit.allocation_failed()) {
    st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
    return;
  }

  // Add all created KlassInfoEntry instances to the elements array for easy
  // iteration, and to allow each KlassInfoEntry instance to have a unique index.
  HierarchyClosure hc(&elements);
  cit.iterate(&hc);

  // Wire up the hierarchy: give each entry an index and register it as a
  // subclass of its superclass's entry.
  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    Klass* super = cie->klass()->super();

    // Set the index for the class.
    cie->set_index(i + 1);

    // Add the class to the subclass array of its superclass.
    if (super != NULL) {
      KlassInfoEntry* super_cie = cit.lookup(super);
      assert(super_cie != NULL, "could not lookup superclass");
      super_cie->add_subclass(cie);
    }
  }

  // Set the do_print flag for each class that should be printed.
  for(int i = 0; i < elements.length(); i++) {
    KlassInfoEntry* cie = elements.at(i);
    if (classname == NULL) {
      // We are printing all classes.
      cie->set_do_print(true);
    } else {
      // We are only printing the hierarchy of a specific class.
      if (strcmp(classname, cie->klass()->external_name()) == 0) {
        KlassHierarchy::set_do_print_for_class_hierarchy(cie, &cit, print_subclasses);
      }
    }
  }

  // Now we do a depth first traversal of the class hierarchy. The class_stack will
  // maintain the list of classes we still need to process. Start things off
  // by priming it with java.lang.Object.
  KlassInfoEntry* jlo_cie = cit.lookup(vmClasses::Object_klass());
  assert(jlo_cie != NULL, "could not lookup java.lang.Object");
  class_stack.push(jlo_cie);

  // Repeatedly pop the top item off the stack, print its class info,
  // and push all of its subclasses on to the stack. Do this until there
  // are no classes left on the stack.
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    if (curr_cie->do_print()) {
      print_class(st, curr_cie, print_interfaces);
      if (curr_cie->subclasses() != NULL) {
        // Current class has subclasses, so push all of them onto the stack.
        for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
          KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
          if (cie->do_print()) {
            class_stack.push(cie);
          }
        }
      }
    }
  }

  st->flush();
}
404 
// Sets the do_print flag for every superclass and subclass of the specified class.
// Superclasses are always marked; subclasses only when print_subclasses is true.
void KlassHierarchy::set_do_print_for_class_hierarchy(KlassInfoEntry* cie, KlassInfoTable* cit,
                                                      bool print_subclasses) {
  // Set do_print for all superclasses of this class.
  Klass* super = ((InstanceKlass*)cie->klass())->java_super();
  while (super != NULL) {
    // NOTE(review): lookup() can return NULL on allocation failure;
    // this dereference assumes the table was fully populated — confirm.
    KlassInfoEntry* super_cie = cit->lookup(super);
    super_cie->set_do_print(true);
    super = super->super();
  }

  // Set do_print for this class and all of its subclasses.
  // Iterative depth-first walk over the subclass arrays.
  Stack <KlassInfoEntry*, mtClass> class_stack;
  class_stack.push(cie);
  while (!class_stack.is_empty()) {
    KlassInfoEntry* curr_cie = class_stack.pop();
    curr_cie->set_do_print(true);
    if (print_subclasses && curr_cie->subclasses() != NULL) {
      // Current class has subclasses, so push all of them onto the stack.
      for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
        KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
        class_stack.push(cie);
      }
    }
  }
}
431 
432 static void print_indent(outputStream* st, int indent) {
433   while (indent != 0) {
434     st->print("|");
435     indent--;
436     if (indent != 0) {
437       st->print("  ");
438     }
439   }
440 }
441 
// Print the class name and its unique ClassLoader identifier.
// The identifier is the ClassLoaderData address (or "null" for the
// bootstrap loader, whose loader oop is NULL).
static void print_classname(outputStream* st, Klass* klass) {
  oop loader_oop = klass->class_loader_data()->class_loader();
  st->print("%s/", klass->external_name());
  if (loader_oop == NULL) {
    st->print("null");
  } else {
    st->print(INTPTR_FORMAT, p2i(klass->class_loader_data()));
  }
}
452 
// Prints one "implements <interface>/<loader-id> (<type> intf)" line at the
// given indent level; intf_type is "declared" or "inherited".
static void print_interface(outputStream* st, InstanceKlass* intf_klass, const char* intf_type, int indent) {
  print_indent(st, indent);
  st->print("  implements ");
  print_classname(st, intf_klass);
  st->print(" (%s intf)\n", intf_type);
}
459 
// Prints one class line, indented by its depth in the superclass chain,
// optionally followed by its declared and inherited interfaces.
void KlassHierarchy::print_class(outputStream* st, KlassInfoEntry* cie, bool print_interfaces) {
  ResourceMark rm;
  InstanceKlass* klass = (InstanceKlass*)cie->klass();
  int indent = 0;

  // Print indentation with proper indicators of superclass.
  // Indent level == number of superclasses above this class.
  Klass* super = klass->super();
  while (super != NULL) {
    super = super->super();
    indent++;
  }
  print_indent(st, indent);
  if (indent != 0) st->print("--");

  // Print the class name, its unique ClassLoader identifier, and if it is an interface.
  print_classname(st, klass);
  if (klass->is_interface()) {
    st->print(" (intf)");
  }
  st->print("\n");

  // Print any interfaces the class has.
  if (print_interfaces) {
    Array<InstanceKlass*>* local_intfs = klass->local_interfaces();
    Array<InstanceKlass*>* trans_intfs = klass->transitive_interfaces();
    for (int i = 0; i < local_intfs->length(); i++) {
      print_interface(st, local_intfs->at(i), "declared", indent);
    }
    for (int i = 0; i < trans_intfs->length(); i++) {
      InstanceKlass* trans_interface = trans_intfs->at(i);
      // Only print transitive interfaces if they are not also declared.
      if (!local_intfs->contains(trans_interface)) {
        print_interface(st, trans_interface, "inherited", indent);
      }
    }
  }
}
497 
// Prints the histogram header followed by all rows (see print_elements).
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num     #instances         #bytes  class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}
503 
// Funnels every table entry into a KlassInfoHisto during iteration.
class HistoClosure : public KlassInfoClosure {
 private:
  KlassInfoHisto* _cih;  // destination histogram (not owned)
 public:
  HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}

  void do_cinfo(KlassInfoEntry* cie) {
    _cih->add(cie);
  }
};
514 
515 
// Collects every Klass whose Symbol name equals _classname. Symbols are
// interned, so pointer equality suffices for the comparison.
class FindClassByNameClosure : public KlassInfoClosure {
 private:
  GrowableArray<Klass*>* _klasses;  // result array (not owned)
  Symbol* _classname;               // name to match; may be NULL (matches nothing loaded)
 public:
  FindClassByNameClosure(GrowableArray<Klass*>* klasses, Symbol* classname) :
    _klasses(klasses), _classname(classname) { }

  void do_cinfo(KlassInfoEntry* cie) {
    if (cie->klass()->name() == _classname) {
      _klasses->append(cie->klass());
    }
  }
};
530 
531 class FieldDesc {
532 private:
533   Symbol* _name;
534   Symbol* _signature;
535   int _offset;
536   int _index;
537   InstanceKlass* _holder;
538   AccessFlags _access_flags;
539  public:
540   FieldDesc() {
541     _name = NULL;
542     _signature = NULL;
543     _offset = -1;
544     _index = -1;
545     _holder = NULL;
546     _access_flags = AccessFlags();
547   }
548   FieldDesc(fieldDescriptor& fd) {
549     _name = fd.name();
550     _signature = fd.signature();
551     _offset = fd.offset();
552     _index = fd.index();
553     _holder = fd.field_holder();
554     _access_flags = fd.access_flags();
555   }
556   const Symbol* name() { return _name;}
557   const Symbol* signature() { return _signature; }
558   const int offset() { return _offset; }
559   const int index() { return _index; }
560   const InstanceKlass* holder() { return _holder; }
561   const AccessFlags& access_flags() { return _access_flags; }
562   const bool is_inline_type() { return Signature::basic_type(_signature) == T_INLINE_TYPE; }
563 };
564 
565 static int compare_offset(FieldDesc* f1, FieldDesc* f2) {
566    return f1->offset() > f2->offset() ? 1 : -1;
567 }
568 
// Prints one field line: "@ <offset> <indent> "name" signature", plus an
// "// inline type" annotation with its inlined/not-inlined status.
static void print_field(outputStream* st, int level, int offset, FieldDesc& fd, bool is_inline_type, bool is_inlined ) {
  const char* inlined_msg = "";
  if (is_inline_type) {
    inlined_msg = is_inlined ? "inlined" : "not inlined";
  }
  // "%*s" with width level*3 produces the per-level indentation.
  st->print_cr("  @ %d %*s \"%s\" %s %s %s",
      offset, level * 3, "",
      fd.name()->as_C_string(),
      fd.signature()->as_C_string(),
      is_inline_type ? " // inline type " : "",
      inlined_msg);
}
581 
582 static void print_inlined_field(outputStream* st, int level, int offset, InstanceKlass* klass) {
583   assert(klass->is_inline_klass(), "Only inline types can be inlined");
584   InlineKlass* vklass = InlineKlass::cast(klass);
585   GrowableArray<FieldDesc>* fields = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
586   for (FieldStream fd(klass, false, false); !fd.eos(); fd.next()) {
587     if (!fd.access_flags().is_static()) {
588       fields->append(FieldDesc(fd.field_descriptor()));
589     }
590   }
591   fields->sort(compare_offset);
592   for(int i = 0; i < fields->length(); i++) {
593     FieldDesc fd = fields->at(i);
594     int offset2 = offset + fd.offset() - vklass->first_field_offset();
595     print_field(st, level, offset2, fd,
596         fd.is_inline_type(), fd.holder()->field_is_inlined(fd.index()));
597     if (fd.holder()->field_is_inlined(fd.index())) {
598       print_inlined_field(st, level + 1, offset2 ,
599           InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
600     }
601   }
602 }
603 
604 void PrintClassLayout::print_class_layout(outputStream* st, char* class_name) {
605   KlassInfoTable cit(true);
606   if (cit.allocation_failed()) {
607     st->print_cr("ERROR: Ran out of C-heap; hierarchy not generated");
608     return;
609   }
610 
611   Thread* THREAD = Thread::current();
612 
613   Symbol* classname = SymbolTable::probe(class_name, (int)strlen(class_name));
614 
615   GrowableArray<Klass*>* klasses = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<Klass*>(100, mtServiceability);
616 
617   FindClassByNameClosure fbnc(klasses, classname);
618   cit.iterate(&fbnc);
619 
620   for(int i = 0; i < klasses->length(); i++) {
621     Klass* klass = klasses->at(i);
622     if (!klass->is_instance_klass()) continue;  // Skip
623     InstanceKlass* ik = InstanceKlass::cast(klass);
624     int tab = 1;
625     st->print_cr("Class %s [@%s]:", klass->name()->as_C_string(),
626         klass->class_loader_data()->loader_name());
627     ResourceMark rm;
628     GrowableArray<FieldDesc>* fields = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<FieldDesc>(100, mtServiceability);
629     for (FieldStream fd(ik, false, false); !fd.eos(); fd.next()) {
630       if (!fd.access_flags().is_static()) {
631         fields->append(FieldDesc(fd.field_descriptor()));
632       }
633     }
634     fields->sort(compare_offset);
635     for(int i = 0; i < fields->length(); i++) {
636       FieldDesc fd = fields->at(i);
637       print_field(st, 0, fd.offset(), fd, fd.is_inline_type(), fd.holder()->field_is_inlined(fd.index()));
638       if (fd.holder()->field_is_inlined(fd.index())) {
639         print_inlined_field(st, 1, fd.offset(),
640             InstanceKlass::cast(fd.holder()->get_inline_type_field_klass(fd.index())));
641       }
642     }
643   }
644   st->cr();
645 }
646 
// Heap-walk closure: records every visited object (that passes the optional
// filter) in a KlassInfoTable, counting the ones that could not be recorded
// because entry allocation failed.
class RecordInstanceClosure : public ObjectClosure {
 private:
  KlassInfoTable* _cit;       // destination table (not owned)
  uintx _missed_count;        // objects dropped due to allocation failure
  BoolObjectClosure* _filter; // optional predicate; NULL means visit all
 public:
  RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
    _cit(cit), _missed_count(0), _filter(filter) {}

  void do_object(oop obj) {
    if (should_visit(obj)) {
      if (!_cit->record_instance(obj)) {
        _missed_count++;
      }
    }
  }

  uintx missed_count() { return _missed_count; }

 private:
  bool should_visit(oop obj) {
    return _filter == NULL || _filter->do_object_b(obj);
  }
};
671 
// Heap inspection for every worker.
// When native OOM happens for KlassInfoTable, set _success to false.
// Each worker fills a private table from its slice of the heap, then merges
// it into the shared table under _mutex.
void ParHeapInspectTask::work(uint worker_id) {
  uintx missed_count = 0;
  bool merge_success = true;
  if (!Atomic::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  // Private, per-worker table; entries are created on demand.
  KlassInfoTable cit(false);
  if (cit.allocation_failed()) {
    // fail to allocate memory, stop parallel mode
    Atomic::store(&_success, false);
    return;
  }
  RecordInstanceClosure ric(&cit, _filter);
  _poi->object_iterate(&ric, worker_id);
  missed_count = ric.missed_count();
  {
    // Merging into the shared table must be serialized across workers.
    MutexLocker x(&_mutex, Mutex::_no_safepoint_check_flag);
    merge_success = _shared_cit->merge(&cit);
  }
  if (merge_success) {
    Atomic::add(&_missed_count, missed_count);
  } else {
    Atomic::store(&_success, false);
  }
}
701 
// Fills cit with per-klass instance counts/sizes for the whole heap,
// preferring parallel iteration when the GC supports it and falling back to
// a serial walk otherwise. Returns the number of objects that could not be
// recorded (C-heap exhaustion).
uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {

  // Try parallel first.
  if (parallel_thread_num > 1) {
    ResourceMark rm;

    WorkerThreads* workers = Universe::heap()->safepoint_workers();
    if (workers != NULL) {
      // The GC provided a WorkerThreads to be used during a safepoint.

      // Can't run with more threads than provided by the WorkerThreads.
      const uint capped_parallel_thread_num = MIN2(parallel_thread_num, workers->max_workers());
      WithActiveWorkers with_active_workers(workers, capped_parallel_thread_num);

      ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(workers->active_workers());
      if (poi != NULL) {
        // The GC supports parallel object iteration.

        ParHeapInspectTask task(poi, cit, filter);
        // Run task with the active workers.
        workers->run_task(&task);

        delete poi;
        if (task.success()) {
          return task.missed_count();
        }
        // Parallel inspection failed (native OOM); fall through to serial.
      }
    }
  }

  ResourceMark rm;
  // If no parallel iteration available, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
738 
// Top-level entry for the class histogram (jmap -histo / GC.class_histogram):
// populates a KlassInfoTable from the heap, sorts it into a histogram, and
// prints it to st.
void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
  ResourceMark rm;

  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    // populate table with object allocation info
    uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
    if (missed_count != 0) {
      // Histogram is still printed, but undercounts the missed objects.
      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                               " total instances in data below",
                               missed_count);
    }

    // Sort and print klass instance info
    KlassInfoHisto histo(&cit);
    HistoClosure hc(&histo);

    cit.iterate(&hc);

    histo.sort();
    histo.print_histo_on(st);
  } else {
    st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
  }
  st->flush();
}
765 
// Heap-walk closure: appends every object that is an instance of _klass
// (or a subtype) to the result array, keeping each object alive first.
class FindInstanceClosure : public ObjectClosure {
 private:
  Klass* _klass;              // klass to match via oop::is_a
  GrowableArray<oop>* _result;  // matching objects (array not owned)

 public:
  FindInstanceClosure(Klass* k, GrowableArray<oop>* result) : _klass(k), _result(result) {};

  void do_object(oop obj) {
    if (obj->is_a(_klass)) {
      // obj was read with AS_NO_KEEPALIVE, or equivalent.
      // The object needs to be kept alive when it is published.
      Universe::heap()->keep_alive(obj);

      _result->append(obj);
    }
  }
};
784 
// Collects every live instance of klass k (and subtypes) into result.
// Must run at a safepoint with the Heap_lock held.
void HeapInspection::find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
  assert(Heap_lock->is_locked(), "should have the Heap_lock");

  // Ensure that the heap is parsable
  Universe::heap()->ensure_parsability(false);  // no need to retire TLABs

  // Iterate over objects in the heap
  FindInstanceClosure fic(k, result);
  Universe::heap()->object_iterate(&fic);
}