1  /*
   2  * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // A ClassLoaderData identifies the full set of class types that a class
  26 // loader's name resolution strategy produces for a given configuration of the
  27 // class loader.
// Class types in the ClassLoaderData may be defined from class file binaries
// provided by the class loader, or by other class loaders it interacts with
// according to its name resolution strategy.
  31 //
  32 // Class loaders that implement a deterministic name resolution strategy
  33 // (including with respect to their delegation behavior), such as the boot, the
  34 // platform, and the system loaders of the JDK's built-in class loader
  35 // hierarchy, always produce the same linkset for a given configuration.
  36 //
  37 // ClassLoaderData carries information related to a linkset (e.g.,
  38 // metaspace holding its klass definitions).
  39 // The System Dictionary and related data structures (e.g., placeholder table,
  40 // loader constraints table) as well as the runtime representation of classes
  41 // only reference ClassLoaderData.
  42 //
// Instances of java.lang.ClassLoader hold a pointer to the ClassLoaderData
// that represents the loader's "linking domain" in the JVM.
  45 //
  46 // The bootstrap loader (represented by null) also has a ClassLoaderData,
  47 // the singleton class the_null_class_loader_data().
  48 
  49 #include "cds/heapShared.hpp"
  50 #include "classfile/classLoaderData.inline.hpp"
  51 #include "classfile/classLoaderDataGraph.inline.hpp"
  52 #include "classfile/dictionary.hpp"
  53 #include "classfile/javaClasses.inline.hpp"
  54 #include "classfile/moduleEntry.hpp"
  55 #include "classfile/packageEntry.hpp"
  56 #include "classfile/symbolTable.hpp"
  57 #include "classfile/systemDictionary.hpp"
  58 #include "classfile/systemDictionaryShared.hpp"
  59 #include "classfile/vmClasses.hpp"
  60 #include "logging/log.hpp"
  61 #include "logging/logStream.hpp"
  62 #include "memory/allocation.inline.hpp"
  63 #include "memory/classLoaderMetaspace.hpp"
  64 #include "memory/metadataFactory.hpp"
  65 #include "memory/metaspace.hpp"
  66 #include "memory/resourceArea.hpp"
  67 #include "memory/universe.hpp"
  68 #include "oops/access.inline.hpp"
  69 #include "oops/inlineKlass.inline.hpp"
  70 #include "oops/jmethodIDTable.hpp"
  71 #include "oops/klass.inline.hpp"
  72 #include "oops/oop.inline.hpp"
  73 #include "oops/oopHandle.inline.hpp"
  74 #include "oops/verifyOopClosure.hpp"
  75 #include "oops/weakHandle.inline.hpp"
  76 #include "runtime/arguments.hpp"
  77 #include "runtime/atomicAccess.hpp"
  78 #include "runtime/handles.inline.hpp"
  79 #include "runtime/mutex.hpp"
  80 #include "runtime/safepoint.hpp"
  81 #include "utilities/growableArray.hpp"
  82 #include "utilities/macros.hpp"
  83 #include "utilities/ostream.hpp"
  84 
// Singleton CLD for the bootstrap (null) class loader; set exactly once in
// init_null_class_loader_data().
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = nullptr;
  86 
  87 void ClassLoaderData::init_null_class_loader_data() {
  88   assert(_the_null_class_loader_data == nullptr, "cannot initialize twice");
  89   assert(ClassLoaderDataGraph::_head == nullptr, "cannot initialize twice");
  90 
  91   _the_null_class_loader_data = new ClassLoaderData(Handle(), false);
  92   ClassLoaderDataGraph::_head = _the_null_class_loader_data;
  93   assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
  94 
  95   LogTarget(Trace, class, loader, data) lt;
  96   if (lt.is_enabled()) {
  97     ResourceMark rm;
  98     LogStream ls(lt);
  99     ls.print("create ");
 100     _the_null_class_loader_data->print_value_on(&ls);
 101     ls.cr();
 102   }
 103 }
 104 
 105 // Obtain and set the class loader's name within the ClassLoaderData so
 106 // it will be available for error messages, logging, JFR, etc.  The name
 107 // and klass are available after the class_loader oop is no longer alive,
 108 // during unloading.
void ClassLoaderData::initialize_name(Handle class_loader) {
  ResourceMark rm;

  // Obtain the class loader's name.  If the class loader's name was not
  // explicitly set during construction, the CLD's _name field will be null.
  oop cl_name = java_lang_ClassLoader::name(class_loader());
  if (cl_name != nullptr) {
    const char* cl_instance_name = java_lang_String::as_utf8_string(cl_name);

    // Only intern a non-empty name; otherwise _name stays null.
    if (cl_instance_name != nullptr && cl_instance_name[0] != '\0') {
      _name = SymbolTable::new_symbol(cl_instance_name);
    }
  }

  // Obtain the class loader's name and identity hash.  If the class loader's
  // name was not explicitly set during construction, the class loader's name and id
  // will be set to the qualified class name of the class loader along with its
  // identity hash.
  // If for some reason the ClassLoader's constructor has not been run, instead of
  // leaving the _name_and_id field null, fall back to the external qualified class
  // name.  Thus CLD's _name_and_id field should never have a null value.
  oop cl_name_and_id = java_lang_ClassLoader::nameAndId(class_loader());
  const char* cl_instance_name_and_id =
                  (cl_name_and_id == nullptr) ? _class_loader_klass->external_name() :
                                             java_lang_String::as_utf8_string(cl_name_and_id);
  assert(cl_instance_name_and_id != nullptr && cl_instance_name_and_id[0] != '\0', "class loader has no name and id");
  // The Symbol refcounts taken here are released in ~ClassLoaderData.
  _name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id);
}
 137 
// Construct a CLD for the given loader oop (null for the boot loader).
// has_class_mirror_holder is true for CLDs created for non-strong hidden
// classes, which get no module/package tables and no dictionary.
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder) :
  _metaspace(nullptr),
  _metaspace_lock(new Mutex(Mutex::nosafepoint-2, "MetaspaceAllocation_lock")),
  _unloading(false), _has_class_mirror_holder(has_class_mirror_holder),
  // Start "modified" so the first scan that checks the flag visits this CLD
  // (see record_modified_oops()/clear_modified_oops()).
  _modified_oops(true),
  // A non-strong hidden class loader data doesn't have anything to keep
  // it from being unloaded during parsing of the non-strong hidden class.
  // The null-class-loader should always be kept alive.
  _keep_alive_ref_count((has_class_mirror_holder || h_class_loader.is_null()) ? 1 : 0),
  _claim(0),
  _handles(),
  _klasses(nullptr), _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr), _dictionary(nullptr),
  _jmethod_ids(nullptr),
  _deallocate_list(nullptr),
  _next(nullptr),
  _unloading_next(nullptr),
  _class_loader_klass(nullptr), _name(nullptr), _name_and_id(nullptr) {

  if (!h_class_loader.is_null()) {
    // Keep a handle to the loader oop, and cache its klass and name so they
    // remain available after the loader oop is no longer alive (see
    // initialize_name()).
    _class_loader = _handles.add(h_class_loader());
    _class_loader_klass = h_class_loader->klass();
    initialize_name(h_class_loader);
  }

  if (!has_class_mirror_holder) {
    // The holder is initialized later for non-strong hidden classes,
    // and before calling anything that call class_loader().
    initialize_holder(h_class_loader);

    // A ClassLoaderData created solely for a non-strong hidden class should never
    // have a ModuleEntryTable or PackageEntryTable created for it.
    _packages = new PackageEntryTable();
    if (h_class_loader.is_null()) {
      // Create unnamed module for boot loader
      _unnamed_module = ModuleEntry::create_boot_unnamed_module(this);
    } else {
      // Create unnamed module for all other loaders
      _unnamed_module = ModuleEntry::create_unnamed_module(this);
    }
    _dictionary = create_dictionary();
  }

  NOT_PRODUCT(_dependency_count = 0); // number of class loader dependencies

  JFR_ONLY(INIT_ID(this);)
}
 184 
 185 ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
 186   Chunk* c = _head;
 187   while (c != nullptr) {
 188     Chunk* next = c->_next;
 189     delete c;
 190     c = next;
 191   }
 192 }
 193 
// Append an oop to the list and return a handle to its slot. Writers are
// serialized externally; readers walk the list lock-free, so each step is
// published with release semantics.
OopHandle ClassLoaderData::ChunkedHandleList::add(oop o) {
  // Start a new chunk when there is none yet or the head chunk is full.
  if (_head == nullptr || _head->_size == Chunk::CAPACITY) {
    Chunk* next = new Chunk(_head);
    // Publish the new chunk; readers pair with load_acquire on _head.
    AtomicAccess::release_store(&_head, next);
  }
  oop* handle = &_head->_data[_head->_size];
  // The slot has never held an oop before, hence IS_DEST_UNINITIALIZED.
  NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
  // Publish the new size only after the slot's contents have been stored.
  AtomicAccess::release_store(&_head->_size, _head->_size + 1);
  return OopHandle(handle);
}
 204 
 205 int ClassLoaderData::ChunkedHandleList::count() const {
 206   int count = 0;
 207   Chunk* chunk = AtomicAccess::load_acquire(&_head);
 208   while (chunk != nullptr) {
 209     count += AtomicAccess::load(&chunk->_size);
 210     chunk = chunk->_next;
 211   }
 212   return count;
 213 }
 214 
 215 inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
 216   for (juint i = 0; i < size; i++) {
 217     f->do_oop(&c->_data[i]);
 218   }
 219 }
 220 
// Apply the closure to every handle in the list (lock-free walk).
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
  Chunk* head = AtomicAccess::load_acquire(&_head);
  if (head != nullptr) {
    // Must be careful when reading size of head: the head chunk may still be
    // filling concurrently, so pair with the release_store of _size in add().
    // Chunks behind the head are full and no longer change size.
    oops_do_chunk(f, head, AtomicAccess::load_acquire(&head->_size));
    for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
      oops_do_chunk(f, c, c->_size);
    }
  }
}
 231 
 232 class VerifyContainsOopClosure : public OopClosure {
 233   oop  _target;
 234   bool _found;
 235 
 236  public:
 237   VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
 238 
 239   void do_oop(oop* p) {
 240     if (p != nullptr && NativeAccess<AS_NO_KEEPALIVE>::oop_load(p) == _target) {
 241       _found = true;
 242     }
 243   }
 244 
 245   void do_oop(narrowOop* p) {
 246     // The ChunkedHandleList should not contain any narrowOop
 247     ShouldNotReachHere();
 248   }
 249 
 250   bool found() const {
 251     return _found;
 252   }
 253 };
 254 
 255 bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
 256   VerifyContainsOopClosure cl(p);
 257   oops_do(&cl);
 258   return cl.found();
 259 }
 260 
 261 #ifndef PRODUCT
 262 bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
 263   Chunk* chunk = AtomicAccess::load_acquire(&_head);
 264   while (chunk != nullptr) {
 265     if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[AtomicAccess::load(&chunk->_size)])) {
 266       return true;
 267     }
 268     chunk = chunk->_next;
 269   }
 270   return false;
 271 }
 272 #endif // PRODUCT
 273 
// Atomically clear the given claim bit(s) from _claim using a CAS retry loop.
void ClassLoaderData::clear_claim(int claim) {
  for (;;) {
    int old_claim = AtomicAccess::load(&_claim);
    if ((old_claim & claim) == 0) {
      return; // Nothing to clear.
    }
    int new_claim = old_claim & ~claim;
    // Retry if another thread updated _claim concurrently.
    if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
      return;
    }
  }
}
 286 
#ifdef ASSERT
// Debug-only check that none of the given claim bits are currently set.
void ClassLoaderData::verify_not_claimed(int claim) {
  assert((_claim & claim) == 0, "Found claim: %d bits in _claim: %d", claim, _claim);
}
#endif
 292 
// Attempt to set the given claim bit(s). Returns false if all requested bits
// were already set (another thread holds the claim); true if this thread
// successfully set at least one of them.
bool ClassLoaderData::try_claim(int claim) {
  for (;;) {
    int old_claim = AtomicAccess::load(&_claim);
    if ((old_claim & claim) == claim) {
      return false;
    }
    int new_claim = old_claim | claim;
    // Retry if another thread updated _claim concurrently.
    if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
      return true;
    }
  }
}
 305 
void ClassLoaderData::demote_strong_roots() {
  // The oop handle area contains strong roots that the GC traces from. We are about
  // to demote them to strong native oops that the GC does *not* trace from. Conceptually,
  // we are retiring a rather normal strong root, and creating a strong non-root handle,
  // which happens to reuse the same address as the normal strong root had.
  // Unless we invoke the right barriers, the GC might not notice that a strong root
  // has been pulled from the system, and is left unprocessed by the GC. There can be
  // several consequences:
  // 1. A concurrently marking snapshot-at-the-beginning GC might assume that the contents
  //    of all strong roots get processed by the GC in order to keep them alive. Without
  //    barriers, some objects might not be kept alive.
  // 2. A concurrently relocating GC might assume that after moving an object, a subsequent
  //    tracing from all roots can fix all the pointers in the system, which doesn't play
  //    well with roots racingly being pulled.
  // 3. A concurrent GC using colored pointers, might assume that tracing the object graph
  //    from roots results in all pointers getting some particular color, which also doesn't
  //    play well with roots being pulled out from the system concurrently.

  class TransitionRootsOopClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // By loading the strong root with the access API, we can use the right barriers to
      // store the oop as a strong non-root handle, that happens to reuse the same memory
      // address as the strong root. The barriered store ensures that:
      // 1. The concurrent SATB marking properties are satisfied as the store will keep
      //    the oop alive.
      // 2. The concurrent object movement properties are satisfied as we store the address
      //    of the new location of the object, if any.
      // 3. The colors if any will be stored as the new good colors.
      oop obj = NativeAccess<>::oop_load(p); // Load the strong root
      NativeAccess<>::oop_store(p, obj); // Store the strong non-root
    }

    virtual void do_oop(narrowOop* p) {
      // The handle area holds full-width oops only.
      ShouldNotReachHere();
    }
  } cl;
  // _claim_none: walk unconditionally; false: leave the modified-oops flag alone.
  oops_do(&cl, ClassLoaderData::_claim_none, false /* clear_mod_oops */);
}
 345 
 346 // Non-strong hidden classes have their own ClassLoaderData that is marked to keep alive
 347 // while the class is being parsed, and if the class appears on the module fixup list.
 348 // Due to the uniqueness that no other class shares the hidden class' name or
 349 // ClassLoaderData, no other non-GC thread has knowledge of the hidden class while
 350 // it is being defined, therefore _keep_alive_ref_count is not volatile or atomic.
 351 void ClassLoaderData::inc_keep_alive_ref_count() {
 352   if (has_class_mirror_holder()) {
 353     assert(_keep_alive_ref_count > 0, "Invalid keep alive increment count");
 354     _keep_alive_ref_count++;
 355   }
 356 }
 357 
// Drop one keep-alive reference; only class-mirror-holder CLDs are counted.
// When the last reference is about to go away, the handle-area roots are
// demoted with the proper GC barriers first.
void ClassLoaderData::dec_keep_alive_ref_count() {
  if (has_class_mirror_holder()) {
    assert(_keep_alive_ref_count > 0, "Invalid keep alive decrement count");
    if (_keep_alive_ref_count == 1) {
      // When the keep_alive_ref_count counter is 1, the oop handle area is a strong root,
      // acting as input to the GC tracing. Such strong roots are part of the
      // snapshot-at-the-beginning, and can not just be pulled out from the
      // system when concurrent GCs are running at the same time, without
      // invoking the right barriers.
      demote_strong_roots();
    }
    _keep_alive_ref_count--;
  }
}
 372 
// Apply f to all oops in this CLD's handle area. When claim_value is not
// _claim_none, only the thread that wins the claim proceeds, so each CLD is
// visited at most once per claim pass.
void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oops) {
  if (claim_value != ClassLoaderData::_claim_none && !try_claim(claim_value)) {
    return;
  }

  // Only clear modified_oops after the ClassLoaderData is claimed.
  if (clear_mod_oops) {
    clear_modified_oops();
  }

  _handles.oops_do(f);
}
 385 
 386 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
 387   // Lock-free access requires load_acquire
 388   for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
 389     klass_closure->do_klass(k);
 390     assert(k != k->next_link(), "no loops!");
 391   }
 392 }
 393 
 394 void ClassLoaderData::classes_do(void f(Klass * const)) {
 395   // Lock-free access requires load_acquire
 396   for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
 397     f(k);
 398     assert(k != k->next_link(), "no loops!");
 399   }
 400 }
 401 
 402 void ClassLoaderData::methods_do(void f(Method*)) {
 403   // Lock-free access requires load_acquire
 404   for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
 405     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
 406       InstanceKlass::cast(k)->methods_do(f);
 407     }
 408   }
 409 }
 410 
// Apply the closure to every klass that has reached the loaded state,
// skipping in-flight InstanceKlasses and AOT-cached object arrays whose
// bottom class has not been marked loaded yet.
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
  // Lock-free access requires load_acquire
  for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    // Filter out InstanceKlasses (or their ObjArrayKlasses) that have not entered the
    // loaded state.
    if (k->is_instance_klass()) {
      if (!InstanceKlass::cast(k)->is_loaded()) {
        continue;
      }
    } else if (k->in_aot_cache() && k->is_objArray_klass()) {
      Klass* bottom = ObjArrayKlass::cast(k)->bottom_klass();
      if (bottom->is_instance_klass() && !InstanceKlass::cast(bottom)->is_loaded()) {
        // This could happen if <bottom> is a shared class that has been restored
        // but is not yet marked as loaded. All archived array classes of the
        // bottom class are already restored and placed in the _klasses list.
        continue;
      }
    }

#ifdef ASSERT
    // Every loaded klass must have a valid java.lang.Class mirror.
    oop m = k->java_mirror();
    assert(m != nullptr, "nullptr mirror");
    assert(m->is_a(vmClasses::Class_klass()), "invalid mirror");
#endif
    klass_closure->do_klass(k);
  }
}
 438 
 439 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
 440   // Lock-free access requires load_acquire
 441   for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
 442     if (k->is_instance_klass()) {
 443       f(InstanceKlass::cast(k));
 444     }
 445     assert(k != k->next_link(), "no loops!");
 446   }
 447 }
 448 
 449 void ClassLoaderData::inline_classes_do(void f(InlineKlass*)) {
 450   // Lock-free access requires load_acquire
 451   for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
 452     if (k->is_inline_klass()) {
 453       f(InlineKlass::cast(k));
 454     }
 455     assert(k != k->next_link(), "no loops!");
 456   }
 457 }
 458 
 459 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
 460   assert_locked_or_safepoint(Module_lock);
 461   if (_unnamed_module != nullptr) {
 462     f(_unnamed_module);
 463   }
 464   if (_modules != nullptr) {
 465     _modules->modules_do(f);
 466   }
 467 }
 468 
 469 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
 470   assert_locked_or_safepoint(Module_lock);
 471   if (_packages != nullptr) {
 472     _packages->packages_do(f);
 473   }
 474 }
 475 
// Record an oop dependency from this CLD to the CLD that owns k, so that k's
// owning loader (or, for non-strong hidden classes, its mirror) stays
// reachable while this loader still references it. Dependencies that the GC
// would find anyway (permanent CLDs, ancestor loaders) are skipped.
void ClassLoaderData::record_dependency(const Klass* k) {
  assert(k != nullptr, "invariant");

  ClassLoaderData * const from_cld = this;
  ClassLoaderData * const to_cld = k->class_loader_data();

  // Do not need to record dependency if the dependency is to a class whose
  // class loader data is never freed.  (i.e. the dependency's class loader
  // is one of the three builtin class loaders and the dependency's class
  // loader data has a ClassLoader holder, not a Class holder.)
  if (to_cld->is_permanent_class_loader_data()) {
    return;
  }

  oop to;
  if (to_cld->has_class_mirror_holder()) {
    // Just return if a non-strong hidden class class is attempting to record a dependency
    // to itself.  (Note that every non-strong hidden class has its own unique class
    // loader data.)
    if (to_cld == from_cld) {
      return;
    }
    // Hidden class dependencies are through the mirror.
    to = k->java_mirror();
  } else {
    to = to_cld->class_loader();
    oop from = from_cld->class_loader();

    // Just return if this dependency is to a class with the same or a parent
    // class_loader.
    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
      return; // this class loader is in the parent list, no need to add it.
    }
  }

  // It's a dependency we won't find through GC, add it.
  if (!_handles.contains(to)) {
    NOT_PRODUCT(AtomicAccess::inc(&_dependency_count));
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("adding dependency from ");
      print_value_on(&ls);
      ls.print(" to ");
      to_cld->print_value_on(&ls);
      ls.cr();
    }
    Handle dependency(Thread::current(), to);
    add_handle(dependency);
    // Added a potentially young gen oop to the ClassLoaderData
    record_modified_oops();
  }
}
 530 
// Push k onto the front of this CLD's _klasses list and bump the graph
// counters. The list is read lock-free, so the new head is published with
// release semantics.
void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
  {
    MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    Klass* old_value = _klasses;
    k->set_next_link(old_value);
    // Link the new item into the list, making sure the linked class is stable
    // since the list can be walked without a lock
    AtomicAccess::release_store(&_klasses, k);
    if (k->is_array_klass()) {
      ClassLoaderDataGraph::inc_array_classes(1);
    } else {
      ClassLoaderDataGraph::inc_instance_classes(1);
    }
  }

  // Optionally trace-log the addition (outside the lock).
  if (publicize) {
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("Adding k: " PTR_FORMAT " %s to ", p2i(k), k->external_name());
      print_value_on(&ls);
      ls.cr();
    }
  }
}
 557 
 558 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
 559   if (loader_or_mirror() != nullptr) {
 560     assert(_holder.is_null(), "never replace holders");
 561     _holder = WeakHandle(Universe::vm_weak(), loader_or_mirror);
 562   }
 563 }
 564 
 565 // Remove a klass from the _klasses list for scratch_class during redefinition
 566 // or parsed class in the case of an error.
 567 void ClassLoaderData::remove_class(Klass* scratch_class) {
 568   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
 569 
 570   Klass* prev = nullptr;
 571   for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
 572     if (k == scratch_class) {
 573       if (prev == nullptr) {
 574         _klasses = k->next_link();
 575       } else {
 576         Klass* next = k->next_link();
 577         prev->set_next_link(next);
 578       }
 579 
 580       if (k->is_array_klass()) {
 581         ClassLoaderDataGraph::dec_array_classes(1);
 582       } else {
 583         ClassLoaderDataGraph::dec_instance_classes(1);
 584       }
 585 
 586       return;
 587     }
 588     prev = k;
 589     assert(k != k->next_link(), "no loops!");
 590   }
 591   ShouldNotReachHere();   // should have found this class!!
 592 }
 593 
// Remember a jmethodID created for this CLD's methods so it can be removed
// from the global table when the CLD unloads (see remove_jmethod_ids).
void ClassLoaderData::add_jmethod_id(jmethodID mid) {
  MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  if (_jmethod_ids == nullptr) {
    // Lazily allocated on first use; freed in remove_jmethod_ids().
    _jmethod_ids = new (mtClass) GrowableArray<jmethodID>(32, mtClass);
  }
  _jmethod_ids->push(mid);
}
 601 
 602 // Method::remove_jmethod_ids removes jmethodID entries from the table which
 603 // releases memory.
 604 // Because native code (e.g., JVMTI agent) holding jmethod_ids may access them
 605 // after the associated classes and class loader are unloaded, subsequent lookups
 606 // for these ids will return null since they are no longer found in the table.
 607 // The Java Native Interface Specification says "method ID
 608 // does not prevent the VM from unloading the class from which the ID has
 609 // been derived. After the class is unloaded, the method or field ID becomes
 610 // invalid".
 611 void ClassLoaderData::remove_jmethod_ids() {
 612   MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
 613   for (int i = 0; i < _jmethod_ids->length(); i++) {
 614     jmethodID mid = _jmethod_ids->at(i);
 615     JmethodIDTable::remove(mid);
 616   }
 617   delete _jmethod_ids;
 618   _jmethod_ids = nullptr;
 619 }
 620 
// Mark this CLD as unloading and release per-class resources that must be
// freed before the CLD itself is destroyed.
void ClassLoaderData::unload() {
  _unloading = true;

  LogTarget(Trace, class, loader, data) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print("unload");
    print_value_on(&ls);
    ls.cr();
  }

  // Some items on the _deallocate_list need to free their C heap structures
  // if they are not already on the _klasses list.
  free_deallocate_list_C_heap_structures();

  // Give inline klasses a chance to release their resources.
  inline_classes_do(InlineKlass::cleanup);

  // Clean up class dependencies and tell serviceability tools
  // these classes are unloading.  This must be called
  // after erroneous classes are released.
  classes_do(InstanceKlass::unload_class);

  // Drop this CLD's jmethodIDs from the global table, if any were created.
  if (_jmethod_ids != nullptr) {
    remove_jmethod_ids();
  }
}
 648 
// Return this CLD's ModuleEntryTable, allocating it on first use.
// Uses double-checked locking: a lock-free fast path, then re-check under
// Module_lock before allocating and publishing.
ModuleEntryTable* ClassLoaderData::modules() {
  // Lazily create the module entry table at first request.
  // Lock-free access requires load_acquire.
  ModuleEntryTable* modules = AtomicAccess::load_acquire(&_modules);
  if (modules == nullptr) {
    MutexLocker m1(Module_lock);
    // Check if _modules got allocated while we were waiting for this lock.
    if ((modules = _modules) == nullptr) {
      modules = new ModuleEntryTable();

      {
        MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
        // Ensure _modules is stable, since it is examined without a lock
        AtomicAccess::release_store(&_modules, modules);
      }
    }
  }
  return modules;
}
 668 
// Dictionary sizes: the larger table is used for loaders expected to define
// many classes (boot/system), the smaller one for typical user-defined
// loaders. See create_dictionary().
const int _boot_loader_dictionary_size    = 1009;
const int _default_loader_dictionary_size = 107;
 671 
 672 Dictionary* ClassLoaderData::create_dictionary() {
 673   assert(!has_class_mirror_holder(), "class mirror holder cld does not have a dictionary");
 674   int size;
 675   if (_the_null_class_loader_data == nullptr) {
 676     size = _boot_loader_dictionary_size;
 677   } else if (is_system_class_loader_data()) {
 678     size = _boot_loader_dictionary_size;
 679   } else {
 680     size = _default_loader_dictionary_size;
 681   }
 682   return new Dictionary(this, size);
 683 }
 684 
 685 // Tell the GC to keep this klass alive. Needed while iterating ClassLoaderDataGraph,
 686 // and any runtime code that uses klasses.
 687 oop ClassLoaderData::holder() const {
 688   // A klass that was previously considered dead can be looked up in the
 689   // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
 690   // or a reachable object making it alive again. The SATB part of G1 needs
 691   // to get notified about this potential resurrection, otherwise the marking
 692   // might not find the object.
 693   if (!_holder.is_null()) {  // null class_loader
 694     return _holder.resolve();
 695   } else {
 696     return nullptr;
 697   }
 698 }
 699 
 700 // Let the GC read the holder without keeping it alive.
 701 oop ClassLoaderData::holder_no_keepalive() const {
 702   if (!_holder.is_null()) {  // null class_loader
 703     return _holder.peek();
 704   } else {
 705     return nullptr;
 706   }
 707 }
 708 
 709 // Unloading support
 710 bool ClassLoaderData::is_alive() const {
 711   bool alive = (_keep_alive_ref_count > 0) // null class loader and incomplete non-strong hidden class.
 712       || (_holder.peek() != nullptr);      // and not cleaned by the GC weak handle processing.
 713 
 714   return alive;
 715 }
 716 
 717 class ReleaseKlassClosure: public KlassClosure {
 718 private:
 719   size_t  _instance_class_released;
 720   size_t  _array_class_released;
 721 public:
 722   ReleaseKlassClosure() : _instance_class_released(0), _array_class_released(0) { }
 723 
 724   size_t instance_class_released() const { return _instance_class_released; }
 725   size_t array_class_released()    const { return _array_class_released;    }
 726 
 727   void do_klass(Klass* k) {
 728     if (k->is_array_klass()) {
 729       _array_class_released ++;
 730     } else {
 731       assert(k->is_instance_klass(), "Must be");
 732       _instance_class_released ++;
 733     }
 734     k->release_C_heap_structures();
 735   }
 736 };
 737 
// Tear down everything this CLD owns: per-klass C heap structures, the weak
// holder, the package/module/dictionary tables, the unnamed module, the
// metaspace, locks, the deallocate list, and the name Symbol refcounts.
ClassLoaderData::~ClassLoaderData() {
  // Release C heap structures for all the classes.
  ReleaseKlassClosure cl;
  classes_do(&cl);

  // Adjust the graph-wide counters by the number of classes released.
  ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
  ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());

  // Release the WeakHandle
  _holder.release(Universe::vm_weak());

  // Release C heap allocated hashtable for all the packages.
  if (_packages != nullptr) {
    // Destroy the table itself
    delete _packages;
    _packages = nullptr;
  }

  // Release C heap allocated hashtable for all the modules.
  if (_modules != nullptr) {
    // Destroy the table itself
    delete _modules;
    _modules = nullptr;
  }

  // Release C heap allocated hashtable for the dictionary
  if (_dictionary != nullptr) {
    // Destroy the table itself
    delete _dictionary;
    _dictionary = nullptr;
  }

  if (_unnamed_module != nullptr) {
    delete _unnamed_module;
    _unnamed_module = nullptr;
  }

  // release the metaspace
  // (null the field before deleting; NOTE(review): presumably so concurrent
  //  observers don't see a dangling pointer — confirm against callers)
  ClassLoaderMetaspace *m = _metaspace;
  if (m != nullptr) {
    _metaspace = nullptr;
    delete m;
  }

  // Delete lock
  delete _metaspace_lock;

  // Delete free list
  if (_deallocate_list != nullptr) {
    delete _deallocate_list;
  }

  // Decrement refcounts of Symbols if created.
  if (_name != nullptr) {
    _name->decrement_refcount();
  }
  if (_name_and_id != nullptr) {
    _name_and_id->decrement_refcount();
  }
}
 798 
 799 // Returns true if this class loader data is for the app class loader
 800 // or a user defined system class loader.  (Note that the class loader
 801 // data may have a Class holder.)
 802 bool ClassLoaderData::is_system_class_loader_data() const {
 803   return SystemDictionary::is_system_class_loader(class_loader());
 804 }
 805 
 806 // Returns true if this class loader data is for the platform class loader.
 807 // (Note that the class loader data may have a Class holder.)
 808 bool ClassLoaderData::is_platform_class_loader_data() const {
 809   return SystemDictionary::is_platform_class_loader(class_loader());
 810 }
 811 
 812 // Returns true if the class loader for this class loader data is one of
 813 // the 3 builtin (boot application/system or platform) class loaders,
 814 // including a user-defined system class loader.  Note that if the class
 815 // loader data is for a non-strong hidden class then it may
 816 // get freed by a GC even if its class loader is one of these loaders.
 817 bool ClassLoaderData::is_builtin_class_loader_data() const {
 818   return (is_boot_class_loader_data() ||
 819           SystemDictionary::is_system_class_loader(class_loader()) ||
 820           SystemDictionary::is_platform_class_loader(class_loader()));
 821 }
 822 
 823 // Returns true if this class loader data is a class loader data
 824 // that is not ever freed by a GC.  It must be the CLD for one of the builtin
 825 // class loaders and not the CLD for a non-strong hidden class.
 826 bool ClassLoaderData::is_permanent_class_loader_data() const {
 827   return is_builtin_class_loader_data() && !has_class_mirror_holder();
 828 }
 829 
// Returns this CLD's ClassLoaderMetaspace, allocating it on first use.
// Uses double-checked locking: a lock-free load_acquire fast path, then
// re-checks under _metaspace_lock before allocating.
ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
  // If the metaspace has not been allocated, create a new one.  Might want
  // to create smaller arena for Reflection class loaders also.
  // The reason for the delayed allocation is because some class loaders are
  // simply for delegating with no metadata of their own.
  // Lock-free access requires load_acquire.
  ClassLoaderMetaspace* metaspace = AtomicAccess::load_acquire(&_metaspace);
  if (metaspace == nullptr) {
    MutexLocker ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
    // Check if _metaspace got allocated while we were waiting for this lock.
    if ((metaspace = _metaspace) == nullptr) {
      // Pick the metaspace type by the kind of CLD: boot loader,
      // non-strong hidden class holder, or a regular loader.
      if (this == the_null_class_loader_data()) {
        assert (class_loader() == nullptr, "Must be");
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
      } else if (has_class_mirror_holder()) {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ClassMirrorHolderMetaspaceType);
      } else {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
      }
      // Ensure _metaspace is stable, since it is examined without a lock
      AtomicAccess::release_store(&_metaspace, metaspace);
    }
  }
  return metaspace;
}
 855 
// Adds h's oop to this CLD's handle area and returns an OopHandle for it.
// The metaspace lock serializes concurrent updates to the handle area.
OopHandle ClassLoaderData::add_handle(Handle h) {
  MutexLocker ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
  // Record that this CLD's oop set changed, before publishing the new oop.
  record_modified_oops();
  return _handles.add(h());
}
 861 
 862 void ClassLoaderData::remove_handle(OopHandle h) {
 863   assert(!is_unloading(), "Do not remove a handle for a CLD that is unloading");
 864   if (!h.is_empty()) {
 865     assert(_handles.owner_of(h.ptr_raw()),
 866            "Got unexpected handle " PTR_FORMAT, p2i(h.ptr_raw()));
 867     h.replace(oop(nullptr));
 868   }
 869 }
 870 
 871 void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
 872   MutexLocker ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
 873   if (dest.resolve() != nullptr) {
 874     return;
 875   } else {
 876     record_modified_oops();
 877     dest = _handles.add(h());
 878   }
 879 }
 880 
 881 // Add this metadata pointer to be freed when it's safe.  This is only during
 882 // a safepoint which checks if handles point to this metadata field.
 883 void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
 884   // Metadata in shared region isn't deleted.
 885   if (!m->in_aot_cache()) {
 886     MutexLocker ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
 887     if (_deallocate_list == nullptr) {
 888       _deallocate_list = new (mtClass) GrowableArray<Metadata*>(100, mtClass);
 889     }
 890     _deallocate_list->append_if_missing(m);
 891     ResourceMark rm;
 892     log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
 893     ClassLoaderDataGraph::set_should_clean_deallocate_lists();
 894   }
 895 }
 896 
 897 // Deallocate free metadata on the free list.  How useful the PermGen was!
 898 void ClassLoaderData::free_deallocate_list() {
 899   // This must be called at a safepoint because it depends on metadata walking at
 900   // safepoint cleanup time.
 901   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
 902   assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
 903   if (_deallocate_list == nullptr) {
 904     return;
 905   }
 906   // Go backwards because this removes entries that are freed.
 907   for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
 908     Metadata* m = _deallocate_list->at(i);
 909     if (!m->on_stack()) {
 910       _deallocate_list->remove_at(i);
 911       // There are only three types of metadata that we deallocate directly.
 912       // Cast them so they can be used by the template function.
 913       if (m->is_method()) {
 914         MetadataFactory::free_metadata(this, (Method*)m);
 915       } else if (m->is_constantPool()) {
 916         HeapShared::remove_scratch_resolved_references((ConstantPool*)m);
 917         MetadataFactory::free_metadata(this, (ConstantPool*)m);
 918       } else if (m->is_klass()) {
 919         if (!((Klass*)m)->is_inline_klass()) {
 920           MetadataFactory::free_metadata(this, (InstanceKlass*)m);
 921         } else {
 922           MetadataFactory::free_metadata(this, (InlineKlass*)m);
 923         }
 924       } else {
 925         ShouldNotReachHere();
 926       }
 927     } else {
 928       // Metadata is alive.
 929       // If scratch_class is on stack then it shouldn't be on this list!
 930       assert(!m->is_klass() || !((InstanceKlass*)m)->is_scratch_class(),
 931              "scratch classes on this list should be dead");
 932       // Also should assert that other metadata on the list was found in handles.
 933       // Some cleaning remains.
 934       ClassLoaderDataGraph::set_should_clean_deallocate_lists();
 935     }
 936   }
 937 }
 938 
 939 // This is distinct from free_deallocate_list.  For class loader data that are
 940 // unloading, this frees the C heap memory for items on the list, and unlinks
 941 // scratch or error classes so that unloading events aren't triggered for these
 942 // classes. The metadata is removed with the unloading metaspace.
 943 // There isn't C heap memory allocated for methods, so nothing is done for them.
 944 void ClassLoaderData::free_deallocate_list_C_heap_structures() {
 945   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
 946   assert(is_unloading(), "only called for ClassLoaderData that are unloading");
 947   if (_deallocate_list == nullptr) {
 948     return;
 949   }
 950   // Go backwards because this removes entries that are freed.
 951   for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
 952     Metadata* m = _deallocate_list->at(i);
 953     _deallocate_list->remove_at(i);
 954     if (m->is_constantPool()) {
 955       ((ConstantPool*)m)->release_C_heap_structures();
 956     } else if (m->is_klass()) {
 957       InstanceKlass* ik = (InstanceKlass*)m;
 958       // also releases ik->constants() C heap memory
 959       ik->release_C_heap_structures();
 960       // Remove the class so unloading events aren't triggered for
 961       // this class (scratch or error class) in do_unloading().
 962       remove_class(ik);
 963       // But still have to remove it from the dumptime_table.
 964       SystemDictionaryShared::handle_class_unloading(ik);
 965     }
 966   }
 967 }
 968 
 969 // Caller needs ResourceMark
 970 // If the class loader's _name has not been explicitly set, the class loader's
 971 // qualified class name is returned.
 972 const char* ClassLoaderData::loader_name() const {
 973    if (_class_loader_klass == nullptr) {
 974      return BOOTSTRAP_LOADER_NAME;
 975    } else if (_name != nullptr) {
 976      return _name->as_C_string();
 977    } else {
 978      return _class_loader_klass->external_name();
 979    }
 980 }
 981 
 982 // Caller needs ResourceMark
 983 // Format of the _name_and_id is as follows:
 984 //   If the defining loader has a name explicitly set then '<loader-name>' @<id>
 985 //   If the defining loader has no name then <qualified-class-name> @<id>
 986 //   If built-in loader, then omit '@<id>' as there is only one instance.
 987 const char* ClassLoaderData::loader_name_and_id() const {
 988   if (_class_loader_klass == nullptr) {
 989     return "'" BOOTSTRAP_LOADER_NAME "'";
 990   } else if (_name_and_id != nullptr) {
 991     return _name_and_id->as_C_string();
 992   } else {
 993     // May be called in a race before _name_and_id is initialized.
 994     return _class_loader_klass->external_name();
 995   }
 996 }
 997 
 998 void ClassLoaderData::print_value_on(outputStream* out) const {
 999   if (!is_unloading() && class_loader() != nullptr) {
1000     out->print("loader data: " INTPTR_FORMAT " for instance ", p2i(this));
1001     class_loader()->print_value_on(out);  // includes loader_name_and_id() and address of class loader instance
1002   } else {
1003     // loader data: 0xsomeaddr of 'bootstrap'
1004     out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id());
1005   }
1006   if (_has_class_mirror_holder) {
1007     out->print(" has a class holder");
1008   }
1009 }
1010 
1011 void ClassLoaderData::print_value() const { print_value_on(tty); }
1012 
1013 #ifndef PRODUCT
1014 class PrintKlassClosure: public KlassClosure {
1015   outputStream* _out;
1016 public:
1017   PrintKlassClosure(outputStream* out): _out(out) { }
1018 
1019   void do_klass(Klass* k) {
1020     ResourceMark rm;
1021     _out->print("%s,", k->external_name());
1022   }
1023 };
1024 
// Prints the full state of this CLD (identity, holder, flags, klasses, and
// the package/module/dictionary tables) to the given stream.  Non-PRODUCT only.
void ClassLoaderData::print_on(outputStream* out) const {
  ResourceMark rm;  // for loader_name_and_id() and klass name strings
  out->print_cr("ClassLoaderData(" INTPTR_FORMAT ")", p2i(this));
  out->print_cr(" - name                %s", loader_name_and_id());
  if (!_holder.is_null()) {
    out->print   (" - holder              ");
    _holder.print_on(out);
    out->print_cr("");
  }
  if (!_unloading) {
    out->print_cr(" - class loader        " INTPTR_FORMAT, p2i(_class_loader.peek()));
  } else {
    // Once unloading, the class loader oop may no longer be valid.
    out->print_cr(" - class loader        <unloading, oop is bad>");
  }
  out->print_cr(" - metaspace           " INTPTR_FORMAT, p2i(_metaspace));
  out->print_cr(" - unloading           %s", _unloading ? "true" : "false");
  out->print_cr(" - class mirror holder %s", _has_class_mirror_holder ? "true" : "false");
  out->print_cr(" - modified oops       %s", _modified_oops ? "true" : "false");
  out->print_cr(" - _keep_alive_ref_count %d", _keep_alive_ref_count);
  out->print   (" - claim               ");
  // _claim_other may be combined with _claim_finalizable or _claim_strong,
  // hence the OR'd cases below.
  switch(_claim) {
    case _claim_none:                       out->print_cr("none"); break;
    case _claim_finalizable:                out->print_cr("finalizable"); break;
    case _claim_strong:                     out->print_cr("strong"); break;
    case _claim_stw_fullgc_mark:            out->print_cr("stw full gc mark"); break;
    case _claim_stw_fullgc_adjust:          out->print_cr("stw full gc adjust"); break;
    case _claim_other:                      out->print_cr("other"); break;
    case _claim_other | _claim_finalizable: out->print_cr("other and finalizable"); break;
    case _claim_other | _claim_strong:      out->print_cr("other and strong"); break;
    default:                                ShouldNotReachHere();
  }
  out->print_cr(" - handles             %d", _handles.count());
  out->print_cr(" - dependency count    %d", _dependency_count);
  out->print   (" - klasses             { ");
  // Listing every klass is expensive, so it is gated on -XX:+Verbose.
  if (Verbose) {
    PrintKlassClosure closure(out);
    ((ClassLoaderData*)this)->classes_do(&closure);
  } else {
     out->print("...");
  }
  out->print_cr(" }");
  out->print_cr(" - packages            " INTPTR_FORMAT, p2i(_packages));
  out->print_cr(" - module              " INTPTR_FORMAT, p2i(_modules));
  out->print_cr(" - unnamed module      " INTPTR_FORMAT, p2i(_unnamed_module));
  if (_dictionary != nullptr) {
    out->print   (" - dictionary          " INTPTR_FORMAT " ", p2i(_dictionary));
    _dictionary->print_size(out);
  } else {
    out->print_cr(" - dictionary          " INTPTR_FORMAT, p2i(_dictionary));
  }
  if (_jmethod_ids != nullptr) {
    out->print_cr(" - jmethod count       %d", _jmethod_ids->length());
  }
  out->print_cr(" - deallocate list     " INTPTR_FORMAT, p2i(_deallocate_list));
  out->print_cr(" - next CLD            " INTPTR_FORMAT, p2i(_next));
}
1081 #endif // PRODUCT
1082 
1083 void ClassLoaderData::print() const { print_on(tty); }
1084 
1085 class VerifyHandleOops : public OopClosure {
1086   VerifyOopClosure vc;
1087  public:
1088   virtual void do_oop(oop* p) {
1089     if (p != nullptr && *p != nullptr) {
1090       oop o = *p;
1091       if (!java_lang_Class::is_instance(o)) {
1092         // is_instance will assert for an invalid oop.
1093         // Walk the resolved_references array and other assorted oops in the
1094         // CLD::_handles field.  The mirror oops are followed by other heap roots.
1095         o->oop_iterate(&vc);
1096       }
1097     }
1098   }
1099   virtual void do_oop(narrowOop* o) { ShouldNotReachHere(); }
1100 };
1101 
// Verifies this CLD's invariants: the loader <-> CLD linkage, the metaspace,
// every klass on the klass list, the module table, any klasses on the
// deallocate list, and the oops in the handle area.
void ClassLoaderData::verify() {
  assert_locked_or_safepoint(_metaspace_lock);
  oop cl = class_loader();

  // CLDs with a class mirror holder (non-strong hidden classes) are exempt
  // from being the loader oop's canonical CLD and from having a loader oop.
  guarantee(this == class_loader_data(cl) || has_class_mirror_holder(), "Must be the same");
  guarantee(cl != nullptr || this == ClassLoaderData::the_null_class_loader_data() || has_class_mirror_holder(), "must be");

  // Verify the integrity of the allocated space.
#ifdef ASSERT
  if (metaspace_or_null() != nullptr) {
    metaspace_or_null()->verify();
  }
#endif

  // Every klass on this CLD's list must point back to this CLD.
  for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
    guarantee(k->class_loader_data() == this, "Must be the same");
    k->verify();
    assert(k != k->next_link(), "no loops!");
  }

  if (_modules != nullptr) {
    _modules->verify();
  }

  // Klasses awaiting deallocation are still verifiable until freed.
  if (_deallocate_list != nullptr) {
    for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
      Metadata* m = _deallocate_list->at(i);
      if (m->is_klass()) {
        ((Klass*)m)->verify();
      }
    }
  }

  // Check the oops in the handles area
  VerifyHandleOops vho;
  oops_do(&vho, _claim_none, false);
}
1139 
1140 bool ClassLoaderData::contains_klass(Klass* klass) {
1141   // Lock-free access requires load_acquire
1142   for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
1143     if (k == klass) return true;
1144   }
1145   return false;
1146 }