/*
 * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// A ClassLoaderData identifies the full set of class types that a class
// loader's name resolution strategy produces for a given configuration of the
// class loader.
// Class types in the ClassLoaderData may be defined from class file binaries
// provided by the class loader, or by other class loaders it interacts with
// according to its name resolution strategy.
//
// Class loaders that implement a deterministic name resolution strategy
// (including with respect to their delegation behavior), such as the boot, the
// platform, and the system loaders of the JDK's built-in class loader
// hierarchy, always produce the same linkset for a given configuration.
//
// ClassLoaderData carries information related to a linkset (e.g.,
// metaspace holding its klass definitions).
// The System Dictionary and related data structures (e.g., placeholder table,
// loader constraints table) as well as the runtime representation of classes
// only reference ClassLoaderData.
//
// Instances of java.lang.ClassLoader hold a pointer to the ClassLoaderData
// that represents the loader's "linking domain" in the JVM.
//
// The bootstrap loader (represented by null) also has a ClassLoaderData,
// the singleton class the_null_class_loader_data().
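//
// For orientation, a minimal sketch of how runtime code typically reaches a
// ClassLoaderData (illustrative only; call sites vary, and these are just
// accessors that appear in this file):
//
//   Klass* k = ...;                                 // some loaded class
//   ClassLoaderData* cld = k->class_loader_data();  // the CLD that defined k
//   oop loader = cld->class_loader();               // ClassLoader instance, null for boot
//   oop holder = cld->holder();                     // keep-alive oop for safe iteration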

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.inline.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "oops/weakHandle.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

ClassLoaderData* ClassLoaderData::_the_null_class_loader_data = nullptr;

void ClassLoaderData::init_null_class_loader_data() {
  assert(_the_null_class_loader_data == nullptr, "cannot initialize twice");
  assert(ClassLoaderDataGraph::_head == nullptr, "cannot initialize twice");

  _the_null_class_loader_data = new ClassLoaderData(Handle(), false);
  ClassLoaderDataGraph::_head = _the_null_class_loader_data;
  assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");

  LogTarget(Trace, class, loader, data) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print("create ");
    _the_null_class_loader_data->print_value_on(&ls);
    ls.cr();
  }
}

// Obtain and set the class loader's name within the ClassLoaderData so
// it will be available for error messages, logging, JFR, etc. The name
// and klass are available after the class_loader oop is no longer alive,
// during unloading.
void ClassLoaderData::initialize_name(Handle class_loader) {
  ResourceMark rm;

  // Obtain the class loader's name. If the class loader's name was not
  // explicitly set during construction, the CLD's _name field will be null.
  oop cl_name = java_lang_ClassLoader::name(class_loader());
  if (cl_name != nullptr) {
    const char* cl_instance_name = java_lang_String::as_utf8_string(cl_name);

    if (cl_instance_name != nullptr && cl_instance_name[0] != '\0') {
      _name = SymbolTable::new_symbol(cl_instance_name);
    }
  }

  // Obtain the class loader's name and identity hash. If the class loader's
  // name was not explicitly set during construction, the class loader's name and id
  // will be set to the qualified class name of the class loader along with its
  // identity hash.
  // If for some reason the ClassLoader's constructor has not been run, instead of
  // leaving the _name_and_id field null, fall back to the external qualified class
  // name. Thus CLD's _name_and_id field should never have a null value.
  oop cl_name_and_id = java_lang_ClassLoader::nameAndId(class_loader());
  const char* cl_instance_name_and_id =
      (cl_name_and_id == nullptr) ?
          _class_loader_klass->external_name() :
          java_lang_String::as_utf8_string(cl_name_and_id);
  assert(cl_instance_name_and_id != nullptr && cl_instance_name_and_id[0] != '\0', "class loader has no name and id");
  _name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id);
}

ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder) :
  _metaspace(nullptr),
  _metaspace_lock(new Mutex(Mutex::nosafepoint-2, "MetaspaceAllocation_lock")),
  _unloading(false), _has_class_mirror_holder(has_class_mirror_holder),
  _modified_oops(true),
  // A non-strong hidden class loader data doesn't have anything to keep
  // it from being unloaded during parsing of the non-strong hidden class.
  // The null-class-loader should always be kept alive.
  _keep_alive_ref_count((has_class_mirror_holder || h_class_loader.is_null()) ? 1 : 0),
  _claim(0),
  _handles(),
  _klasses(nullptr), _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr), _dictionary(nullptr),
  _jmethod_ids(nullptr),
  _deallocate_list(nullptr),
  _next(nullptr),
  _unloading_next(nullptr),
  _class_loader_klass(nullptr), _name(nullptr), _name_and_id(nullptr) {

  if (!h_class_loader.is_null()) {
    _class_loader = _handles.add(h_class_loader());
    _class_loader_klass = h_class_loader->klass();
    initialize_name(h_class_loader);
  }

  if (!has_class_mirror_holder) {
    // The holder is initialized later for non-strong hidden classes,
    // and before calling anything that calls class_loader().
    initialize_holder(h_class_loader);

    // A ClassLoaderData created solely for a non-strong hidden class should never
    // have a ModuleEntryTable or PackageEntryTable created for it.
    _packages = new PackageEntryTable();
    if (h_class_loader.is_null()) {
      // Create unnamed module for boot loader
      _unnamed_module = ModuleEntry::create_boot_unnamed_module(this);
    } else {
      // Create unnamed module for all other loaders
      _unnamed_module = ModuleEntry::create_unnamed_module(this);
    }
    _dictionary = create_dictionary();
  }

  NOT_PRODUCT(_dependency_count = 0); // number of class loader dependencies

  JFR_ONLY(INIT_ID(this);)
}

ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
  Chunk* c = _head;
  while (c != nullptr) {
    Chunk* next = c->_next;
    delete c;
    c = next;
  }
}

OopHandle ClassLoaderData::ChunkedHandleList::add(oop o) {
  if (_head == nullptr || _head->_size == Chunk::CAPACITY) {
    Chunk* next = new Chunk(_head);
    Atomic::release_store(&_head, next);
  }
  oop* handle = &_head->_data[_head->_size];
  NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
  Atomic::release_store(&_head->_size, _head->_size + 1);
  return OopHandle(handle);
}

int ClassLoaderData::ChunkedHandleList::count() const {
  int count = 0;
  Chunk* chunk = Atomic::load_acquire(&_head);
  while (chunk != nullptr) {
    count += Atomic::load(&chunk->_size);
    chunk = chunk->_next;
  }
  return count;
}

inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
  for (juint i = 0; i < size; i++) {
    f->do_oop(&c->_data[i]);
  }
}

void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
  Chunk* head = Atomic::load_acquire(&_head);
  if (head != nullptr) {
    // Must be careful when reading size of head
    oops_do_chunk(f, head,
                  Atomic::load_acquire(&head->_size));
    for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
      oops_do_chunk(f, c, c->_size);
    }
  }
}

class VerifyContainsOopClosure : public OopClosure {
  oop  _target;
  bool _found;

 public:
  VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}

  void do_oop(oop* p) {
    if (p != nullptr && NativeAccess<AS_NO_KEEPALIVE>::oop_load(p) == _target) {
      _found = true;
    }
  }

  void do_oop(narrowOop* p) {
    // The ChunkedHandleList should not contain any narrowOop
    ShouldNotReachHere();
  }

  bool found() const {
    return _found;
  }
};

bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
  VerifyContainsOopClosure cl(p);
  oops_do(&cl);
  return cl.found();
}

#ifndef PRODUCT
bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
  Chunk* chunk = Atomic::load_acquire(&_head);
  while (chunk != nullptr) {
    if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[Atomic::load(&chunk->_size)])) {
      return true;
    }
    chunk = chunk->_next;
  }
  return false;
}
#endif // PRODUCT

void ClassLoaderData::clear_claim(int claim) {
  for (;;) {
    int old_claim = Atomic::load(&_claim);
    if ((old_claim & claim) == 0) {
      return;
    }
    int new_claim = old_claim & ~claim;
    if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
      return;
    }
  }
}

#ifdef ASSERT
void ClassLoaderData::verify_not_claimed(int claim) {
  assert((_claim & claim) == 0, "Found claim: %d bits in _claim: %d", claim, _claim);
}
#endif

bool ClassLoaderData::try_claim(int claim) {
  for (;;) {
    int old_claim = Atomic::load(&_claim);
    if ((old_claim & claim) == claim) {
      return false;
    }
    int new_claim = old_claim | claim;
    if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
      return true;
    }
  }
}

void ClassLoaderData::demote_strong_roots() {
  // The oop handle area contains strong roots that the GC traces from. We are about
  // to demote them to strong native oops that the GC does *not* trace from. Conceptually,
  // we are retiring a rather normal strong root, and creating a strong non-root handle,
  // which happens to reuse the same address as the normal strong root had.
  // Unless we invoke the right barriers, the GC might not notice that a strong root
  // has been pulled from the system, and is left unprocessed by the GC. There can be
  // several consequences:
  // 1. A concurrently marking snapshot-at-the-beginning GC might assume that the contents
  //    of all strong roots get processed by the GC in order to keep them alive. Without
  //    barriers, some objects might not be kept alive.
  // 2. A concurrently relocating GC might assume that after moving an object, a subsequent
  //    tracing from all roots can fix all the pointers in the system, which doesn't play
  //    well with roots racingly being pulled.
  // 3. A concurrent GC using colored pointers might assume that tracing the object graph
  //    from roots results in all pointers getting some particular color, which also doesn't
  //    play well with roots being pulled out from the system concurrently.
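  // The local closure below performs exactly this transition. NativeAccess<>
  // with an empty decorator set routes the load and store through the GC's
  // barriers for strong off-heap references, so the pair below is not a plain
  // read and write: it is what delivers the notifications described above.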

  class TransitionRootsOopClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // By loading the strong root with the access API, we can use the right barriers to
      // store the oop as a strong non-root handle, that happens to reuse the same memory
      // address as the strong root. The barriered store ensures that:
      // 1. The concurrent SATB marking properties are satisfied as the store will keep
      //    the oop alive.
      // 2. The concurrent object movement properties are satisfied as we store the address
      //    of the new location of the object, if any.
      // 3. The colors, if any, will be stored as the new good colors.
      oop obj = NativeAccess<>::oop_load(p); // Load the strong root
      NativeAccess<>::oop_store(p, obj);     // Store the strong non-root
    }

    virtual void do_oop(narrowOop* p) {
      ShouldNotReachHere();
    }
  } cl;
  oops_do(&cl, ClassLoaderData::_claim_none, false /* clear_mod_oops */);
}

// Non-strong hidden classes have their own ClassLoaderData that is marked to keep alive
// while the class is being parsed, and if the class appears on the module fixup list.
// Because no other class shares the hidden class' name or ClassLoaderData, no other
// non-GC thread has knowledge of the hidden class while it is being defined;
// therefore _keep_alive_ref_count need not be volatile or atomic.
void ClassLoaderData::inc_keep_alive_ref_count() {
  if (has_class_mirror_holder()) {
    assert(_keep_alive_ref_count > 0, "Invalid keep alive increment count");
    _keep_alive_ref_count++;
  }
}

void ClassLoaderData::dec_keep_alive_ref_count() {
  if (has_class_mirror_holder()) {
    assert(_keep_alive_ref_count > 0, "Invalid keep alive decrement count");
    if (_keep_alive_ref_count == 1) {
      // When the keep_alive_ref_count counter is 1, the oop handle area is a strong root,
      // acting as input to the GC tracing. Such strong roots are part of the
      // snapshot-at-the-beginning, and can not just be pulled out from the
      // system when concurrent GCs are running at the same time, without
      // invoking the right barriers.
      demote_strong_roots();
    }
    _keep_alive_ref_count--;
  }
}

void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oops) {
  if (claim_value != ClassLoaderData::_claim_none && !try_claim(claim_value)) {
    return;
  }

  // Only clear modified_oops after the ClassLoaderData is claimed.
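  // Claiming first ties the clear to the subsequent handle walk: only a thread
  // that successfully claimed this CLD clears the flag and then visits the
  // handles below, so the flag is never cleared by a thread that does not go
  // on to visit the handles.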
  if (clear_mod_oops) {
    clear_modified_oops();
  }

  _handles.oops_do(f);
}

void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
  // Lock-free access requires load_acquire
  for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    klass_closure->do_klass(k);
    assert(k != k->next_link(), "no loops!");
  }
}

void ClassLoaderData::classes_do(void f(Klass* const)) {
  // Lock-free access requires load_acquire
  for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    f(k);
    assert(k != k->next_link(), "no loops!");
  }
}

void ClassLoaderData::methods_do(void f(Method*)) {
  // Lock-free access requires load_acquire
  for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
      InstanceKlass::cast(k)->methods_do(f);
    }
  }
}

void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
  // Lock-free access requires load_acquire
  for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    // Filter out InstanceKlasses (or their ObjArrayKlasses) that have not entered the
    // loaded state.
    if (k->is_instance_klass()) {
      if (!InstanceKlass::cast(k)->is_loaded()) {
        continue;
      }
    } else if (k->is_shared() && k->is_objArray_klass()) {
      Klass* bottom = ObjArrayKlass::cast(k)->bottom_klass();
      if (bottom->is_instance_klass() && !InstanceKlass::cast(bottom)->is_loaded()) {
        // This could happen if <bottom> is a shared class that has been restored
        // but is not yet marked as loaded. All archived array classes of the
        // bottom class are already restored and placed in the _klasses list.
        continue;
      }
    }

#ifdef ASSERT
    oop m = k->java_mirror();
    assert(m != nullptr, "nullptr mirror");
    assert(m->is_a(vmClasses::Class_klass()), "invalid mirror");
#endif
    klass_closure->do_klass(k);
  }
}

void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
  // Lock-free access requires load_acquire
  for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    if (k->is_instance_klass()) {
      f(InstanceKlass::cast(k));
    }
    assert(k != k->next_link(), "no loops!");
  }
}

void ClassLoaderData::inline_classes_do(void f(InlineKlass*)) {
  // Lock-free access requires load_acquire
  for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    if (k->is_inline_klass()) {
      f(InlineKlass::cast(k));
    }
    assert(k != k->next_link(), "no loops!");
  }
}

void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
  assert_locked_or_safepoint(Module_lock);
  if (_unnamed_module != nullptr) {
    f(_unnamed_module);
  }
  if (_modules != nullptr) {
    _modules->modules_do(f);
  }
}

void ClassLoaderData::packages_do(void f(PackageEntry*)) {
  assert_locked_or_safepoint(Module_lock);
  if (_packages != nullptr) {
    _packages->packages_do(f);
  }
}

void ClassLoaderData::record_dependency(const Klass* k) {
  assert(k != nullptr, "invariant");

  ClassLoaderData* const from_cld = this;
  ClassLoaderData* const to_cld = k->class_loader_data();

  // Do not need to record dependency if the dependency is to a class whose
  // class loader data is never freed. (i.e.
  // the dependency's class loader is one of the three builtin class loaders
  // and the dependency's class loader data has a ClassLoader holder, not a
  // Class holder.)
  if (to_cld->is_permanent_class_loader_data()) {
    return;
  }

  oop to;
  if (to_cld->has_class_mirror_holder()) {
    // Just return if a non-strong hidden class is attempting to record a dependency
    // to itself. (Note that every non-strong hidden class has its own unique class
    // loader data.)
    if (to_cld == from_cld) {
      return;
    }
    // Hidden class dependencies are through the mirror.
    to = k->java_mirror();
  } else {
    to = to_cld->class_loader();
    oop from = from_cld->class_loader();

    // Just return if this dependency is to a class with the same or a parent
    // class_loader.
    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
      return; // this class loader is in the parent list, no need to add it.
    }
  }

  // It's a dependency we won't find through GC, add it.
  if (!_handles.contains(to)) {
    NOT_PRODUCT(Atomic::inc(&_dependency_count));
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("adding dependency from ");
      print_value_on(&ls);
      ls.print(" to ");
      to_cld->print_value_on(&ls);
      ls.cr();
    }
    Handle dependency(Thread::current(), to);
    add_handle(dependency);
    // Added a potentially young gen oop to the ClassLoaderData
    record_modified_oops();
  }
}

void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
  {
    MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    Klass* old_value = _klasses;
    k->set_next_link(old_value);
    // Link the new item into the list, making sure the linked class is stable
    // since the list can be walked without a lock
    Atomic::release_store(&_klasses, k);
    if (k->is_array_klass()) {
      ClassLoaderDataGraph::inc_array_classes(1);
    } else {
      ClassLoaderDataGraph::inc_instance_classes(1);
    }
  }

  if (publicize) {
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("Adding k: " PTR_FORMAT " %s to ", p2i(k), k->external_name());
      print_value_on(&ls);
      ls.cr();
    }
  }
}

void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
  if (loader_or_mirror() != nullptr) {
    assert(_holder.is_null(), "never replace holders");
    _holder = WeakHandle(Universe::vm_weak(), loader_or_mirror);
  }
}

// Remove a klass from the _klasses list for scratch_class during redefinition
// or parsed class in the case of an error.
void ClassLoaderData::remove_class(Klass* scratch_class) {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);

  Klass* prev = nullptr;
  for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
    if (k == scratch_class) {
      if (prev == nullptr) {
        _klasses = k->next_link();
      } else {
        Klass* next = k->next_link();
        prev->set_next_link(next);
      }

      if (k->is_array_klass()) {
        ClassLoaderDataGraph::dec_array_classes(1);
      } else {
        ClassLoaderDataGraph::dec_instance_classes(1);
      }

      return;
    }
    prev = k;
    assert(k != k->next_link(), "no loops!");
  }
  ShouldNotReachHere();   // should have found this class!!
}

void ClassLoaderData::unload() {
  _unloading = true;

  LogTarget(Trace, class, loader, data) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print("unload ");
    print_value_on(&ls);
    ls.cr();
  }

  // Some items on the _deallocate_list need to free their C heap structures
  // if they are not already on the _klasses list.
  free_deallocate_list_C_heap_structures();

  inline_classes_do(InlineKlass::cleanup);

  // Clean up class dependencies and tell serviceability tools
  // these classes are unloading. This must be called
  // after erroneous classes are released.
  classes_do(InstanceKlass::unload_class);

  // Method::clear_jmethod_ids only sets the jmethod_ids to null without
  // releasing the memory for related JNIMethodBlocks and JNIMethodBlockNodes.
  // This is done intentionally because native code (e.g. JVMTI agent) holding
  // jmethod_ids may access them after the associated classes and class loader
  // are unloaded. The Java Native Interface Specification says "method ID
  // does not prevent the VM from unloading the class from which the ID has
  // been derived. After the class is unloaded, the method or field ID becomes
  // invalid". In real world usages, the native code may rely on jmethod_ids
  // being null after class unloading. Hence, it is unsafe to free the memory
  // from the VM side without knowing when native code is going to stop using
  // them.
  if (_jmethod_ids != nullptr) {
    Method::clear_jmethod_ids(this);
  }
}

ModuleEntryTable* ClassLoaderData::modules() {
  // Lazily create the module entry table at first request.
  // Lock-free access requires load_acquire.
  ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
  if (modules == nullptr) {
    MutexLocker m1(Module_lock);
    // Check if _modules got allocated while we were waiting for this lock.
    if ((modules = _modules) == nullptr) {
      modules = new ModuleEntryTable();

      {
        MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
        // Ensure _modules is stable, since it is examined without a lock
        Atomic::release_store(&_modules, modules);
      }
    }
  }
  return modules;
}

const int _boot_loader_dictionary_size    = 1009;
const int _default_loader_dictionary_size = 107;

Dictionary* ClassLoaderData::create_dictionary() {
  assert(!has_class_mirror_holder(), "class mirror holder cld does not have a dictionary");
  int size;
  if (_the_null_class_loader_data == nullptr) {
    size = _boot_loader_dictionary_size;
  } else if (is_system_class_loader_data()) {
    size = _boot_loader_dictionary_size;
  } else {
    size = _default_loader_dictionary_size;
  }
  return new Dictionary(this, size);
}

// Tell the GC to keep this klass alive. Needed while iterating ClassLoaderDataGraph,
// and any runtime code that uses klasses.
oop ClassLoaderData::holder() const {
  // A klass that was previously considered dead can be looked up in the
  // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
  // or a reachable object making it alive again. The SATB part of G1 needs
  // to get notified about this potential resurrection, otherwise the marking
  // might not find the object.
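  // WeakHandle::resolve() performs a keep-alive load that gives the GC that
  // notification; holder_no_keepalive() below uses peek(), which reads the
  // oop without the keep-alive side effect.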
  if (!_holder.is_null()) { // null class_loader
    return _holder.resolve();
  } else {
    return nullptr;
  }
}

// Let the GC read the holder without keeping it alive.
oop ClassLoaderData::holder_no_keepalive() const {
  if (!_holder.is_null()) { // null class_loader
    return _holder.peek();
  } else {
    return nullptr;
  }
}

// Unloading support
bool ClassLoaderData::is_alive() const {
  bool alive = (_keep_alive_ref_count > 0) // null class loader and incomplete non-strong hidden class.
      || (_holder.peek() != nullptr);      // and not cleaned by the GC weak handle processing.

  return alive;
}

class ReleaseKlassClosure: public KlassClosure {
 private:
  size_t _instance_class_released;
  size_t _array_class_released;
 public:
  ReleaseKlassClosure() : _instance_class_released(0), _array_class_released(0) { }

  size_t instance_class_released() const { return _instance_class_released; }
  size_t array_class_released()    const { return _array_class_released; }

  void do_klass(Klass* k) {
    if (k->is_array_klass()) {
      _array_class_released++;
    } else {
      assert(k->is_instance_klass(), "Must be");
      _instance_class_released++;
    }
    k->release_C_heap_structures();
  }
};

ClassLoaderData::~ClassLoaderData() {
  // Release C heap structures for all the classes.
  ReleaseKlassClosure cl;
  classes_do(&cl);

  ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
  ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());

  // Release the WeakHandle
  _holder.release(Universe::vm_weak());

  // Release C heap allocated hashtable for all the packages.
  if (_packages != nullptr) {
    // Destroy the table itself
    delete _packages;
    _packages = nullptr;
  }

  // Release C heap allocated hashtable for all the modules.
  if (_modules != nullptr) {
    // Destroy the table itself
    delete _modules;
    _modules = nullptr;
  }

  // Release C heap allocated hashtable for the dictionary
  if (_dictionary != nullptr) {
    // Destroy the table itself
    delete _dictionary;
    _dictionary = nullptr;
  }

  if (_unnamed_module != nullptr) {
    delete _unnamed_module;
    _unnamed_module = nullptr;
  }

  // release the metaspace
  ClassLoaderMetaspace* m = _metaspace;
  if (m != nullptr) {
    _metaspace = nullptr;
    delete m;
  }

  // Delete lock
  delete _metaspace_lock;

  // Delete free list
  if (_deallocate_list != nullptr) {
    delete _deallocate_list;
  }

  // Decrement refcounts of Symbols if created.
  if (_name != nullptr) {
    _name->decrement_refcount();
  }
  if (_name_and_id != nullptr) {
    _name_and_id->decrement_refcount();
  }
}

// Returns true if this class loader data is for the app class loader
// or a user defined system class loader. (Note that the class loader
// data may have a Class holder.)
bool ClassLoaderData::is_system_class_loader_data() const {
  return SystemDictionary::is_system_class_loader(class_loader());
}

// Returns true if this class loader data is for the platform class loader.
// (Note that the class loader data may have a Class holder.)
bool ClassLoaderData::is_platform_class_loader_data() const {
  return SystemDictionary::is_platform_class_loader(class_loader());
}

// Returns true if the class loader for this class loader data is one of
// the 3 builtin (boot, application/system or platform) class loaders,
// including a user-defined system class loader. Note that if the class
// loader data is for a non-strong hidden class then it may
// get freed by a GC even if its class loader is one of these loaders.
bool ClassLoaderData::is_builtin_class_loader_data() const {
  return (is_boot_class_loader_data() ||
          SystemDictionary::is_system_class_loader(class_loader()) ||
          SystemDictionary::is_platform_class_loader(class_loader()));
}

// Returns true if this class loader data is a class loader data
// that is not ever freed by a GC. It must be the CLD for one of the builtin
// class loaders and not the CLD for a non-strong hidden class.
bool ClassLoaderData::is_permanent_class_loader_data() const {
  return is_builtin_class_loader_data() && !has_class_mirror_holder();
}

ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
  // If the metaspace has not been allocated, create a new one. Might want
  // to create smaller arena for Reflection class loaders also.
  // The reason for the delayed allocation is because some class loaders are
  // simply for delegating with no metadata of their own.
  // Lock-free access requires load_acquire.
  ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
  if (metaspace == nullptr) {
    MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
    // Check if _metaspace got allocated while we were waiting for this lock.
    if ((metaspace = _metaspace) == nullptr) {
      if (this == the_null_class_loader_data()) {
        assert(class_loader() == nullptr, "Must be");
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
      } else if (has_class_mirror_holder()) {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ClassMirrorHolderMetaspaceType);
      } else {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
      }
      // Ensure _metaspace is stable, since it is examined without a lock
      Atomic::release_store(&_metaspace, metaspace);
    }
  }
  return metaspace;
}

OopHandle ClassLoaderData::add_handle(Handle h) {
  MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  record_modified_oops();
  return _handles.add(h());
}

void ClassLoaderData::remove_handle(OopHandle h) {
  assert(!is_unloading(), "Do not remove a handle for a CLD that is unloading");
  if (!h.is_empty()) {
    assert(_handles.owner_of(h.ptr_raw()),
           "Got unexpected handle " PTR_FORMAT, p2i(h.ptr_raw()));
    h.replace(oop(nullptr));
  }
}

void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
  MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  if (dest.resolve() != nullptr) {
    return;
  } else {
    record_modified_oops();
    dest = _handles.add(h());
  }
}

// Add this metadata pointer to be freed when it's safe. This is only during
// a safepoint which checks if handles point to this metadata field.
void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
  // Metadata in shared region isn't deleted.
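  // (Shared metadata lives in the mapped CDS archive, not in this CLD's
  // metaspace or on the C heap, so there is nothing for us to free.)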
  if (!m->is_shared()) {
    MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    if (_deallocate_list == nullptr) {
      _deallocate_list = new (mtClass) GrowableArray<Metadata*>(100, mtClass);
    }
    _deallocate_list->append_if_missing(m);
    ResourceMark rm;
    log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
    ClassLoaderDataGraph::set_should_clean_deallocate_lists();
  }
}

// Deallocate free metadata on the free list. How useful the PermGen was!
void ClassLoaderData::free_deallocate_list() {
  // This must be called at a safepoint because it depends on metadata walking at
  // safepoint cleanup time.
  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
  assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
  if (_deallocate_list == nullptr) {
    return;
  }
  // Go backwards because this removes entries that are freed.
  for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
    Metadata* m = _deallocate_list->at(i);
    if (!m->on_stack()) {
      _deallocate_list->remove_at(i);
      // There are only three types of metadata that we deallocate directly.
      // Cast them so they can be used by the template function.
      if (m->is_method()) {
        MetadataFactory::free_metadata(this, (Method*)m);
      } else if (m->is_constantPool()) {
        MetadataFactory::free_metadata(this, (ConstantPool*)m);
      } else if (m->is_klass()) {
        if (!((Klass*)m)->is_inline_klass()) {
          MetadataFactory::free_metadata(this, (InstanceKlass*)m);
        } else {
          MetadataFactory::free_metadata(this, (InlineKlass*)m);
        }
      } else {
        ShouldNotReachHere();
      }
    } else {
      // Metadata is alive.
      // If scratch_class is on stack then it shouldn't be on this list!
      assert(!m->is_klass() || !((InstanceKlass*)m)->is_scratch_class(),
             "scratch classes on this list should be dead");
      // Also should assert that other metadata on the list was found in handles.
      // Some cleaning remains.
      ClassLoaderDataGraph::set_should_clean_deallocate_lists();
    }
  }
}

// This is distinct from free_deallocate_list. For class loader data that are
// unloading, this frees the C heap memory for items on the list, and unlinks
// scratch or error classes so that unloading events aren't triggered for these
// classes. The metadata is removed with the unloading metaspace.
// There isn't C heap memory allocated for methods, so nothing is done for them.
void ClassLoaderData::free_deallocate_list_C_heap_structures() {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  assert(is_unloading(), "only called for ClassLoaderData that are unloading");
  if (_deallocate_list == nullptr) {
    return;
  }
  // Go backwards because this removes entries that are freed.
  for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
    Metadata* m = _deallocate_list->at(i);
    _deallocate_list->remove_at(i);
    if (m->is_constantPool()) {
      ((ConstantPool*)m)->release_C_heap_structures();
    } else if (m->is_klass()) {
      InstanceKlass* ik = (InstanceKlass*)m;
      // also releases ik->constants() C heap memory
      ik->release_C_heap_structures();
      // Remove the class so unloading events aren't triggered for
      // this class (scratch or error class) in do_unloading().
      remove_class(ik);
      // But still have to remove it from the dumptime_table.
      SystemDictionaryShared::handle_class_unloading(ik);
    }
  }
}

// Caller needs ResourceMark
// If the class loader's _name has not been explicitly set, the class loader's
// qualified class name is returned.
const char* ClassLoaderData::loader_name() const {
  if (_class_loader_klass == nullptr) {
    return BOOTSTRAP_LOADER_NAME;
  } else if (_name != nullptr) {
    return _name->as_C_string();
  } else {
    return _class_loader_klass->external_name();
  }
}

// Caller needs ResourceMark
// Format of the _name_and_id is as follows:
//   If the defining loader has a name explicitly set then '<loader-name>' @<id>
//   If the defining loader has no name then <qualified-class-name> @<id>
//   If built-in loader, then omit '@<id>' as there is only one instance.
const char* ClassLoaderData::loader_name_and_id() const {
  if (_class_loader_klass == nullptr) {
    return "'" BOOTSTRAP_LOADER_NAME "'";
  } else if (_name_and_id != nullptr) {
    return _name_and_id->as_C_string();
  } else {
    // May be called in a race before _name_and_id is initialized.
    return _class_loader_klass->external_name();
  }
}

void ClassLoaderData::print_value_on(outputStream* out) const {
  if (!is_unloading() && class_loader() != nullptr) {
    out->print("loader data: " INTPTR_FORMAT " for instance ", p2i(this));
    class_loader()->print_value_on(out); // includes loader_name_and_id() and address of class loader instance
  } else {
    // loader data: 0xsomeaddr of 'bootstrap'
    out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id());
  }
  if (_has_class_mirror_holder) {
    out->print(" has a class holder");
  }
}

void ClassLoaderData::print_value() const { print_value_on(tty); }

#ifndef PRODUCT
class PrintKlassClosure: public KlassClosure {
  outputStream* _out;
 public:
  PrintKlassClosure(outputStream* out): _out(out) { }

  void do_klass(Klass* k) {
    ResourceMark rm;
    _out->print("%s,", k->external_name());
  }
};

void ClassLoaderData::print_on(outputStream* out) const {
  ResourceMark rm;
  out->print_cr("ClassLoaderData(" INTPTR_FORMAT ")", p2i(this));
  out->print_cr(" - name %s", loader_name_and_id());
  if (!_holder.is_null()) {
    out->print(" - holder ");
    _holder.print_on(out);
    out->print_cr("");
  }
  if (!_unloading) {
    out->print_cr(" - class loader " INTPTR_FORMAT, p2i(_class_loader.peek()));
  } else {
    out->print_cr(" - class loader <unloading, oop is bad>");
  }
  out->print_cr(" - metaspace " INTPTR_FORMAT, p2i(_metaspace));
  out->print_cr(" - unloading %s", _unloading ? "true" : "false");
  out->print_cr(" - class mirror holder %s", _has_class_mirror_holder ? "true" : "false");
  out->print_cr(" - modified oops %s", _modified_oops ?
"true" : "false"); 1024 out->print_cr(" - _keep_alive_ref_count %d", _keep_alive_ref_count); 1025 out->print (" - claim "); 1026 switch(_claim) { 1027 case _claim_none: out->print_cr("none"); break; 1028 case _claim_finalizable: out->print_cr("finalizable"); break; 1029 case _claim_strong: out->print_cr("strong"); break; 1030 case _claim_stw_fullgc_mark: out->print_cr("stw full gc mark"); break; 1031 case _claim_stw_fullgc_adjust: out->print_cr("stw full gc adjust"); break; 1032 case _claim_other: out->print_cr("other"); break; 1033 case _claim_other | _claim_finalizable: out->print_cr("other and finalizable"); break; 1034 case _claim_other | _claim_strong: out->print_cr("other and strong"); break; 1035 default: ShouldNotReachHere(); 1036 } 1037 out->print_cr(" - handles %d", _handles.count()); 1038 out->print_cr(" - dependency count %d", _dependency_count); 1039 out->print (" - klasses { "); 1040 if (Verbose) { 1041 PrintKlassClosure closure(out); 1042 ((ClassLoaderData*)this)->classes_do(&closure); 1043 } else { 1044 out->print("..."); 1045 } 1046 out->print_cr(" }"); 1047 out->print_cr(" - packages " INTPTR_FORMAT, p2i(_packages)); 1048 out->print_cr(" - module " INTPTR_FORMAT, p2i(_modules)); 1049 out->print_cr(" - unnamed module " INTPTR_FORMAT, p2i(_unnamed_module)); 1050 if (_dictionary != nullptr) { 1051 out->print (" - dictionary " INTPTR_FORMAT " ", p2i(_dictionary)); 1052 _dictionary->print_size(out); 1053 } else { 1054 out->print_cr(" - dictionary " INTPTR_FORMAT, p2i(_dictionary)); 1055 } 1056 if (_jmethod_ids != nullptr) { 1057 out->print (" - jmethod count "); 1058 Method::print_jmethod_ids_count(this, out); 1059 out->print_cr(""); 1060 } 1061 out->print_cr(" - deallocate list " INTPTR_FORMAT, p2i(_deallocate_list)); 1062 out->print_cr(" - next CLD " INTPTR_FORMAT, p2i(_next)); 1063 } 1064 #endif // PRODUCT 1065 1066 void ClassLoaderData::print() const { print_on(tty); } 1067 1068 class VerifyHandleOops : public OopClosure { 1069 VerifyOopClosure vc; 1070 public: 1071 virtual void do_oop(oop* p) { 1072 if (p != nullptr && *p != nullptr) { 1073 oop o = *p; 1074 if (!java_lang_Class::is_instance(o)) { 1075 // is_instance will assert for an invalid oop. 1076 // Walk the resolved_references array and other assorted oops in the 1077 // CLD::_handles field. The mirror oops are followed by other heap roots. 1078 o->oop_iterate(&vc); 1079 } 1080 } 1081 } 1082 virtual void do_oop(narrowOop* o) { ShouldNotReachHere(); } 1083 }; 1084 1085 void ClassLoaderData::verify() { 1086 assert_locked_or_safepoint(_metaspace_lock); 1087 oop cl = class_loader(); 1088 1089 guarantee(this == class_loader_data(cl) || has_class_mirror_holder(), "Must be the same"); 1090 guarantee(cl != nullptr || this == ClassLoaderData::the_null_class_loader_data() || has_class_mirror_holder(), "must be"); 1091 1092 // Verify the integrity of the allocated space. 
#ifdef ASSERT
  if (metaspace_or_null() != nullptr) {
    metaspace_or_null()->verify();
  }
#endif

  for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
    guarantee(k->class_loader_data() == this, "Must be the same");
    k->verify();
    assert(k != k->next_link(), "no loops!");
  }

  if (_modules != nullptr) {
    _modules->verify();
  }

  if (_deallocate_list != nullptr) {
    for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
      Metadata* m = _deallocate_list->at(i);
      if (m->is_klass()) {
        ((Klass*)m)->verify();
      }
    }
  }

  // Check the oops in the handles area
  VerifyHandleOops vho;
  oops_do(&vho, _claim_none, false);
}

bool ClassLoaderData::contains_klass(Klass* klass) {
  // Lock-free access requires load_acquire
  for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    if (k == klass) return true;
  }
  return false;
}
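
// A note on the lock-free _klasses list used throughout this file: add_class()
// publishes a new head with Atomic::release_store() while holding the metaspace
// lock, and the iterators read the head with Atomic::load_acquire() and then
// follow next_link() without locking. A minimal sketch of the pattern
// (illustrative only, details elided):
//
//   // writer, under the lock:
//   k->set_next_link(_klasses);            // link first,
//   Atomic::release_store(&_klasses, k);   // then publish with release semantics
//
//   // reader, no lock:
//   for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
//     ...
//   }
//
// Readers may miss klasses added after their acquire-load, but they never
// observe a partially linked entry.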