1 /*
2 * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // A ClassLoaderData identifies the full set of class types that a class
26 // loader's name resolution strategy produces for a given configuration of the
27 // class loader.
// Class types in the ClassLoaderData may be defined from class file binaries
// provided by the class loader, or by other class loaders it interacts with
// according to its name resolution strategy.
31 //
32 // Class loaders that implement a deterministic name resolution strategy
33 // (including with respect to their delegation behavior), such as the boot, the
34 // platform, and the system loaders of the JDK's built-in class loader
35 // hierarchy, always produce the same linkset for a given configuration.
36 //
37 // ClassLoaderData carries information related to a linkset (e.g.,
38 // metaspace holding its klass definitions).
39 // The System Dictionary and related data structures (e.g., placeholder table,
40 // loader constraints table) as well as the runtime representation of classes
41 // only reference ClassLoaderData.
42 //
// Each instance of java.lang.ClassLoader holds a pointer to a ClassLoaderData
// that represents the loader's "linking domain" in the JVM.
45 //
46 // The bootstrap loader (represented by null) also has a ClassLoaderData,
47 // the singleton class the_null_class_loader_data().
48
49 #include "cds/heapShared.hpp"
50 #include "classfile/classLoaderData.inline.hpp"
51 #include "classfile/classLoaderDataGraph.inline.hpp"
52 #include "classfile/dictionary.hpp"
53 #include "classfile/javaClasses.inline.hpp"
54 #include "classfile/moduleEntry.hpp"
55 #include "classfile/packageEntry.hpp"
56 #include "classfile/symbolTable.hpp"
57 #include "classfile/systemDictionary.hpp"
58 #include "classfile/systemDictionaryShared.hpp"
59 #include "classfile/vmClasses.hpp"
60 #include "logging/log.hpp"
61 #include "logging/logStream.hpp"
62 #include "memory/allocation.inline.hpp"
63 #include "memory/classLoaderMetaspace.hpp"
64 #include "memory/metadataFactory.hpp"
65 #include "memory/metaspace.hpp"
66 #include "memory/resourceArea.hpp"
67 #include "memory/universe.hpp"
68 #include "oops/access.inline.hpp"
69 #include "oops/jmethodIDTable.hpp"
70 #include "oops/klass.inline.hpp"
71 #include "oops/oop.inline.hpp"
72 #include "oops/oopHandle.inline.hpp"
73 #include "oops/verifyOopClosure.hpp"
74 #include "oops/weakHandle.inline.hpp"
75 #include "runtime/arguments.hpp"
76 #include "runtime/atomicAccess.hpp"
77 #include "runtime/handles.inline.hpp"
78 #include "runtime/mutex.hpp"
79 #include "runtime/safepoint.hpp"
80 #include "utilities/growableArray.hpp"
81 #include "utilities/macros.hpp"
82 #include "utilities/ostream.hpp"
83
84 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = nullptr;
85
86 void ClassLoaderData::init_null_class_loader_data() {
87 assert(_the_null_class_loader_data == nullptr, "cannot initialize twice");
88 assert(ClassLoaderDataGraph::_head == nullptr, "cannot initialize twice");
89
90 _the_null_class_loader_data = new ClassLoaderData(Handle(), false);
91 ClassLoaderDataGraph::_head = _the_null_class_loader_data;
92 assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
93
94 LogTarget(Trace, class, loader, data) lt;
95 if (lt.is_enabled()) {
96 ResourceMark rm;
97 LogStream ls(lt);
98 ls.print("create ");
99 _the_null_class_loader_data->print_value_on(&ls);
100 ls.cr();
101 }
102 }
103
// Obtain and set the class loader's name within the ClassLoaderData so
// it will be available for error messages, logging, JFR, etc. The name
// and klass are available after the class_loader oop is no longer alive,
// during unloading.
void ClassLoaderData::initialize_name(Handle class_loader) {
  ResourceMark rm; // as_utf8_string()/external_name() return resource-allocated strings

  // Obtain the class loader's name. If the class loader's name was not
  // explicitly set during construction, the CLD's _name field will be null.
  oop cl_name = java_lang_ClassLoader::name(class_loader());
  if (cl_name != nullptr) {
    const char* cl_instance_name = java_lang_String::as_utf8_string(cl_name);

    // Only a non-empty name is interned; otherwise _name stays null.
    if (cl_instance_name != nullptr && cl_instance_name[0] != '\0') {
      _name = SymbolTable::new_symbol(cl_instance_name);
    }
  }

  // Obtain the class loader's name and identity hash. If the class loader's
  // name was not explicitly set during construction, the class loader's name and id
  // will be set to the qualified class name of the class loader along with its
  // identity hash.
  // If for some reason the ClassLoader's constructor has not been run, instead of
  // leaving the _name_and_id field null, fall back to the external qualified class
  // name. Thus CLD's _name_and_id field should never have a null value.
  oop cl_name_and_id = java_lang_ClassLoader::nameAndId(class_loader());
  const char* cl_instance_name_and_id =
      (cl_name_and_id == nullptr) ? _class_loader_klass->external_name() :
                                    java_lang_String::as_utf8_string(cl_name_and_id);
  assert(cl_instance_name_and_id != nullptr && cl_instance_name_and_id[0] != '\0', "class loader has no name and id");
  _name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id);
}
136
// Construct a ClassLoaderData for the given loader oop. When
// has_class_mirror_holder is true, this CLD backs a single non-strong hidden
// class and gets neither module/package tables nor a dictionary.
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder) :
  _metaspace(nullptr),
  _metaspace_lock(new Mutex(Mutex::nosafepoint-2, "MetaspaceAllocation_lock")),
  _unloading(false), _has_class_mirror_holder(has_class_mirror_holder),
  // Start with _modified_oops set so the first GC scan visits this CLD's oops.
  _modified_oops(true),
  // A non-strong hidden class loader data doesn't have anything to keep
  // it from being unloaded during parsing of the non-strong hidden class.
  // The null-class-loader should always be kept alive.
  _keep_alive_ref_count((has_class_mirror_holder || h_class_loader.is_null()) ? 1 : 0),
  _claim(0),
  _handles(),
  _klasses(nullptr), _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr), _dictionary(nullptr),
  _jmethod_ids(nullptr),
  _deallocate_list(nullptr),
  _next(nullptr),
  _unloading_next(nullptr),
  _class_loader_klass(nullptr), _name(nullptr), _name_and_id(nullptr) {

  if (!h_class_loader.is_null()) {
    // Record the loader oop, its klass, and its name. The klass and name
    // remain available for logging even after the loader oop dies.
    _class_loader = _handles.add(h_class_loader());
    _class_loader_klass = h_class_loader->klass();
    initialize_name(h_class_loader);
  }

  if (!has_class_mirror_holder) {
    // The holder is initialized later for non-strong hidden classes,
    // and before calling anything that calls class_loader().
    initialize_holder(h_class_loader);

    // A ClassLoaderData created solely for a non-strong hidden class should never
    // have a ModuleEntryTable or PackageEntryTable created for it.
    _packages = new PackageEntryTable();
    if (h_class_loader.is_null()) {
      // Create unnamed module for boot loader
      _unnamed_module = ModuleEntry::create_boot_unnamed_module(this);
    } else {
      // Create unnamed module for all other loaders
      _unnamed_module = ModuleEntry::create_unnamed_module(this);
    }
    _dictionary = create_dictionary();
  }

  NOT_PRODUCT(_dependency_count = 0); // number of class loader dependencies

  JFR_ONLY(INIT_ID(this);)
}
183
184 ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
185 Chunk* c = _head;
186 while (c != nullptr) {
187 Chunk* next = c->_next;
188 delete c;
189 c = next;
190 }
191 }
192
// Append an oop to the chunked list and return a handle to its slot.
// Writers are serialized by the caller (see add_handle(), which holds the
// metaspace lock); readers walk the list lock-free, so both the new chunk
// and the incremented size are published with release stores.
OopHandle ClassLoaderData::ChunkedHandleList::add(oop o) {
  if (_head == nullptr || _head->_size == Chunk::CAPACITY) {
    // Current chunk is full (or none exists yet): prepend and publish a new one.
    Chunk* next = new Chunk(_head);
    AtomicAccess::release_store(&_head, next);
  }
  oop* handle = &_head->_data[_head->_size];
  // The slot has never held an oop, so the destination-uninitialized store
  // decorator is used.
  NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
  // Publish the new size only after the oop store above is complete, so
  // lock-free readers never see an uninitialized slot.
  AtomicAccess::release_store(&_head->_size, _head->_size + 1);
  return OopHandle(handle);
}
203
204 int ClassLoaderData::ChunkedHandleList::count() const {
205 int count = 0;
206 Chunk* chunk = AtomicAccess::load_acquire(&_head);
207 while (chunk != nullptr) {
208 count += AtomicAccess::load(&chunk->_size);
209 chunk = chunk->_next;
210 }
211 return count;
212 }
213
// Apply the closure to the first 'size' oop slots of chunk c.
inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
  for (juint i = 0; i < size; i++) {
    f->do_oop(&c->_data[i]);
  }
}
219
// Apply the closure to every handle in the list. May run concurrently with
// add(); only the head chunk can grow, so only its size needs an acquire load.
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
  Chunk* head = AtomicAccess::load_acquire(&_head);
  if (head != nullptr) {
    // Must be careful when reading size of head: it may be concurrently
    // incremented by add(), so pair with its release store via load_acquire.
    oops_do_chunk(f, head, AtomicAccess::load_acquire(&head->_size));
    // Non-head chunks are no longer appended to; plain reads suffice.
    for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
      oops_do_chunk(f, c, c->_size);
    }
  }
}
230
231 class VerifyContainsOopClosure : public OopClosure {
232 oop _target;
233 bool _found;
234
235 public:
236 VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
237
238 void do_oop(oop* p) {
239 if (p != nullptr && NativeAccess<AS_NO_KEEPALIVE>::oop_load(p) == _target) {
240 _found = true;
241 }
242 }
243
244 void do_oop(narrowOop* p) {
245 // The ChunkedHandleList should not contain any narrowOop
246 ShouldNotReachHere();
247 }
248
249 bool found() const {
250 return _found;
251 }
252 };
253
254 bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
255 VerifyContainsOopClosure cl(p);
256 oops_do(&cl);
257 return cl.found();
258 }
259
260 #ifndef PRODUCT
261 bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
262 Chunk* chunk = AtomicAccess::load_acquire(&_head);
263 while (chunk != nullptr) {
264 if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[AtomicAccess::load(&chunk->_size)])) {
265 return true;
266 }
267 chunk = chunk->_next;
268 }
269 return false;
270 }
271 #endif // PRODUCT
272
273 void ClassLoaderData::clear_claim(int claim) {
274 for (;;) {
275 int old_claim = AtomicAccess::load(&_claim);
276 if ((old_claim & claim) == 0) {
277 return;
278 }
279 int new_claim = old_claim & ~claim;
280 if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
281 return;
282 }
283 }
284 }
285
286 #ifdef ASSERT
// Debug-only check that none of the given claim bits are currently set.
void ClassLoaderData::verify_not_claimed(int claim) {
  assert((_claim & claim) == 0, "Found claim: %d bits in _claim: %d", claim, _claim);
}
290 #endif
291
292 bool ClassLoaderData::try_claim(int claim) {
293 for (;;) {
294 int old_claim = AtomicAccess::load(&_claim);
295 if ((old_claim & claim) == claim) {
296 return false;
297 }
298 int new_claim = old_claim | claim;
299 if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
300 return true;
301 }
302 }
303 }
304
// Demote the handle-area oops from GC strong roots to ordinary strong native
// handles, applying the load/store barriers concurrent collectors require.
void ClassLoaderData::demote_strong_roots() {
  // The oop handle area contains strong roots that the GC traces from. We are about
  // to demote them to strong native oops that the GC does *not* trace from. Conceptually,
  // we are retiring a rather normal strong root, and creating a strong non-root handle,
  // which happens to reuse the same address as the normal strong root had.
  // Unless we invoke the right barriers, the GC might not notice that a strong root
  // has been pulled from the system, and is left unprocessed by the GC. There can be
  // several consequences:
  // 1. A concurrently marking snapshot-at-the-beginning GC might assume that the contents
  //    of all strong roots get processed by the GC in order to keep them alive. Without
  //    barriers, some objects might not be kept alive.
  // 2. A concurrently relocating GC might assume that after moving an object, a subsequent
  //    tracing from all roots can fix all the pointers in the system, which doesn't play
  //    well with roots racingly being pulled.
  // 3. A concurrent GC using colored pointers, might assume that tracing the object graph
  //    from roots results in all pointers getting some particular color, which also doesn't
  //    play well with roots being pulled out from the system concurrently.

  class TransitionRootsOopClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // By loading the strong root with the access API, we can use the right barriers to
      // store the oop as a strong non-root handle, that happens to reuse the same memory
      // address as the strong root. The barriered store ensures that:
      // 1. The concurrent SATB marking properties are satisfied as the store will keep
      //    the oop alive.
      // 2. The concurrent object movement properties are satisfied as we store the address
      //    of the new location of the object, if any.
      // 3. The colors if any will be stored as the new good colors.
      oop obj = NativeAccess<>::oop_load(p); // Load the strong root
      NativeAccess<>::oop_store(p, obj);     // Store the strong non-root
    }

    virtual void do_oop(narrowOop* p) {
      // The handle area holds full-width oops only.
      ShouldNotReachHere();
    }
  } cl;
  // No claim and no modified-oops clearing: we only rewrite in place.
  oops_do(&cl, ClassLoaderData::_claim_none, false /* clear_mod_oops */);
}
344
345 // Non-strong hidden classes have their own ClassLoaderData that is marked to keep alive
346 // while the class is being parsed, and if the class appears on the module fixup list.
347 // Due to the uniqueness that no other class shares the hidden class' name or
348 // ClassLoaderData, no other non-GC thread has knowledge of the hidden class while
349 // it is being defined, therefore _keep_alive_ref_count is not volatile or atomic.
350 void ClassLoaderData::inc_keep_alive_ref_count() {
351 if (has_class_mirror_holder()) {
352 assert(_keep_alive_ref_count > 0, "Invalid keep alive increment count");
353 _keep_alive_ref_count++;
354 }
355 }
356
357 void ClassLoaderData::dec_keep_alive_ref_count() {
358 if (has_class_mirror_holder()) {
359 assert(_keep_alive_ref_count > 0, "Invalid keep alive decrement count");
360 if (_keep_alive_ref_count == 1) {
361 // When the keep_alive_ref_count counter is 1, the oop handle area is a strong root,
362 // acting as input to the GC tracing. Such strong roots are part of the
363 // snapshot-at-the-beginning, and can not just be pulled out from the
364 // system when concurrent GCs are running at the same time, without
365 // invoking the right barriers.
366 demote_strong_roots();
367 }
368 _keep_alive_ref_count--;
369 }
370 }
371
372 void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oops) {
373 if (claim_value != ClassLoaderData::_claim_none && !try_claim(claim_value)) {
374 return;
375 }
376
377 // Only clear modified_oops after the ClassLoaderData is claimed.
378 if (clear_mod_oops) {
379 clear_modified_oops();
380 }
381
382 _handles.oops_do(f);
383 }
384
385 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
386 // Lock-free access requires load_acquire
387 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
388 klass_closure->do_klass(k);
389 assert(k != k->next_link(), "no loops!");
390 }
391 }
392
393 void ClassLoaderData::classes_do(void f(Klass * const)) {
394 // Lock-free access requires load_acquire
395 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
396 f(k);
397 assert(k != k->next_link(), "no loops!");
398 }
399 }
400
401 void ClassLoaderData::methods_do(void f(Method*)) {
402 // Lock-free access requires load_acquire
403 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
404 if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
405 InstanceKlass::cast(k)->methods_do(f);
406 }
407 }
408 }
409
// Apply the closure to every klass in this CLD that has reached the loaded
// state, skipping partially-initialized instance klasses and AOT-cached
// object array klasses whose bottom class is not yet loaded.
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
  // Lock-free access requires load_acquire
  for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    // Filter out InstanceKlasses (or their ObjArrayKlasses) that have not entered the
    // loaded state.
    if (k->is_instance_klass()) {
      if (!InstanceKlass::cast(k)->is_loaded()) {
        continue;
      }
    } else if (k->in_aot_cache() && k->is_objArray_klass()) {
      Klass* bottom = ObjArrayKlass::cast(k)->bottom_klass();
      if (bottom->is_instance_klass() && !InstanceKlass::cast(bottom)->is_loaded()) {
        // This could happen if <bottom> is a shared class that has been restored
        // but is not yet marked as loaded. All archived array classes of the
        // bottom class are already restored and placed in the _klasses list.
        continue;
      }
    }

#ifdef ASSERT
    // Every klass that reaches the closure must have a valid Class mirror.
    oop m = k->java_mirror();
    assert(m != nullptr, "nullptr mirror");
    assert(m->is_a(vmClasses::Class_klass()), "invalid mirror");
#endif
    klass_closure->do_klass(k);
  }
}
437
438 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
439 // Lock-free access requires load_acquire
440 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
441 if (k->is_instance_klass()) {
442 f(InstanceKlass::cast(k));
443 }
444 assert(k != k->next_link(), "no loops!");
445 }
446 }
447
448 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
449 assert_locked_or_safepoint(Module_lock);
450 if (_unnamed_module != nullptr) {
451 f(_unnamed_module);
452 }
453 if (_modules != nullptr) {
454 _modules->modules_do(f);
455 }
456 }
457
458 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
459 assert_locked_or_safepoint(Module_lock);
460 if (_packages != nullptr) {
461 _packages->packages_do(f);
462 }
463 }
464
// Record that this CLD depends on Klass k's CLD staying alive, by adding a
// strong reference (the dependency's loader oop, or its mirror for hidden
// classes) to this CLD's handle area. Only needed when the dependency would
// not otherwise be kept alive through GC tracing from this loader.
void ClassLoaderData::record_dependency(const Klass* k) {
  assert(k != nullptr, "invariant");

  ClassLoaderData * const from_cld = this;
  ClassLoaderData * const to_cld = k->class_loader_data();

  // Do not need to record dependency if the dependency is to a class whose
  // class loader data is never freed. (i.e. the dependency's class loader
  // is one of the three builtin class loaders and the dependency's class
  // loader data has a ClassLoader holder, not a Class holder.)
  if (to_cld->is_permanent_class_loader_data()) {
    return;
  }

  oop to;
  if (to_cld->has_class_mirror_holder()) {
    // Just return if a non-strong hidden class class is attempting to record a dependency
    // to itself. (Note that every non-strong hidden class has its own unique class
    // loader data.)
    if (to_cld == from_cld) {
      return;
    }
    // Hidden class dependencies are through the mirror.
    to = k->java_mirror();
  } else {
    to = to_cld->class_loader();
    oop from = from_cld->class_loader();

    // Just return if this dependency is to a class with the same or a parent
    // class_loader.
    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
      return; // this class loader is in the parent list, no need to add it.
    }
  }

  // It's a dependency we won't find through GC, add it.
  if (!_handles.contains(to)) {
    NOT_PRODUCT(AtomicAccess::inc(&_dependency_count));
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("adding dependency from ");
      print_value_on(&ls);
      ls.print(" to ");
      to_cld->print_value_on(&ls);
      ls.cr();
    }
    // Pin the dependency with a strong handle in this CLD's handle area.
    Handle dependency(Thread::current(), to);
    add_handle(dependency);
    // Added a potentially young gen oop to the ClassLoaderData
    record_modified_oops();
  }
}
519
// Prepend klass k to this CLD's _klasses list. Readers walk the list
// lock-free, so the new head is published with a release store while the
// metaspace lock serializes writers.
void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
  {
    MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    Klass* old_value = _klasses;
    k->set_next_link(old_value);
    // Link the new item into the list, making sure the linked class is stable
    // since the list can be walked without a lock
    AtomicAccess::release_store(&_klasses, k);
    // Keep the graph-wide class counters in sync with the list.
    if (k->is_array_klass()) {
      ClassLoaderDataGraph::inc_array_classes(1);
    } else {
      ClassLoaderDataGraph::inc_instance_classes(1);
    }
  }

  if (publicize) {
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("Adding k: " PTR_FORMAT " %s to ", p2i(k), k->external_name());
      print_value_on(&ls);
      ls.cr();
    }
  }
}
546
547 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
548 if (loader_or_mirror() != nullptr) {
549 assert(_holder.is_null(), "never replace holders");
550 _holder = WeakHandle(Universe::vm_weak(), loader_or_mirror);
551 }
552 }
553
554 // Remove a klass from the _klasses list for scratch_class during redefinition
555 // or parsed class in the case of an error.
556 void ClassLoaderData::remove_class(Klass* scratch_class) {
557 assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
558
559 Klass* prev = nullptr;
560 for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
561 if (k == scratch_class) {
562 if (prev == nullptr) {
563 _klasses = k->next_link();
564 } else {
565 Klass* next = k->next_link();
566 prev->set_next_link(next);
567 }
568
569 if (k->is_array_klass()) {
570 ClassLoaderDataGraph::dec_array_classes(1);
571 } else {
572 ClassLoaderDataGraph::dec_instance_classes(1);
573 }
574
575 return;
576 }
577 prev = k;
578 assert(k != k->next_link(), "no loops!");
579 }
580 ShouldNotReachHere(); // should have found this class!!
581 }
582
583 void ClassLoaderData::add_jmethod_id(jmethodID mid) {
584 MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
585 if (_jmethod_ids == nullptr) {
586 _jmethod_ids = new (mtClass) GrowableArray<jmethodID>(32, mtClass);
587 }
588 _jmethod_ids->push(mid);
589 }
590
591 // Method::remove_jmethod_ids removes jmethodID entries from the table which
592 // releases memory.
593 // Because native code (e.g., JVMTI agent) holding jmethod_ids may access them
594 // after the associated classes and class loader are unloaded, subsequent lookups
595 // for these ids will return null since they are no longer found in the table.
596 // The Java Native Interface Specification says "method ID
597 // does not prevent the VM from unloading the class from which the ID has
598 // been derived. After the class is unloaded, the method or field ID becomes
599 // invalid".
600 void ClassLoaderData::remove_jmethod_ids() {
601 MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
602 for (int i = 0; i < _jmethod_ids->length(); i++) {
603 jmethodID mid = _jmethod_ids->at(i);
604 JmethodIDTable::remove(mid);
605 }
606 delete _jmethod_ids;
607 _jmethod_ids = nullptr;
608 }
609
// Flag this CLD as unloading and release the parts that are safe to free
// immediately: the deallocate list's C-heap structures, per-class unloading
// actions, and the jmethodIDs handed out for its methods.
void ClassLoaderData::unload() {
  _unloading = true;

  LogTarget(Trace, class, loader, data) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print("unload");
    print_value_on(&ls);
    ls.cr();
  }

  // Some items on the _deallocate_list need to free their C heap structures
  // if they are not already on the _klasses list.
  free_deallocate_list_C_heap_structures();

  // Clean up class dependencies and tell serviceability tools
  // these classes are unloading. This must be called
  // after erroneous classes are released.
  classes_do(InstanceKlass::unload_class);

  // Purge this CLD's jmethodIDs from the global table, if any were issued.
  if (_jmethod_ids != nullptr) {
    remove_jmethod_ids();
  }
}
635
// Return this CLD's module entry table, creating it on first request.
// Double-checked locking: the fast path reads _modules with an acquire load;
// slow-path creation is serialized by Module_lock and published with a
// release store.
ModuleEntryTable* ClassLoaderData::modules() {
  // Lazily create the module entry table at first request.
  // Lock-free access requires load_acquire.
  ModuleEntryTable* modules = AtomicAccess::load_acquire(&_modules);
  if (modules == nullptr) {
    MutexLocker m1(Module_lock);
    // Check if _modules got allocated while we were waiting for this lock.
    if ((modules = _modules) == nullptr) {
      modules = new ModuleEntryTable();

      {
        MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
        // Ensure _modules is stable, since it is examined without a lock
        AtomicAccess::release_store(&_modules, modules);
      }
    }
  }
  return modules;
}
655
656 const int _boot_loader_dictionary_size = 1009;
657 const int _default_loader_dictionary_size = 107;
658
659 Dictionary* ClassLoaderData::create_dictionary() {
660 assert(!has_class_mirror_holder(), "class mirror holder cld does not have a dictionary");
661 int size;
662 if (_the_null_class_loader_data == nullptr) {
663 size = _boot_loader_dictionary_size;
664 } else if (is_system_class_loader_data()) {
665 size = _boot_loader_dictionary_size;
666 } else {
667 size = _default_loader_dictionary_size;
668 }
669 return new Dictionary(this, size);
670 }
671
672 // Tell the GC to keep this klass alive. Needed while iterating ClassLoaderDataGraph,
673 // and any runtime code that uses klasses.
674 oop ClassLoaderData::holder() const {
675 // A klass that was previously considered dead can be looked up in the
676 // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
677 // or a reachable object making it alive again. The SATB part of G1 needs
678 // to get notified about this potential resurrection, otherwise the marking
679 // might not find the object.
680 if (!_holder.is_null()) { // null class_loader
681 return _holder.resolve();
682 } else {
683 return nullptr;
684 }
685 }
686
687 // Let the GC read the holder without keeping it alive.
688 oop ClassLoaderData::holder_no_keepalive() const {
689 if (!_holder.is_null()) { // null class_loader
690 return _holder.peek();
691 } else {
692 return nullptr;
693 }
694 }
695
696 // Unloading support
697 bool ClassLoaderData::is_alive() const {
698 bool alive = (_keep_alive_ref_count > 0) // null class loader and incomplete non-strong hidden class.
699 || (_holder.peek() != nullptr); // and not cleaned by the GC weak handle processing.
700
701 return alive;
702 }
703
704 class ReleaseKlassClosure: public KlassClosure {
705 private:
706 size_t _instance_class_released;
707 size_t _array_class_released;
708 public:
709 ReleaseKlassClosure() : _instance_class_released(0), _array_class_released(0) { }
710
711 size_t instance_class_released() const { return _instance_class_released; }
712 size_t array_class_released() const { return _array_class_released; }
713
714 void do_klass(Klass* k) {
715 if (k->is_array_klass()) {
716 _array_class_released ++;
717 } else {
718 assert(k->is_instance_klass(), "Must be");
719 _instance_class_released ++;
720 }
721 k->release_C_heap_structures();
722 }
723 };
724
// Tear down everything this CLD owns. Note the ordering: the metaspace is
// deleted before the metaspace lock it was constructed with.
ClassLoaderData::~ClassLoaderData() {
  // Release C heap structures for all the classes.
  ReleaseKlassClosure cl;
  classes_do(&cl);

  // Keep the graph-wide class counters in sync with the released classes.
  ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
  ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());

  // Release the WeakHandle
  _holder.release(Universe::vm_weak());

  // Release C heap allocated hashtable for all the packages.
  if (_packages != nullptr) {
    // Destroy the table itself
    delete _packages;
    _packages = nullptr;
  }

  // Release C heap allocated hashtable for all the modules.
  if (_modules != nullptr) {
    // Destroy the table itself
    delete _modules;
    _modules = nullptr;
  }

  // Release C heap allocated hashtable for the dictionary
  if (_dictionary != nullptr) {
    // Destroy the table itself
    delete _dictionary;
    _dictionary = nullptr;
  }

  // The unnamed module is owned directly by this CLD, not by a table.
  if (_unnamed_module != nullptr) {
    delete _unnamed_module;
    _unnamed_module = nullptr;
  }

  // release the metaspace
  ClassLoaderMetaspace *m = _metaspace;
  if (m != nullptr) {
    _metaspace = nullptr;
    delete m;
  }

  // Delete lock (must outlive the metaspace, which uses it)
  delete _metaspace_lock;

  // Delete free list
  if (_deallocate_list != nullptr) {
    delete _deallocate_list;
  }

  // Decrement refcounts of Symbols if created.
  if (_name != nullptr) {
    _name->decrement_refcount();
  }
  if (_name_and_id != nullptr) {
    _name_and_id->decrement_refcount();
  }
}
785
// Returns true if this class loader data is for the app class loader
// or a user defined system class loader. (Note that the class loader
// data may have a Class holder.)
bool ClassLoaderData::is_system_class_loader_data() const {
  // Delegates to SystemDictionary, which compares against the VM-wide
  // system class loader oop.
  return SystemDictionary::is_system_class_loader(class_loader());
}
792
// Returns true if this class loader data is for the platform class loader.
// (Note that the class loader data may have a Class holder.)
bool ClassLoaderData::is_platform_class_loader_data() const {
  // Delegates to SystemDictionary, which compares against the VM-wide
  // platform class loader oop.
  return SystemDictionary::is_platform_class_loader(class_loader());
}
798
799 // Returns true if the class loader for this class loader data is one of
800 // the 3 builtin (boot application/system or platform) class loaders,
801 // including a user-defined system class loader. Note that if the class
802 // loader data is for a non-strong hidden class then it may
803 // get freed by a GC even if its class loader is one of these loaders.
804 bool ClassLoaderData::is_builtin_class_loader_data() const {
805 return (is_boot_class_loader_data() ||
806 SystemDictionary::is_system_class_loader(class_loader()) ||
807 SystemDictionary::is_platform_class_loader(class_loader()));
808 }
809
810 // Returns true if this class loader data is a class loader data
811 // that is not ever freed by a GC. It must be the CLD for one of the builtin
812 // class loaders and not the CLD for a non-strong hidden class.
813 bool ClassLoaderData::is_permanent_class_loader_data() const {
814 return is_builtin_class_loader_data() && !has_class_mirror_holder();
815 }
816
// Return this CLD's metaspace, creating it on first use. Double-checked
// locking: the fast path reads _metaspace with an acquire load; slow-path
// creation is serialized by the metaspace lock and published with a
// release store.
ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
  // If the metaspace has not been allocated, create a new one. Might want
  // to create smaller arena for Reflection class loaders also.
  // The reason for the delayed allocation is because some class loaders are
  // simply for delegating with no metadata of their own.
  // Lock-free access requires load_acquire.
  ClassLoaderMetaspace* metaspace = AtomicAccess::load_acquire(&_metaspace);
  if (metaspace == nullptr) {
    MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
    // Check if _metaspace got allocated while we were waiting for this lock.
    if ((metaspace = _metaspace) == nullptr) {
      // Pick the metaspace type by CLD kind: boot loader, non-strong hidden
      // class holder, or a regular class loader.
      if (this == the_null_class_loader_data()) {
        assert (class_loader() == nullptr, "Must be");
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
      } else if (has_class_mirror_holder()) {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ClassMirrorHolderMetaspaceType);
      } else {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
      }
      // Ensure _metaspace is stable, since it is examined without a lock
      AtomicAccess::release_store(&_metaspace, metaspace);
    }
  }
  return metaspace;
}
842
// Add an oop to this CLD's handle area under the metaspace lock. The
// modified-oops flag is raised first so the GC notices the potentially
// young-gen oop stored here.
OopHandle ClassLoaderData::add_handle(Handle h) {
  MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  record_modified_oops();
  return _handles.add(h());
}
848
849 void ClassLoaderData::remove_handle(OopHandle h) {
850 assert(!is_unloading(), "Do not remove a handle for a CLD that is unloading");
851 if (!h.is_empty()) {
852 assert(_handles.owner_of(h.ptr_raw()),
853 "Got unexpected handle " PTR_FORMAT, p2i(h.ptr_raw()));
854 h.replace(oop(nullptr));
855 }
856 }
857
858 void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
859 MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
860 if (dest.resolve() != nullptr) {
861 return;
862 } else {
863 record_modified_oops();
864 dest = _handles.add(h());
865 }
866 }
867
868 // Add this metadata pointer to be freed when it's safe. This is only during
869 // a safepoint which checks if handles point to this metadata field.
870 void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
871 // Metadata in shared region isn't deleted.
872 if (!m->in_aot_cache()) {
873 MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
874 if (_deallocate_list == nullptr) {
875 _deallocate_list = new (mtClass) GrowableArray<Metadata*>(100, mtClass);
876 }
877 _deallocate_list->append_if_missing(m);
878 ResourceMark rm;
879 log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
880 ClassLoaderDataGraph::set_should_clean_deallocate_lists();
881 }
882 }
883
884 // Deallocate free metadata on the free list. How useful the PermGen was!
885 void ClassLoaderData::free_deallocate_list() {
886 // This must be called at a safepoint because it depends on metadata walking at
887 // safepoint cleanup time.
888 assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
889 assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
890 if (_deallocate_list == nullptr) {
891 return;
892 }
893 // Go backwards because this removes entries that are freed.
894 for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
895 Metadata* m = _deallocate_list->at(i);
896 if (!m->on_stack()) {
897 _deallocate_list->remove_at(i);
898 // There are only three types of metadata that we deallocate directly.
899 // Cast them so they can be used by the template function.
900 if (m->is_method()) {
901 MetadataFactory::free_metadata(this, (Method*)m);
902 } else if (m->is_constantPool()) {
903 HeapShared::remove_scratch_resolved_references((ConstantPool*)m);
904 MetadataFactory::free_metadata(this, (ConstantPool*)m);
905 } else if (m->is_klass()) {
906 MetadataFactory::free_metadata(this, (InstanceKlass*)m);
907 } else {
908 ShouldNotReachHere();
909 }
910 } else {
911 // Metadata is alive.
912 // If scratch_class is on stack then it shouldn't be on this list!
913 assert(!m->is_klass() || !((InstanceKlass*)m)->is_scratch_class(),
914 "scratch classes on this list should be dead");
915 // Also should assert that other metadata on the list was found in handles.
916 // Some cleaning remains.
917 ClassLoaderDataGraph::set_should_clean_deallocate_lists();
918 }
919 }
920 }
921
922 // This is distinct from free_deallocate_list. For class loader data that are
923 // unloading, this frees the C heap memory for items on the list, and unlinks
924 // scratch or error classes so that unloading events aren't triggered for these
925 // classes. The metadata is removed with the unloading metaspace.
926 // There isn't C heap memory allocated for methods, so nothing is done for them.
927 void ClassLoaderData::free_deallocate_list_C_heap_structures() {
928 assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
929 assert(is_unloading(), "only called for ClassLoaderData that are unloading");
930 if (_deallocate_list == nullptr) {
931 return;
932 }
933 // Go backwards because this removes entries that are freed.
934 for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
935 Metadata* m = _deallocate_list->at(i);
936 _deallocate_list->remove_at(i);
937 if (m->is_constantPool()) {
938 ((ConstantPool*)m)->release_C_heap_structures();
939 } else if (m->is_klass()) {
940 InstanceKlass* ik = (InstanceKlass*)m;
941 // also releases ik->constants() C heap memory
942 ik->release_C_heap_structures();
943 // Remove the class so unloading events aren't triggered for
944 // this class (scratch or error class) in do_unloading().
945 remove_class(ik);
946 // But still have to remove it from the dumptime_table.
947 SystemDictionaryShared::handle_class_unloading(ik);
948 }
949 }
950 }
951
952 // Caller needs ResourceMark
953 // If the class loader's _name has not been explicitly set, the class loader's
954 // qualified class name is returned.
955 const char* ClassLoaderData::loader_name() const {
956 if (_class_loader_klass == nullptr) {
957 return BOOTSTRAP_LOADER_NAME;
958 } else if (_name != nullptr) {
959 return _name->as_C_string();
960 } else {
961 return _class_loader_klass->external_name();
962 }
963 }
964
965 // Caller needs ResourceMark
966 // Format of the _name_and_id is as follows:
967 // If the defining loader has a name explicitly set then '<loader-name>' @<id>
968 // If the defining loader has no name then <qualified-class-name> @<id>
969 // If built-in loader, then omit '@<id>' as there is only one instance.
970 const char* ClassLoaderData::loader_name_and_id() const {
971 if (_class_loader_klass == nullptr) {
972 return "'" BOOTSTRAP_LOADER_NAME "'";
973 } else if (_name_and_id != nullptr) {
974 return _name_and_id->as_C_string();
975 } else {
976 // May be called in a race before _name_and_id is initialized.
977 return _class_loader_klass->external_name();
978 }
979 }
980
981 void ClassLoaderData::print_value_on(outputStream* out) const {
982 if (!is_unloading() && class_loader() != nullptr) {
983 out->print("loader data: " INTPTR_FORMAT " for instance ", p2i(this));
984 class_loader()->print_value_on(out); // includes loader_name_and_id() and address of class loader instance
985 } else {
986 // loader data: 0xsomeaddr of 'bootstrap'
987 out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id());
988 }
989 if (_has_class_mirror_holder) {
990 out->print(" has a class holder");
991 }
992 }
993
994 void ClassLoaderData::print_value() const { print_value_on(tty); }
995
996 #ifndef PRODUCT
997 class PrintKlassClosure: public KlassClosure {
998 outputStream* _out;
999 public:
1000 PrintKlassClosure(outputStream* out): _out(out) { }
1001
1002 void do_klass(Klass* k) {
1003 ResourceMark rm;
1004 _out->print("%s,", k->external_name());
1005 }
1006 };
1007
// Multi-line diagnostic dump of this CLD's state (non-PRODUCT builds only).
// Prints identity, holder, flags, claim state, and the major sub-structures.
void ClassLoaderData::print_on(outputStream* out) const {
  ResourceMark rm;
  out->print_cr("ClassLoaderData(" INTPTR_FORMAT ")", p2i(this));
  out->print_cr(" - name %s", loader_name_and_id());
  if (!_holder.is_null()) {
    out->print   (" - holder ");
    _holder.print_on(out);
    out->print_cr("");
  }
  // The class loader oop may be stale once unloading has begun; only peek at
  // it while the CLD is still live.
  if (!_unloading) {
    out->print_cr(" - class loader " INTPTR_FORMAT, p2i(_class_loader.peek()));
  } else {
    out->print_cr(" - class loader <unloading, oop is bad>");
  }
  out->print_cr(" - metaspace " INTPTR_FORMAT, p2i(_metaspace));
  out->print_cr(" - unloading %s", _unloading ? "true" : "false");
  out->print_cr(" - class mirror holder %s", _has_class_mirror_holder ? "true" : "false");
  out->print_cr(" - modified oops %s", _modified_oops ? "true" : "false");
  out->print_cr(" - _keep_alive_ref_count %d", _keep_alive_ref_count);
  out->print   (" - claim ");
  // _claim is a small set of flag values; only these combinations are expected.
  switch(_claim) {
    case _claim_none:                       out->print_cr("none"); break;
    case _claim_finalizable:                out->print_cr("finalizable"); break;
    case _claim_strong:                     out->print_cr("strong"); break;
    case _claim_stw_fullgc_mark:            out->print_cr("stw full gc mark"); break;
    case _claim_stw_fullgc_adjust:          out->print_cr("stw full gc adjust"); break;
    case _claim_other:                      out->print_cr("other"); break;
    case _claim_other | _claim_finalizable: out->print_cr("other and finalizable"); break;
    case _claim_other | _claim_strong:      out->print_cr("other and strong"); break;
    default:                                ShouldNotReachHere();
  }
  out->print_cr(" - handles %d", _handles.count());
  out->print_cr(" - dependency count %d", _dependency_count);
  out->print   (" - klasses { ");
  // Full klass list only in Verbose mode; const is cast away because
  // classes_do() is non-const.
  if (Verbose) {
    PrintKlassClosure closure(out);
    ((ClassLoaderData*)this)->classes_do(&closure);
  } else {
      out->print("...");
  }
  out->print_cr(" }");
  out->print_cr(" - packages " INTPTR_FORMAT, p2i(_packages));
  out->print_cr(" - module " INTPTR_FORMAT, p2i(_modules));
  out->print_cr(" - unnamed module " INTPTR_FORMAT, p2i(_unnamed_module));
  if (_dictionary != nullptr) {
    out->print   (" - dictionary " INTPTR_FORMAT " ", p2i(_dictionary));
    _dictionary->print_size(out);
  } else {
    out->print_cr(" - dictionary " INTPTR_FORMAT, p2i(_dictionary));
  }
  if (_jmethod_ids != nullptr) {
    out->print_cr(" - jmethod count %d", _jmethod_ids->length());
  }
  out->print_cr(" - deallocate list " INTPTR_FORMAT, p2i(_deallocate_list));
  out->print_cr(" - next CLD " INTPTR_FORMAT, p2i(_next));
}
1064 #endif // PRODUCT
1065
1066 void ClassLoaderData::print() const { print_on(tty); }
1067
1068 class VerifyHandleOops : public OopClosure {
1069 VerifyOopClosure vc;
1070 public:
1071 virtual void do_oop(oop* p) {
1072 if (p != nullptr && *p != nullptr) {
1073 oop o = *p;
1074 if (!java_lang_Class::is_instance(o)) {
1075 // is_instance will assert for an invalid oop.
1076 // Walk the resolved_references array and other assorted oops in the
1077 // CLD::_handles field. The mirror oops are followed by other heap roots.
1078 o->oop_iterate(&vc);
1079 }
1080 }
1081 }
1082 virtual void do_oop(narrowOop* o) { ShouldNotReachHere(); }
1083 };
1084
// Consistency checks for this CLD: loader linkage, metaspace, klass list,
// modules, deallocate list entries, and the oops held in the handle area.
// Caller must hold the metaspace lock or be at a safepoint.
void ClassLoaderData::verify() {
  assert_locked_or_safepoint(_metaspace_lock);
  oop cl = class_loader();

  // A CLD for a class-mirror holder (non-strong hidden class) is not the
  // loader oop's canonical CLD, hence the exemptions below.
  guarantee(this == class_loader_data(cl) || has_class_mirror_holder(), "Must be the same");
  guarantee(cl != nullptr || this == ClassLoaderData::the_null_class_loader_data() || has_class_mirror_holder(), "must be");

  // Verify the integrity of the allocated space.
#ifdef ASSERT
  if (metaspace_or_null() != nullptr) {
    metaspace_or_null()->verify();
  }
#endif

  // Every klass on this CLD's list must point back to this CLD.
  for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
    guarantee(k->class_loader_data() == this, "Must be the same");
    k->verify();
    assert(k != k->next_link(), "no loops!");
  }

  if (_modules != nullptr) {
    _modules->verify();
  }

  // Klasses queued for deallocation are still walkable; verify them too.
  if (_deallocate_list != nullptr) {
    for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
      Metadata* m = _deallocate_list->at(i);
      if (m->is_klass()) {
        ((Klass*)m)->verify();
      }
    }
  }

  // Check the oops in the handles area
  VerifyHandleOops vho;
  oops_do(&vho, _claim_none, false);
}
1122
1123 bool ClassLoaderData::contains_klass(Klass* klass) {
1124 // Lock-free access requires load_acquire
1125 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
1126 if (k == klass) return true;
1127 }
1128 return false;
1129 }