1 /*
2 * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // A ClassLoaderData identifies the full set of class types that a class
26 // loader's name resolution strategy produces for a given configuration of the
27 // class loader.
// Class types in the ClassLoaderData may be defined from class file binaries
// provided by the class loader, or by other class loaders it interacts with
// according to its name resolution strategy.
31 //
32 // Class loaders that implement a deterministic name resolution strategy
33 // (including with respect to their delegation behavior), such as the boot, the
34 // platform, and the system loaders of the JDK's built-in class loader
35 // hierarchy, always produce the same linkset for a given configuration.
36 //
37 // ClassLoaderData carries information related to a linkset (e.g.,
38 // metaspace holding its klass definitions).
39 // The System Dictionary and related data structures (e.g., placeholder table,
40 // loader constraints table) as well as the runtime representation of classes
41 // only reference ClassLoaderData.
42 //
// Instances of java.lang.ClassLoader hold a pointer to the ClassLoaderData
// that represents the loader's "linking domain" in the JVM.
45 //
46 // The bootstrap loader (represented by null) also has a ClassLoaderData,
47 // the singleton class the_null_class_loader_data().
48
49 #include "classfile/classLoaderData.inline.hpp"
50 #include "classfile/classLoaderDataGraph.inline.hpp"
51 #include "classfile/dictionary.hpp"
52 #include "classfile/javaClasses.inline.hpp"
53 #include "classfile/moduleEntry.hpp"
54 #include "classfile/packageEntry.hpp"
55 #include "classfile/symbolTable.hpp"
56 #include "classfile/systemDictionary.hpp"
57 #include "classfile/systemDictionaryShared.hpp"
58 #include "classfile/vmClasses.hpp"
59 #include "logging/log.hpp"
60 #include "logging/logStream.hpp"
61 #include "memory/allocation.inline.hpp"
62 #include "memory/classLoaderMetaspace.hpp"
63 #include "memory/metadataFactory.hpp"
64 #include "memory/metaspace.hpp"
65 #include "memory/resourceArea.hpp"
66 #include "memory/universe.hpp"
67 #include "oops/access.inline.hpp"
68 #include "oops/jmethodIDTable.hpp"
69 #include "oops/klass.inline.hpp"
70 #include "oops/oop.inline.hpp"
71 #include "oops/oopHandle.inline.hpp"
72 #include "oops/verifyOopClosure.hpp"
73 #include "oops/weakHandle.inline.hpp"
74 #include "runtime/arguments.hpp"
75 #include "runtime/atomicAccess.hpp"
76 #include "runtime/handles.inline.hpp"
77 #include "runtime/mutex.hpp"
78 #include "runtime/safepoint.hpp"
79 #include "utilities/growableArray.hpp"
80 #include "utilities/macros.hpp"
81 #include "utilities/ostream.hpp"
82
// Singleton CLD for the bootstrap (null) class loader; set exactly once in
// init_null_class_loader_data().
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = nullptr;
84
void ClassLoaderData::init_null_class_loader_data() {
  assert(_the_null_class_loader_data == nullptr, "cannot initialize twice");
  assert(ClassLoaderDataGraph::_head == nullptr, "cannot initialize twice");

  // The boot loader's CLD is not a class-mirror holder; pass a null handle.
  _the_null_class_loader_data = new ClassLoaderData(Handle(), false);
  // It becomes the first entry in the CLD graph.
  ClassLoaderDataGraph::_head = _the_null_class_loader_data;
  assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");

  LogTarget(Trace, class, loader, data) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print("create ");
    _the_null_class_loader_data->print_value_on(&ls);
    ls.cr();
  }
}
102
103 // Obtain and set the class loader's name within the ClassLoaderData so
104 // it will be available for error messages, logging, JFR, etc. The name
105 // and klass are available after the class_loader oop is no longer alive,
106 // during unloading.
107 void ClassLoaderData::initialize_name(Handle class_loader) {
108 ResourceMark rm;
109
110 // Obtain the class loader's name. If the class loader's name was not
111 // explicitly set during construction, the CLD's _name field will be null.
112 oop cl_name = java_lang_ClassLoader::name(class_loader());
113 if (cl_name != nullptr) {
114 const char* cl_instance_name = java_lang_String::as_utf8_string(cl_name);
115
116 if (cl_instance_name != nullptr && cl_instance_name[0] != '\0') {
117 _name = SymbolTable::new_symbol(cl_instance_name);
118 }
119 }
120
121 // Obtain the class loader's name and identity hash. If the class loader's
122 // name was not explicitly set during construction, the class loader's name and id
123 // will be set to the qualified class name of the class loader along with its
124 // identity hash.
125 // If for some reason the ClassLoader's constructor has not been run, instead of
126 // leaving the _name_and_id field null, fall back to the external qualified class
127 // name. Thus CLD's _name_and_id field should never have a null value.
128 oop cl_name_and_id = java_lang_ClassLoader::nameAndId(class_loader());
129 const char* cl_instance_name_and_id =
130 (cl_name_and_id == nullptr) ? _class_loader_klass->external_name() :
131 java_lang_String::as_utf8_string(cl_name_and_id);
132 assert(cl_instance_name_and_id != nullptr && cl_instance_name_and_id[0] != '\0', "class loader has no name and id");
133 _name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id);
134 }
135
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder) :
  _metaspace(nullptr),
  _metaspace_lock(new Mutex(Mutex::nosafepoint-2, "MetaspaceAllocation_lock")),
  _unloading(false), _has_class_mirror_holder(has_class_mirror_holder),
  // Conservatively start as "modified"; cleared later via clear_modified_oops()
  // once the oops have been processed (see oops_do()).
  _modified_oops(true),
  // A non-strong hidden class loader data doesn't have anything to keep
  // it from being unloaded during parsing of the non-strong hidden class.
  // The null-class-loader should always be kept alive.
  _keep_alive_ref_count((has_class_mirror_holder || h_class_loader.is_null()) ? 1 : 0),
  _claim(0),
  _handles(),
  _klasses(nullptr), _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr), _dictionary(nullptr),
  _jmethod_ids(nullptr),
  _deallocate_list(nullptr),
  _next(nullptr),
  _unloading_next(nullptr),
  _class_loader_klass(nullptr), _name(nullptr), _name_and_id(nullptr) {

  if (!h_class_loader.is_null()) {
    // Keep a handle to the loader oop and cache its klass and name so they
    // remain available after the loader oop dies (used during unloading).
    _class_loader = _handles.add(h_class_loader());
    _class_loader_klass = h_class_loader->klass();
    initialize_name(h_class_loader);
  }

  if (!has_class_mirror_holder) {
    // The holder is initialized later for non-strong hidden classes,
    // and before calling anything that call class_loader().
    initialize_holder(h_class_loader);

    // A ClassLoaderData created solely for a non-strong hidden class should never
    // have a ModuleEntryTable or PackageEntryTable created for it.
    _packages = new PackageEntryTable();
    if (h_class_loader.is_null()) {
      // Create unnamed module for boot loader
      _unnamed_module = ModuleEntry::create_boot_unnamed_module(this);
    } else {
      // Create unnamed module for all other loaders
      _unnamed_module = ModuleEntry::create_unnamed_module(this);
    }
    _dictionary = create_dictionary();
  }

  NOT_PRODUCT(_dependency_count = 0); // number of class loader dependencies

  JFR_ONLY(INIT_ID(this);)
}
182
183 ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
184 Chunk* c = _head;
185 while (c != nullptr) {
186 Chunk* next = c->_next;
187 delete c;
188 c = next;
189 }
190 }
191
// Append an oop to the handle list and return a handle to its slot.
// Writers are serialized by the caller (metaspace_lock, see add_handle() and
// init_handle_locked()); readers are lock-free, so new chunks and the size
// bump are published with release stores.
OopHandle ClassLoaderData::ChunkedHandleList::add(oop o) {
  if (_head == nullptr || _head->_size == Chunk::CAPACITY) {
    // Current head chunk is full (or absent): prepend a fresh chunk.
    Chunk* next = new Chunk(_head);
    AtomicAccess::release_store(&_head, next);
  }
  // The slot has never held an oop, hence the IS_DEST_UNINITIALIZED store;
  // the slot only becomes visible to readers once _size is bumped (last).
  oop* handle = &_head->_data[_head->_size];
  NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
  AtomicAccess::release_store(&_head->_size, _head->_size + 1);
  return OopHandle(handle);
}
202
203 int ClassLoaderData::ChunkedHandleList::count() const {
204 int count = 0;
205 Chunk* chunk = AtomicAccess::load_acquire(&_head);
206 while (chunk != nullptr) {
207 count += AtomicAccess::load(&chunk->_size);
208 chunk = chunk->_next;
209 }
210 return count;
211 }
212
213 inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
214 for (juint i = 0; i < size; i++) {
215 f->do_oop(&c->_data[i]);
216 }
217 }
218
// Apply the closure to every handle in the list. Pairs with the release
// stores in add(): acquire the head, then read its size with acquire too,
// since the head chunk may still be growing. Non-head chunks are full and
// no longer mutated, so their sizes can be read plainly.
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
  Chunk* head = AtomicAccess::load_acquire(&_head);
  if (head != nullptr) {
    // Must be careful when reading size of head
    oops_do_chunk(f, head, AtomicAccess::load_acquire(&head->_size));
    for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
      oops_do_chunk(f, c, c->_size);
    }
  }
}
229
// OopClosure that records whether a specific target oop appears among the
// visited handles. Used by ChunkedHandleList::contains().
class VerifyContainsOopClosure : public OopClosure {
  oop _target;   // the oop being searched for
  bool _found;   // set once a matching handle is seen

public:
  VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}

  void do_oop(oop* p) {
    // Load without keep-alive: a pure identity check must not resurrect
    // a dying object.
    if (p != nullptr && NativeAccess<AS_NO_KEEPALIVE>::oop_load(p) == _target) {
      _found = true;
    }
  }

  void do_oop(narrowOop* p) {
    // The ChunkedHandleList should not contain any narrowOop
    ShouldNotReachHere();
  }

  bool found() const {
    return _found;
  }
};
252
253 bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
254 VerifyContainsOopClosure cl(p);
255 oops_do(&cl);
256 return cl.found();
257 }
258
259 #ifndef PRODUCT
260 bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
261 Chunk* chunk = AtomicAccess::load_acquire(&_head);
262 while (chunk != nullptr) {
263 if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[AtomicAccess::load(&chunk->_size)])) {
264 return true;
265 }
266 chunk = chunk->_next;
267 }
268 return false;
269 }
270 #endif // PRODUCT
271
// Clear the given claim bit(s) with a CAS loop so that concurrent updates
// to other bits in _claim are not disturbed.
void ClassLoaderData::clear_claim(int claim) {
  for (;;) {
    int old_claim = AtomicAccess::load(&_claim);
    if ((old_claim & claim) == 0) {
      return;  // already clear, nothing to do
    }
    int new_claim = old_claim & ~claim;
    if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
      return;  // CAS succeeded
    }
    // lost a CAS race; reload and retry
  }
}
284
#ifdef ASSERT
// Debug-only check that none of the given claim bits are currently set.
void ClassLoaderData::verify_not_claimed(int claim) {
  assert((_claim & claim) == 0, "Found claim: %d bits in _claim: %d", claim, _claim);
}
#endif
290
// Attempt to set the given claim bit(s). Returns true if this caller set
// them; false if all requested bits were already set (claimed by another).
bool ClassLoaderData::try_claim(int claim) {
  for (;;) {
    int old_claim = AtomicAccess::load(&_claim);
    if ((old_claim & claim) == claim) {
      return false;  // all requested bits already claimed
    }
    int new_claim = old_claim | claim;
    if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
      return true;  // we set at least one of the requested bits
    }
    // lost a CAS race; retry
  }
}
303
// Re-store every handle in the oop handle area through the proper barriers
// so that concurrent GCs observe the transition from "strong root" to
// "strong non-root handle" (details below).
void ClassLoaderData::demote_strong_roots() {
  // The oop handle area contains strong roots that the GC traces from. We are about
  // to demote them to strong native oops that the GC does *not* trace from. Conceptually,
  // we are retiring a rather normal strong root, and creating a strong non-root handle,
  // which happens to reuse the same address as the normal strong root had.
  // Unless we invoke the right barriers, the GC might not notice that a strong root
  // has been pulled from the system, and is left unprocessed by the GC. There can be
  // several consequences:
  // 1. A concurrently marking snapshot-at-the-beginning GC might assume that the contents
  //    of all strong roots get processed by the GC in order to keep them alive. Without
  //    barriers, some objects might not be kept alive.
  // 2. A concurrently relocating GC might assume that after moving an object, a subsequent
  //    tracing from all roots can fix all the pointers in the system, which doesn't play
  //    well with roots racingly being pulled.
  // 3. A concurrent GC using colored pointers, might assume that tracing the object graph
  //    from roots results in all pointers getting some particular color, which also doesn't
  //    play well with roots being pulled out from the system concurrently.

  class TransitionRootsOopClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // By loading the strong root with the access API, we can use the right barriers to
      // store the oop as a strong non-root handle, that happens to reuse the same memory
      // address as the strong root. The barriered store ensures that:
      // 1. The concurrent SATB marking properties are satisfied as the store will keep
      //    the oop alive.
      // 2. The concurrent object movement properties are satisfied as we store the address
      //    of the new location of the object, if any.
      // 3. The colors if any will be stored as the new good colors.
      oop obj = NativeAccess<>::oop_load(p); // Load the strong root
      NativeAccess<>::oop_store(p, obj); // Store the strong non-root
    }

    virtual void do_oop(narrowOop* p) {
      ShouldNotReachHere();
    }
  } cl;
  // Walk unconditionally (no claim), and do not clear the modified-oops flag.
  oops_do(&cl, ClassLoaderData::_claim_none, false /* clear_mod_oops */);
}
343
344 // Non-strong hidden classes have their own ClassLoaderData that is marked to keep alive
345 // while the class is being parsed, and if the class appears on the module fixup list.
346 // Due to the uniqueness that no other class shares the hidden class' name or
347 // ClassLoaderData, no other non-GC thread has knowledge of the hidden class while
348 // it is being defined, therefore _keep_alive_ref_count is not volatile or atomic.
void ClassLoaderData::inc_keep_alive_ref_count() {
  // Only CLDs of non-strong hidden classes use the ref count; other CLDs
  // stay alive via their holder instead.
  if (has_class_mirror_holder()) {
    // The count starts at 1 in the constructor, so it must be positive here.
    assert(_keep_alive_ref_count > 0, "Invalid keep alive increment count");
    _keep_alive_ref_count++;
  }
}
355
// Drop one keep-alive reference. When the last reference is about to be
// released, the handles must first be demoted with the proper GC barriers.
void ClassLoaderData::dec_keep_alive_ref_count() {
  if (has_class_mirror_holder()) {
    assert(_keep_alive_ref_count > 0, "Invalid keep alive decrement count");
    if (_keep_alive_ref_count == 1) {
      // When the keep_alive_ref_count counter is 1, the oop handle area is a strong root,
      // acting as input to the GC tracing. Such strong roots are part of the
      // snapshot-at-the-beginning, and can not just be pulled out from the
      // system when concurrent GCs are running at the same time, without
      // invoking the right barriers.
      demote_strong_roots();
    }
    _keep_alive_ref_count--;
  }
}
370
// Apply f to all oops held by this CLD. With a claim_value other than
// _claim_none, only the first caller to claim this CLD performs the walk,
// which avoids duplicated work when multiple workers traverse the graph.
void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oops) {
  if (claim_value != ClassLoaderData::_claim_none && !try_claim(claim_value)) {
    return;  // another thread already claimed this CLD for this pass
  }

  // Only clear modified_oops after the ClassLoaderData is claimed.
  if (clear_mod_oops) {
    clear_modified_oops();
  }

  _handles.oops_do(f);
}
383
384 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
385 // Lock-free access requires load_acquire
386 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
387 klass_closure->do_klass(k);
388 assert(k != k->next_link(), "no loops!");
389 }
390 }
391
392 void ClassLoaderData::classes_do(void f(Klass * const)) {
393 // Lock-free access requires load_acquire
394 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
395 f(k);
396 assert(k != k->next_link(), "no loops!");
397 }
398 }
399
400 void ClassLoaderData::methods_do(void f(Method*)) {
401 // Lock-free access requires load_acquire
402 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
403 if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
404 InstanceKlass::cast(k)->methods_do(f);
405 }
406 }
407 }
408
// Apply the closure only to klasses that have reached the loaded state.
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
  // Lock-free access requires load_acquire
  for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
    // Filter out InstanceKlasses (or their ObjArrayKlasses) that have not entered the
    // loaded state.
    if (k->is_instance_klass()) {
      if (!InstanceKlass::cast(k)->is_loaded()) {
        continue;
      }
    } else if (k->in_aot_cache() && k->is_objArray_klass()) {
      Klass* bottom = ObjArrayKlass::cast(k)->bottom_klass();
      if (bottom->is_instance_klass() && !InstanceKlass::cast(bottom)->is_loaded()) {
        // This could happen if <bottom> is a shared class that has been restored
        // but is not yet marked as loaded. All archived array classes of the
        // bottom class are already restored and placed in the _klasses list.
        continue;
      }
    }

#ifdef ASSERT
    // Every klass surviving the filter must have a valid Class mirror.
    oop m = k->java_mirror();
    assert(m != nullptr, "nullptr mirror");
    assert(m->is_a(vmClasses::Class_klass()), "invalid mirror");
#endif
    klass_closure->do_klass(k);
  }
}
436
437 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
438 // Lock-free access requires load_acquire
439 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
440 if (k->is_instance_klass()) {
441 f(InstanceKlass::cast(k));
442 }
443 assert(k != k->next_link(), "no loops!");
444 }
445 }
446
// Apply f to every module entry owned by this CLD, including the unnamed
// module. Requires Module_lock (or a safepoint).
void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
  assert_locked_or_safepoint(Module_lock);
  // The unnamed module is stored separately from the table, so visit it
  // explicitly.
  if (_unnamed_module != nullptr) {
    f(_unnamed_module);
  }
  if (_modules != nullptr) {
    _modules->modules_do(f);
  }
}
456
// Apply f to every package entry in this CLD's package table.
// Requires Module_lock (or a safepoint).
void ClassLoaderData::packages_do(void f(PackageEntry*)) {
  assert_locked_or_safepoint(Module_lock);
  // _packages is null for CLDs created for non-strong hidden classes
  // (see the constructor).
  if (_packages != nullptr) {
    _packages->packages_do(f);
  }
}
463
// Record that this CLD depends on klass k's CLD staying alive, by adding a
// strong handle to k's class loader (or mirror) when the dependency would
// not otherwise be found through GC tracing.
void ClassLoaderData::record_dependency(const Klass* k) {
  assert(k != nullptr, "invariant");

  ClassLoaderData * const from_cld = this;
  ClassLoaderData * const to_cld = k->class_loader_data();

  // Do not need to record dependency if the dependency is to a class whose
  // class loader data is never freed. (i.e. the dependency's class loader
  // is one of the three builtin class loaders and the dependency's class
  // loader data has a ClassLoader holder, not a Class holder.)
  if (to_cld->is_permanent_class_loader_data()) {
    return;
  }

  oop to;
  if (to_cld->has_class_mirror_holder()) {
    // Just return if a non-strong hidden class class is attempting to record a dependency
    // to itself. (Note that every non-strong hidden class has its own unique class
    // loader data.)
    if (to_cld == from_cld) {
      return;
    }
    // Hidden class dependencies are through the mirror.
    to = k->java_mirror();
  } else {
    to = to_cld->class_loader();
    oop from = from_cld->class_loader();

    // Just return if this dependency is to a class with the same or a parent
    // class_loader.
    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
      return; // this class loader is in the parent list, no need to add it.
    }
  }

  // It's a dependency we won't find through GC, add it.
  if (!_handles.contains(to)) {
    NOT_PRODUCT(AtomicAccess::inc(&_dependency_count));
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("adding dependency from ");
      print_value_on(&ls);
      ls.print(" to ");
      to_cld->print_value_on(&ls);
      ls.cr();
    }
    Handle dependency(Thread::current(), to);
    add_handle(dependency);
    // Added a potentially young gen oop to the ClassLoaderData
    record_modified_oops();
  }
}
518
// Prepend k to this CLD's klass list. The list is walked lock-free by
// readers, so publication happens with release_store under metaspace_lock.
void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
  {
    MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    Klass* old_value = _klasses;
    k->set_next_link(old_value);
    // Link the new item into the list, making sure the linked class is stable
    // since the list can be walked without a lock
    AtomicAccess::release_store(&_klasses, k);
    // Keep the graph-wide class counters in sync.
    if (k->is_array_klass()) {
      ClassLoaderDataGraph::inc_array_classes(1);
    } else {
      ClassLoaderDataGraph::inc_instance_classes(1);
    }
  }

  // Optionally trace the addition (outside the lock).
  if (publicize) {
    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      ls.print("Adding k: " PTR_FORMAT " %s to ", p2i(k), k->external_name());
      print_value_on(&ls);
      ls.cr();
    }
  }
}
545
546 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
547 if (loader_or_mirror() != nullptr) {
548 assert(_holder.is_null(), "never replace holders");
549 _holder = WeakHandle(Universe::vm_weak(), loader_or_mirror);
550 }
551 }
552
553 // Remove a klass from the _klasses list for scratch_class during redefinition
554 // or parsed class in the case of an error.
555 void ClassLoaderData::remove_class(Klass* scratch_class) {
556 assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
557
558 Klass* prev = nullptr;
559 for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
560 if (k == scratch_class) {
561 if (prev == nullptr) {
562 _klasses = k->next_link();
563 } else {
564 Klass* next = k->next_link();
565 prev->set_next_link(next);
566 }
567
568 if (k->is_array_klass()) {
569 ClassLoaderDataGraph::dec_array_classes(1);
570 } else {
571 ClassLoaderDataGraph::dec_instance_classes(1);
572 }
573
574 return;
575 }
576 prev = k;
577 assert(k != k->next_link(), "no loops!");
578 }
579 ShouldNotReachHere(); // should have found this class!!
580 }
581
// Remember a jmethodID handed out for a method of this CLD so it can be
// removed from the global table when the CLD unloads (see remove_jmethod_ids).
void ClassLoaderData::add_jmethod_id(jmethodID mid) {
  MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  if (_jmethod_ids == nullptr) {
    // Lazily allocated: most CLDs never hand out jmethodIDs.
    _jmethod_ids = new (mtClass) GrowableArray<jmethodID>(32, mtClass);
  }
  _jmethod_ids->push(mid);
}
589
590 // Method::remove_jmethod_ids removes jmethodID entries from the table which
591 // releases memory.
592 // Because native code (e.g., JVMTI agent) holding jmethod_ids may access them
593 // after the associated classes and class loader are unloaded, subsequent lookups
594 // for these ids will return null since they are no longer found in the table.
595 // The Java Native Interface Specification says "method ID
596 // does not prevent the VM from unloading the class from which the ID has
597 // been derived. After the class is unloaded, the method or field ID becomes
598 // invalid".
void ClassLoaderData::remove_jmethod_ids() {
  // Caller (unload) guarantees _jmethod_ids is non-null.
  MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
  // Remove each of our ids from the global table so later lookups return null.
  for (int i = 0; i < _jmethod_ids->length(); i++) {
    jmethodID mid = _jmethod_ids->at(i);
    JmethodIDTable::remove(mid);
  }
  delete _jmethod_ids;
  _jmethod_ids = nullptr;
}
608
// Mark this CLD as unloading and release the resources that can be freed
// before the CLD itself is deleted.
void ClassLoaderData::unload() {
  _unloading = true;

  LogTarget(Trace, class, loader, data) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    // NOTE(review): unlike init_null_class_loader_data's "create ", there is
    // no trailing space here — confirm print_value_on supplies a separator.
    ls.print("unload");
    print_value_on(&ls);
    ls.cr();
  }

  // Some items on the _deallocate_list need to free their C heap structures
  // if they are not already on the _klasses list.
  free_deallocate_list_C_heap_structures();

  // Clean up class dependencies and tell serviceability tools
  // these classes are unloading. This must be called
  // after erroneous classes are released.
  classes_do(InstanceKlass::unload_class);

  // Purge any jmethodIDs that were handed out for this CLD's methods.
  if (_jmethod_ids != nullptr) {
    remove_jmethod_ids();
  }
}
634
ModuleEntryTable* ClassLoaderData::modules() {
  // Lazily create the module entry table at first request, using
  // double-checked locking: lock-free fast path, Module_lock slow path.
  // Lock-free access requires load_acquire.
  ModuleEntryTable* modules = AtomicAccess::load_acquire(&_modules);
  if (modules == nullptr) {
    MutexLocker m1(Module_lock);
    // Check if _modules got allocated while we were waiting for this lock.
    if ((modules = _modules) == nullptr) {
      modules = new ModuleEntryTable();

      {
        MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
        // Ensure _modules is stable, since it is examined without a lock
        AtomicAccess::release_store(&_modules, modules);
      }
    }
  }
  return modules;
}
654
655 const int _boot_loader_dictionary_size = 1009;
656 const int _default_loader_dictionary_size = 107;
657
658 Dictionary* ClassLoaderData::create_dictionary() {
659 assert(!has_class_mirror_holder(), "class mirror holder cld does not have a dictionary");
660 int size;
661 if (_the_null_class_loader_data == nullptr) {
662 size = _boot_loader_dictionary_size;
663 } else if (is_system_class_loader_data()) {
664 size = _boot_loader_dictionary_size;
665 } else {
666 size = _default_loader_dictionary_size;
667 }
668 return new Dictionary(this, size);
669 }
670
671 // Tell the GC to keep this klass alive. Needed while iterating ClassLoaderDataGraph,
672 // and any runtime code that uses klasses.
673 oop ClassLoaderData::holder() const {
674 // A klass that was previously considered dead can be looked up in the
675 // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
676 // or a reachable object making it alive again. The SATB part of G1 needs
677 // to get notified about this potential resurrection, otherwise the marking
678 // might not find the object.
679 if (!_holder.is_null()) { // null class_loader
680 return _holder.resolve();
681 } else {
682 return nullptr;
683 }
684 }
685
686 // Let the GC read the holder without keeping it alive.
687 oop ClassLoaderData::holder_no_keepalive() const {
688 if (!_holder.is_null()) { // null class_loader
689 return _holder.peek();
690 } else {
691 return nullptr;
692 }
693 }
694
695 // Unloading support
696 bool ClassLoaderData::is_alive() const {
697 bool alive = (_keep_alive_ref_count > 0) // null class loader and incomplete non-strong hidden class.
698 || (_holder.peek() != nullptr); // and not cleaned by the GC weak handle processing.
699
700 return alive;
701 }
702
703 class ReleaseKlassClosure: public KlassClosure {
704 private:
705 size_t _instance_class_released;
706 size_t _array_class_released;
707 public:
708 ReleaseKlassClosure() : _instance_class_released(0), _array_class_released(0) { }
709
710 size_t instance_class_released() const { return _instance_class_released; }
711 size_t array_class_released() const { return _array_class_released; }
712
713 void do_klass(Klass* k) {
714 if (k->is_array_klass()) {
715 _array_class_released ++;
716 } else {
717 assert(k->is_instance_klass(), "Must be");
718 _instance_class_released ++;
719 }
720 k->release_C_heap_structures();
721 }
722 };
723
// Tear down everything this CLD owns. Runs after the CLD has been found
// dead; the order below matters (e.g. klasses before metaspace).
ClassLoaderData::~ClassLoaderData() {
  // Release C heap structures for all the classes.
  ReleaseKlassClosure cl;
  classes_do(&cl);

  // Adjust the graph-wide counters for the classes we just released.
  ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
  ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());

  // Release the WeakHandle
  _holder.release(Universe::vm_weak());

  // Release C heap allocated hashtable for all the packages.
  if (_packages != nullptr) {
    // Destroy the table itself
    delete _packages;
    _packages = nullptr;
  }

  // Release C heap allocated hashtable for all the modules.
  if (_modules != nullptr) {
    // Destroy the table itself
    delete _modules;
    _modules = nullptr;
  }

  // Release C heap allocated hashtable for the dictionary
  if (_dictionary != nullptr) {
    // Destroy the table itself
    delete _dictionary;
    _dictionary = nullptr;
  }

  // The unnamed module is owned directly by this CLD, not by a table.
  if (_unnamed_module != nullptr) {
    delete _unnamed_module;
    _unnamed_module = nullptr;
  }

  // release the metaspace
  ClassLoaderMetaspace *m = _metaspace;
  if (m != nullptr) {
    // Clear the field first so no one observes a dangling pointer.
    _metaspace = nullptr;
    delete m;
  }

  // Delete lock
  delete _metaspace_lock;

  // Delete free list
  if (_deallocate_list != nullptr) {
    delete _deallocate_list;
  }

  // Decrement refcounts of Symbols if created.
  if (_name != nullptr) {
    _name->decrement_refcount();
  }
  if (_name_and_id != nullptr) {
    _name_and_id->decrement_refcount();
  }
}
784
// Returns true if this class loader data is for the app class loader
// or a user defined system class loader. (Note that the class loader
// data may have a Class holder.)
bool ClassLoaderData::is_system_class_loader_data() const {
  // Delegates the loader-identity check to the SystemDictionary.
  return SystemDictionary::is_system_class_loader(class_loader());
}
791
// Returns true if this class loader data is for the platform class loader.
// (Note that the class loader data may have a Class holder.)
bool ClassLoaderData::is_platform_class_loader_data() const {
  // Delegates the loader-identity check to the SystemDictionary.
  return SystemDictionary::is_platform_class_loader(class_loader());
}
797
798 // Returns true if the class loader for this class loader data is one of
799 // the 3 builtin (boot application/system or platform) class loaders,
800 // including a user-defined system class loader. Note that if the class
801 // loader data is for a non-strong hidden class then it may
802 // get freed by a GC even if its class loader is one of these loaders.
803 bool ClassLoaderData::is_builtin_class_loader_data() const {
804 return (is_boot_class_loader_data() ||
805 SystemDictionary::is_system_class_loader(class_loader()) ||
806 SystemDictionary::is_platform_class_loader(class_loader()));
807 }
808
// Returns true if this class loader data is a class loader data
// that is not ever freed by a GC. It must be the CLD for one of the builtin
// class loaders and not the CLD for a non-strong hidden class.
bool ClassLoaderData::is_permanent_class_loader_data() const {
  // Builtin loader + ClassLoader (not Class) holder => never freed.
  return is_builtin_class_loader_data() && !has_class_mirror_holder();
}
815
// Returns this CLD's metaspace, creating it on first use via double-checked
// locking (lock-free fast path, _metaspace_lock slow path).
ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
  // If the metaspace has not been allocated, create a new one. Might want
  // to create smaller arena for Reflection class loaders also.
  // The reason for the delayed allocation is because some class loaders are
  // simply for delegating with no metadata of their own.
  // Lock-free access requires load_acquire.
  ClassLoaderMetaspace* metaspace = AtomicAccess::load_acquire(&_metaspace);
  if (metaspace == nullptr) {
    MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
    // Check if _metaspace got allocated while we were waiting for this lock.
    if ((metaspace = _metaspace) == nullptr) {
      // Pick the metaspace type matching the kind of loader this CLD serves.
      if (this == the_null_class_loader_data()) {
        assert (class_loader() == nullptr, "Must be");
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
      } else if (has_class_mirror_holder()) {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ClassMirrorHolderMetaspaceType);
      } else {
        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
      }
      // Ensure _metaspace is stable, since it is examined without a lock
      AtomicAccess::release_store(&_metaspace, metaspace);
    }
  }
  return metaspace;
}
841
// Add a strong handle for oop h to this CLD's handle area.
OopHandle ClassLoaderData::add_handle(Handle h) {
  MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  // The added oop may be young (see record_dependency); flag this CLD as
  // modified so the GC re-examines it.
  record_modified_oops();
  return _handles.add(h());
}
847
// Clear a handle previously returned by add_handle. The slot itself is not
// reclaimed until the CLD dies; it is only nulled out.
void ClassLoaderData::remove_handle(OopHandle h) {
  assert(!is_unloading(), "Do not remove a handle for a CLD that is unloading");
  if (!h.is_empty()) {
    // Debug-only sanity check: the handle must live in our handle area.
    assert(_handles.owner_of(h.ptr_raw()),
           "Got unexpected handle " PTR_FORMAT, p2i(h.ptr_raw()));
    h.replace(oop(nullptr));
  }
}
856
857 void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
858 MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
859 if (dest.resolve() != nullptr) {
860 return;
861 } else {
862 record_modified_oops();
863 dest = _handles.add(h());
864 }
865 }
866
867 // Add this metadata pointer to be freed when it's safe. This is only during
868 // a safepoint which checks if handles point to this metadata field.
869 void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
870 // Metadata in shared region isn't deleted.
871 if (!m->in_aot_cache()) {
872 MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
873 if (_deallocate_list == nullptr) {
874 _deallocate_list = new (mtClass) GrowableArray<Metadata*>(100, mtClass);
875 }
876 _deallocate_list->append_if_missing(m);
877 ResourceMark rm;
878 log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
879 ClassLoaderDataGraph::set_should_clean_deallocate_lists();
880 }
881 }
882
// Deallocate free metadata on the free list. How useful the PermGen was!
// Frees each queued Metadata entry whose on-stack mark is clear; entries
// still marked on-stack stay queued and a later cleaning pass is requested.
void ClassLoaderData::free_deallocate_list() {
  // This must be called at a safepoint because it depends on metadata walking at
  // safepoint cleanup time.
  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
  assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
  if (_deallocate_list == nullptr) {
    return;
  }
  // Go backwards because this removes entries that are freed.
  for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
    Metadata* m = _deallocate_list->at(i);
    if (!m->on_stack()) {
      // Not referenced from any frame; safe to free now.
      _deallocate_list->remove_at(i);
      // There are only three types of metadata that we deallocate directly.
      // Cast them so they can be used by the template function.
      if (m->is_method()) {
        MetadataFactory::free_metadata(this, (Method*)m);
      } else if (m->is_constantPool()) {
        MetadataFactory::free_metadata(this, (ConstantPool*)m);
      } else if (m->is_klass()) {
        MetadataFactory::free_metadata(this, (InstanceKlass*)m);
      } else {
        ShouldNotReachHere();
      }
    } else {
      // Metadata is alive.
      // If scratch_class is on stack then it shouldn't be on this list!
      assert(!m->is_klass() || !((InstanceKlass*)m)->is_scratch_class(),
             "scratch classes on this list should be dead");
      // Also should assert that other metadata on the list was found in handles.
      // Some cleaning remains: ask for another pass at a later safepoint.
      ClassLoaderDataGraph::set_should_clean_deallocate_lists();
    }
  }
}
919
// This is distinct from free_deallocate_list. For class loader data that are
// unloading, this frees the C heap memory for items on the list, and unlinks
// scratch or error classes so that unloading events aren't triggered for these
// classes. The metadata is removed with the unloading metaspace.
// There isn't C heap memory allocated for methods, so nothing is done for them.
void ClassLoaderData::free_deallocate_list_C_heap_structures() {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  assert(is_unloading(), "only called for ClassLoaderData that are unloading");
  if (_deallocate_list == nullptr) {
    return;
  }
  // Go backwards because this removes entries that are freed.
  for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
    Metadata* m = _deallocate_list->at(i);
    _deallocate_list->remove_at(i);
    if (m->is_constantPool()) {
      ((ConstantPool*)m)->release_C_heap_structures();
    } else if (m->is_klass()) {
      InstanceKlass* ik = (InstanceKlass*)m;
      // also releases ik->constants() C heap memory
      ik->release_C_heap_structures();
      // Remove the class so unloading events aren't triggered for
      // this class (scratch or error class) in do_unloading().
      remove_class(ik);
      // But still have to remove it from the dumptime_table.
      SystemDictionaryShared::handle_class_unloading(ik);
    }
    // Methods fall through with no action (no C heap memory, see above).
  }
}
949
950 // Caller needs ResourceMark
951 // If the class loader's _name has not been explicitly set, the class loader's
952 // qualified class name is returned.
953 const char* ClassLoaderData::loader_name() const {
954 if (_class_loader_klass == nullptr) {
955 return BOOTSTRAP_LOADER_NAME;
956 } else if (_name != nullptr) {
957 return _name->as_C_string();
958 } else {
959 return _class_loader_klass->external_name();
960 }
961 }
962
963 // Caller needs ResourceMark
964 // Format of the _name_and_id is as follows:
965 // If the defining loader has a name explicitly set then '<loader-name>' @<id>
966 // If the defining loader has no name then <qualified-class-name> @<id>
967 // If built-in loader, then omit '@<id>' as there is only one instance.
968 const char* ClassLoaderData::loader_name_and_id() const {
969 if (_class_loader_klass == nullptr) {
970 return "'" BOOTSTRAP_LOADER_NAME "'";
971 } else if (_name_and_id != nullptr) {
972 return _name_and_id->as_C_string();
973 } else {
974 // May be called in a race before _name_and_id is initialized.
975 return _class_loader_klass->external_name();
976 }
977 }
978
979 void ClassLoaderData::print_value_on(outputStream* out) const {
980 if (!is_unloading() && class_loader() != nullptr) {
981 out->print("loader data: " INTPTR_FORMAT " for instance ", p2i(this));
982 class_loader()->print_value_on(out); // includes loader_name_and_id() and address of class loader instance
983 } else {
984 // loader data: 0xsomeaddr of 'bootstrap'
985 out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id());
986 }
987 if (_has_class_mirror_holder) {
988 out->print(" has a class holder");
989 }
990 }
991
992 void ClassLoaderData::print_value() const { print_value_on(tty); }
993
994 #ifndef PRODUCT
// Helper closure for print_on (Verbose mode): prints each visited klass's
// external name followed by a comma.
class PrintKlassClosure: public KlassClosure {
  outputStream* _out;  // destination stream for the names
public:
  PrintKlassClosure(outputStream* out): _out(out) { }

  void do_klass(Klass* k) {
    // ResourceMark scopes the C-string produced by external_name().
    ResourceMark rm;
    _out->print("%s,", k->external_name());
  }
};
1005
1006 void ClassLoaderData::print_on(outputStream* out) const {
1007 ResourceMark rm;
1008 out->print_cr("ClassLoaderData(" INTPTR_FORMAT ")", p2i(this));
1009 out->print_cr(" - name %s", loader_name_and_id());
1010 if (!_holder.is_null()) {
1011 out->print (" - holder ");
1012 _holder.print_on(out);
1013 out->print_cr("");
1014 }
1015 if (!_unloading) {
1016 out->print_cr(" - class loader " INTPTR_FORMAT, p2i(_class_loader.peek()));
1017 } else {
1018 out->print_cr(" - class loader <unloading, oop is bad>");
1019 }
1020 out->print_cr(" - metaspace " INTPTR_FORMAT, p2i(_metaspace));
1021 out->print_cr(" - unloading %s", _unloading ? "true" : "false");
1022 out->print_cr(" - class mirror holder %s", _has_class_mirror_holder ? "true" : "false");
1023 out->print_cr(" - modified oops %s", _modified_oops ? "true" : "false");
1024 out->print_cr(" - _keep_alive_ref_count %d", _keep_alive_ref_count);
1025 out->print (" - claim ");
1026 switch(_claim) {
1027 case _claim_none: out->print_cr("none"); break;
1028 case _claim_finalizable: out->print_cr("finalizable"); break;
1029 case _claim_strong: out->print_cr("strong"); break;
1030 case _claim_stw_fullgc_mark: out->print_cr("stw full gc mark"); break;
1031 case _claim_stw_fullgc_adjust: out->print_cr("stw full gc adjust"); break;
1032 case _claim_other: out->print_cr("other"); break;
1033 case _claim_other | _claim_finalizable: out->print_cr("other and finalizable"); break;
1034 case _claim_other | _claim_strong: out->print_cr("other and strong"); break;
1035 default: ShouldNotReachHere();
1036 }
1037 out->print_cr(" - handles %d", _handles.count());
1038 out->print_cr(" - dependency count %d", _dependency_count);
1039 out->print (" - klasses { ");
1040 if (Verbose) {
1041 PrintKlassClosure closure(out);
1042 ((ClassLoaderData*)this)->classes_do(&closure);
1043 } else {
1044 out->print("...");
1045 }
1046 out->print_cr(" }");
1047 out->print_cr(" - packages " INTPTR_FORMAT, p2i(_packages));
1048 out->print_cr(" - module " INTPTR_FORMAT, p2i(_modules));
1049 out->print_cr(" - unnamed module " INTPTR_FORMAT, p2i(_unnamed_module));
1050 if (_dictionary != nullptr) {
1051 out->print (" - dictionary " INTPTR_FORMAT " ", p2i(_dictionary));
1052 _dictionary->print_size(out);
1053 } else {
1054 out->print_cr(" - dictionary " INTPTR_FORMAT, p2i(_dictionary));
1055 }
1056 if (_jmethod_ids != nullptr) {
1057 out->print_cr(" - jmethod count %d", _jmethod_ids->length());
1058 }
1059 out->print_cr(" - deallocate list " INTPTR_FORMAT, p2i(_deallocate_list));
1060 out->print_cr(" - next CLD " INTPTR_FORMAT, p2i(_next));
1061 }
1062 #endif // PRODUCT
1063
1064 void ClassLoaderData::print() const { print_on(tty); }
1065
// Closure used by ClassLoaderData::verify() to check the oops stored in the
// CLD handle area, iterating into each non-mirror oop with VerifyOopClosure.
class VerifyHandleOops : public OopClosure {
  VerifyOopClosure vc;  // verifies each oop reached from a handle
public:
  virtual void do_oop(oop* p) {
    if (p != nullptr && *p != nullptr) {
      oop o = *p;
      if (!java_lang_Class::is_instance(o)) {
        // is_instance will assert for an invalid oop.
        // Walk the resolved_references array and other assorted oops in the
        // CLD::_handles field. The mirror oops are followed by other heap roots.
        o->oop_iterate(&vc);
      }
    }
  }
  // Handles hold full-width oops only; a narrowOop here would be a bug.
  virtual void do_oop(narrowOop* o) { ShouldNotReachHere(); }
};
1082
// Sanity-checks this CLD's invariants: loader-oop back-pointer, metaspace
// integrity, klass list back-links, module table, queued-for-deallocation
// klasses, and every oop in the handle area.
void ClassLoaderData::verify() {
  assert_locked_or_safepoint(_metaspace_lock);
  oop cl = class_loader();

  // The CLD must be the one registered for its loader oop, except for
  // class-mirror-holder CLDs; only the boot CLD (or a mirror-holder CLD)
  // may have a null loader oop.
  guarantee(this == class_loader_data(cl) || has_class_mirror_holder(), "Must be the same");
  guarantee(cl != nullptr || this == ClassLoaderData::the_null_class_loader_data() || has_class_mirror_holder(), "must be");

  // Verify the integrity of the allocated space.
#ifdef ASSERT
  if (metaspace_or_null() != nullptr) {
    metaspace_or_null()->verify();
  }
#endif

  // Every klass in the list must point back to this CLD and verify cleanly.
  for (Klass* k = _klasses; k != nullptr; k = k->next_link()) {
    guarantee(k->class_loader_data() == this, "Must be the same");
    k->verify();
    assert(k != k->next_link(), "no loops!");
  }

  if (_modules != nullptr) {
    _modules->verify();
  }

  // Klasses queued for deallocation are still live metadata until freed,
  // so they must verify too.
  if (_deallocate_list != nullptr) {
    for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
      Metadata* m = _deallocate_list->at(i);
      if (m->is_klass()) {
        ((Klass*)m)->verify();
      }
    }
  }

  // Check the oops in the handles area
  VerifyHandleOops vho;
  oops_do(&vho, _claim_none, false);
}
1120
1121 bool ClassLoaderData::contains_klass(Klass* klass) {
1122 // Lock-free access requires load_acquire
1123 for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
1124 if (k == klass) return true;
1125 }
1126 return false;
1127 }