/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = nullptr;
OopStorage* JNIHandles::_weak_global_handles = nullptr;

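// Called during VM initialization to create the OopStorage instances that
// back global and weak global JNI handles.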
void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak", mtInternal);
}

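// Local handles live in the current thread's JNIHandleBlock chain rather
// than in OopStorage. TypeTag::local is zero (see the STATIC_ASSERT below),
// so a local handle is an untagged pointer into a block.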
jobject JNIHandles::make_local(oop obj) {
  return make_local(JavaThread::current(), obj);
}

// Used by NewLocalRef, which requires null on out-of-memory.
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
  if (obj == nullptr) {
    return nullptr;  // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(!current_thread_in_native(), "must not be in native");
    STATIC_ASSERT(TypeTag::local == 0);
    return thread->active_handles()->allocate_handle(thread, obj, alloc_failmode);
  }
}

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

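// A global handle is an entry in the global OopStorage; the returned jobject
// is the entry's address with TypeTag::global set in its low bits, so the
// handle's kind can be recovered when it is later resolved or destroyed.
// Reached, for example, via the JNI NewGlobalRef entry point.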
jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_stw_gc_active(), "can't extend the root set during GC pause");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return null on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::global;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}

jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_stw_gc_active(), "can't extend the root set during GC pause");
  assert(!current_thread_in_native(), "must not be in native");
  jweak res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return null on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::weak_global;
      res = reinterpret_cast<jweak>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}

// Resolve some erroneous cases to null, rather than treating them as
// possibly unchecked errors. In particular, deleted handles are
// treated as null (though a deleted and later reallocated handle
// isn't detected).
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = nullptr;
  if (handle != nullptr) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

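// Load the referent without keeping it alive: a weak global handle whose
// referent has been collected reads back as null.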
bool JNIHandles::is_weak_global_cleared(jweak handle) {
  assert(handle != nullptr, "precondition");
  oop* oop_ptr = weak_global_ptr(handle);
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == nullptr;
}

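// Clear the stored oop through the store barrier, then return the entry to
// the storage's free list.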
void JNIHandles::destroy_global(jobject handle) {
  if (handle != nullptr) {
    oop* oop_ptr = global_ptr(handle);
    NativeAccess<>::oop_store(oop_ptr, (oop)nullptr);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jweak handle) {
  if (handle != nullptr) {
    oop* oop_ptr = weak_global_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)nullptr);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


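// Used to implement JNI GetObjectRefType: decode the handle's type tag, then
// validate the candidate against its backing storage (for global and weak
// global handles) or against the thread's local handles and stack.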
jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_weak_global_tagged(handle)) {
    if (is_storage_handle(weak_global_handles(), weak_global_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else if (is_global_tagged(handle)) {
    switch (global_handles()->allocation_status(global_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;  // Invalid global handle

    default:
      ShouldNotReachHere();
    }
  } else if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
    // Not in global storage.  Might be a local handle.
    result = JNILocalRefType;
  }
  return result;
}


bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != nullptr) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate any particular stack frame the handle might
// come from, so we'll check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != nullptr, "precondition");
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_global_tagged(handle) && is_storage_handle(global_handles(), global_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_weak_global_tagged(handle) && is_storage_handle(weak_global_handles(), weak_global_ptr(handle));
}

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: %zu, weak refs: %zu",
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

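// Closure used by verify(): loads each root with a raw (barrier-free) access
// and checks that it is a valid oop or null.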
class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::cast(thread)->thread_state() == _thread_in_native);
}

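// Used to implement JNI IsSameObject: plain reference equality, except that
// with Valhalla two distinct non-null value objects of the same inline class
// must be compared with a full substitutability test.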
bool JNIHandles::is_same_object(jobject handle1, jobject handle2) {
  oop obj1 = resolve_no_keepalive(handle1);
  oop obj2 = resolve_no_keepalive(handle2);

  bool ret = obj1 == obj2;

  if (EnableValhalla) {
    if (!ret && obj1 != nullptr && obj2 != nullptr && obj1->klass() == obj2->klass() && obj1->klass()->is_inline_klass()) {
      // The two references differ, are non-null, and both refer to value
      // objects of the same inline class, so a full substitutability test
      // is required. Call ValueObjectMethods.isSubstitutable(), similar to
      // InterpreterRuntime::is_substitutable.
      JavaThread* THREAD = JavaThread::current();
      Handle ha(THREAD, obj1);
      Handle hb(THREAD, obj2);
      JavaValue result(T_BOOLEAN);
      JavaCallArguments args;
      args.push_oop(ha);
      args.push_oop(hb);
      methodHandle method(THREAD, Universe::is_substitutable_method());
      JavaCalls::call(&result, method, &args, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // Something really bad happened, because isSubstitutable() should not throw.
        // If it is an error, just let it propagate.
        // If it is an exception, wrap it in an InternalError.
        if (!PENDING_EXCEPTION->is_a(vmClasses::Error_klass())) {
          Handle e(THREAD, PENDING_EXCEPTION);
          CLEAR_PENDING_EXCEPTION;
          THROW_MSG_CAUSE_(vmSymbols::java_lang_InternalError(), "Internal error in substitutability test", e, false);
        }
      }
      ret = result.get_jboolean();
    }
  }

  return ret;
}


int JNIHandleBlock::_blocks_allocated = 0;

static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
  return value & ~(uintptr_t)1u;
}

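// Oops are at least word-aligned, so a stored oop always has a zero low bit,
// while a free-list link is stored with its low bit set (e.g. a link of
// 0x7f00 is stored as 0x7f01); that is what the helpers above rely on.
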
// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to null, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
  // The VM thread can allocate a handle block on behalf of another thread during a safepoint.
  assert(thread == nullptr || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != nullptr && thread->free_handle_block() != nullptr) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Allocate new block
    if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
      block = new (std::nothrow) JNIHandleBlock();
      if (block == nullptr) {
        return nullptr;
      }
    } else {
      block = new JNIHandleBlock();
    }
    AtomicAccess::inc(&_blocks_allocated);
    block->zap();
  }
  block->_top = 0;
  block->_next = nullptr;
  block->_pop_frame_link = nullptr;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  DEBUG_ONLY(block->_last = nullptr);
  DEBUG_ONLY(block->_free_list = nullptr);
  DEBUG_ONLY(block->_allocate_before_rebuild = -1);
  return block;
}


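// Return a block chain to the thread-local free list for reuse, or delete
// the blocks outright when thread is null (see e.g. JavaThread::exit()).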
void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
  assert(thread == nullptr || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put the returned block at the beginning of the thread-local free list.
  // Note that a null thread serves as an implicit argument that the block
  // should _not_ be kept on the thread's free_handle_block list.
  // See for instance JavaThread::exit().
  if (thread != nullptr) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = nullptr;
    thread->set_free_handle_block(block);

    // Add the original free list to the end of the chain.
    if (freelist != nullptr) {
      while (block->_next != nullptr) block = block->_next;
      block->_next = freelist;
    }
    block = nullptr;
  } else {
    DEBUG_ONLY(block->set_pop_frame_link(nullptr));
    while (block != nullptr) {
      JNIHandleBlock* next = block->_next;
      AtomicAccess::dec(&_blocks_allocated);
      assert(block->pop_frame_link() == nullptr, "pop_frame_link should be null");
      delete block;
      block = next;
    }
  }
  if (pop_frame_link != nullptr) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != nullptr) {
    for (JNIHandleBlock* current = current_chain; current != nullptr;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == nullptr,
             "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


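// Allocation strategy, in order: bump-allocate from the last block, take an
// entry from the free list, advance into an unused trailing block, and
// finally either rebuild the free list or append a new block (bounded by
// the _allocate_before_rebuild counter).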
jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation, or the initial block got zapped when
    // entering a native function. If we have any following blocks, they
    // are not valid anymore.
    for (JNIHandleBlock* current = _next; current != nullptr;
         current = current->_next) {
      assert(current->_last == nullptr, "only first block should have _last set");
      assert(current->_free_list == nullptr,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first clear trailing block are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != nullptr; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = nullptr;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != nullptr) {
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last one
  if (_last->_next != nullptr) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(caller, obj, alloc_failmode);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();  // updates _allocate_before_rebuild counter
  } else {
    _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
    if (_last->_next == nullptr) {
      return nullptr;
    }
    _last = _last->_next;
    _allocate_before_rebuild--;
  }
  return allocate_handle(caller, obj, alloc_failmode);  // retry
}

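// Scan all blocks for entries cleared by handle deletion and chain them into
// the free list, then decide how many fresh blocks to append before the next
// rebuild attempt, based on how much was reclaimed.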
void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == nullptr, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != nullptr; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // this handle was cleared out by a delete call, reuse it
        *handle = _free_list == nullptr ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


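// A handle belongs to this block iff its address lies within the used
// portion of the _handles array.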
bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != nullptr; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}