/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = nullptr;
OopStorage* JNIHandles::_weak_global_handles = nullptr;

void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak", mtInternal);
}

jobject JNIHandles::make_local(oop obj) {
  return make_local(JavaThread::current(), obj);
}

// Used by NewLocalRef, which requires null on out-of-memory.
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
  if (obj == nullptr) {
    return nullptr;                // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(!current_thread_in_native(), "must not be in native");
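    // Local handles carry a zero type tag (asserted below), so the raw
    // address of the handle-block slot can be returned without any tagging
    // arithmetic.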
    STATIC_ASSERT(TypeTag::local == 0);
    return thread->active_handles()->allocate_handle(thread, obj, alloc_failmode);
  }
}

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_stw_gc_active(), "can't extend the root set during GC pause");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return nullptr on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<>::oop_store(ptr, obj());
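      // Tag the entry address with the handle kind. OopStorage entries are
      // pointer-aligned, so the low bits are free to hold the type tag, and
      // resolve() strips the tag again before dereferencing.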
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::global;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}

jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_stw_gc_active(), "can't extend the root set during GC pause");
  assert(!current_thread_in_native(), "must not be in native");
  jweak res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return nullptr on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
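      // Store with phantom-reference semantics: the GC may clear this entry
      // once the referent is no longer reachable, which is what makes the
      // handle weak.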
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::weak_global;
      res = reinterpret_cast<jweak>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}

// Resolve some erroneous cases to null, rather than treating them as
// possibly unchecked errors.  In particular, deleted handles are
// treated as null (though a deleted and later reallocated handle
// isn't detected).
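// For example, a handle whose entry was released by DeleteGlobalRef or
// DeleteWeakGlobalRef resolves to null here instead of being an unchecked
// error; if the entry has since been handed out again, we resolve to the
// new object without complaint.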
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = nullptr;
  if (handle != nullptr) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

bool JNIHandles::is_weak_global_cleared(jweak handle) {
  assert(handle != nullptr, "precondition");
  oop* oop_ptr = weak_global_ptr(handle);
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == nullptr;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != nullptr) {
    oop* oop_ptr = global_ptr(handle);
    NativeAccess<>::oop_store(oop_ptr, (oop)nullptr);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jweak handle) {
  if (handle != nullptr) {
    oop* oop_ptr = weak_global_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)nullptr);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_weak_global_tagged(handle)) {
    if (is_storage_handle(weak_global_handles(), weak_global_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else if (is_global_tagged(handle)) {
    switch (global_handles()->allocation_status(global_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;                    // Invalid global handle

    default:
      ShouldNotReachHere();
    }
  } else if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
    // Not in global storage.  Might be a local handle.
    result = JNILocalRefType;
  }
  return result;
}


bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
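  // Each PushLocalFrame links the previously active chain of blocks through
  // the pop_frame_link, so following those links visits every live local
  // frame.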
  while (block != nullptr) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate any particular stack frame the handle might
// come from, so we check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != nullptr, "precondition");
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_global_tagged(handle) && is_storage_handle(global_handles(), global_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_weak_global_tagged(handle) && is_storage_handle(weak_global_handles(), weak_global_ptr(handle));
}

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: %zu, weak refs: %zu",
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::cast(thread)->thread_state() == _thread_in_native);
}

int JNIHandleBlock::_blocks_allocated = 0;

static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
  return value & ~(uintptr_t)1u;
}

// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
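// For example (illustrative addresses): a free slot at address A whose
// successor free slot is at address B stores tag_free_list((uintptr_t)B),
// i.e. B | 1. Heap oops are at least pointer-aligned, so their low bit is
// never set, and is_tagged_free_list() cleanly separates links from live
// handles.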
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to null, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
  // The VM thread can allocate a handle block on behalf of another thread during a safepoint.
  assert(thread == nullptr || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != nullptr && thread->free_handle_block() != nullptr) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Allocate new block
    if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
      block = new (std::nothrow) JNIHandleBlock();
      if (block == nullptr) {
        return nullptr;
      }
    } else {
      block = new JNIHandleBlock();
    }
    Atomic::inc(&_blocks_allocated);
    block->zap();
  }
  block->_top = 0;
  block->_next = nullptr;
  block->_pop_frame_link = nullptr;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = nullptr);
  debug_only(block->_free_list = nullptr);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
  assert(thread == nullptr || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that thread == nullptr serves as an implicit argument that we
  // _don't_ want the block to be kept on the thread-local free list.
  // See for instance JavaThread::exit().
  if (thread != nullptr) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = nullptr;
    thread->set_free_handle_block(block);

    // Append the original free list to the end of the chain.
    if (freelist != nullptr) {
      while (block->_next != nullptr) {
        block = block->_next;
      }
      block->_next = freelist;
    }
    block = nullptr;
  } else {
    DEBUG_ONLY(block->set_pop_frame_link(nullptr));
    while (block != nullptr) {
      JNIHandleBlock* next = block->_next;
      Atomic::dec(&_blocks_allocated);
      assert(block->pop_frame_link() == nullptr, "pop_frame_link should be null");
      delete block;
      block = next;
    }
  }
  if (pop_frame_link != nullptr) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != nullptr) {
    for (JNIHandleBlock* current = current_chain; current != nullptr;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == nullptr,
             "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // Traverse heap pointers only, not deleted handles or free list
        // pointers.
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // The next handle block is valid only if the current block is full.
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation, or the initial block got zapped when
    // entering a native function. If there are any following blocks, they
    // are not valid anymore.
    for (JNIHandleBlock* current = _next; current != nullptr;
         current = current->_next) {
      assert(current->_last == nullptr, "only first block should have _last set");
      assert(current->_free_list == nullptr,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // Once we reach a cleared block, all trailing blocks are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != nullptr; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = nullptr;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != nullptr) {
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != nullptr) {
    // Update _last and retry
    _last = _last->_next;
    return allocate_handle(caller, obj, alloc_failmode);
  }

  // No space available, we have to rebuild the free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
    if (_last->_next == nullptr) {
      return nullptr;
    }
    _last = _last->_next;
    _allocate_before_rebuild--;
  }
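  // Retry: the rebuild or expansion above guarantees that the next attempt
  // makes forward progress, so this tail recursion terminates.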
  return allocate_handle(caller, obj, alloc_failmode);  // retry
}

void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == nullptr, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != nullptr; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // This handle was cleared out by a delete call; reuse it.
        *handle = _free_list == nullptr ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // We should not rebuild the free list if there are unused handles at the end.
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
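  // Worked example (illustrative numbers): with block_size_in_oops == 32,
  // two full blocks (total == 64) and 20 free handles, extra == 64 - 40 == 24,
  // so ceil(24 / 32) == 1 new block is appended before the next rebuild.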
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != nullptr; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}