/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = nullptr;
OopStorage* JNIHandles::_weak_global_handles = nullptr;

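// Called during VM initialization to create the OopStorage instances that
// back global and weak global JNI handles.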
void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak", mtInternal);
}

jobject JNIHandles::make_local(oop obj) {
  return make_local(JavaThread::current(), obj);
}

// Used by NewLocalRef, which requires null on out-of-memory
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
  if (obj == nullptr) {
    return nullptr;                // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(!current_thread_in_native(), "must not be in native");
    STATIC_ASSERT(TypeTag::local == 0);
    return thread->active_handles()->allocate_handle(thread, obj, alloc_failmode);
  }
}

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

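// JNI handles are tagged pointers into the backing storage: the low bits of
// the returned jobject encode the handle kind (TypeTag::local, global or
// weak_global), and the resolve/global_ptr/weak_global_ptr accessors strip
// the tag again.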
jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return null on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::global;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}

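// Weak global handles have phantom reference strength: the GC may clear the
// referent once it is otherwise unreachable, so all stores and loads go
// through ON_PHANTOM_OOP_REF accessors.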
jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jweak res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return null on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::weak_global;
      res = reinterpret_cast<jweak>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}

// Resolve some erroneous cases to null, rather than treating them as
// possibly unchecked errors.  In particular, deleted handles are
// treated as null (though a deleted and later reallocated handle
// isn't detected).
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = nullptr;
  if (handle != nullptr) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

bool JNIHandles::is_weak_global_cleared(jweak handle) {
  assert(handle != nullptr, "precondition");
  oop* oop_ptr = weak_global_ptr(handle);
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == nullptr;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != nullptr) {
    oop* oop_ptr = global_ptr(handle);
    NativeAccess<>::oop_store(oop_ptr, (oop)nullptr);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jweak handle) {
  if (handle != nullptr) {
    oop* oop_ptr = weak_global_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)nullptr);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


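// Implements JNI GetObjectRefType: classify a handle from its tag bits, then
// validate it against the backing storage.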
jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_weak_global_tagged(handle)) {
    if (is_storage_handle(weak_global_handles(), weak_global_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else if (is_global_tagged(handle)) {
    switch (global_handles()->allocation_status(global_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;                    // Invalid global handle

    default:
      ShouldNotReachHere();
    }
  } else {
    // Not in global storage.  Might be a local handle.
    if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
      result = JNILocalRefType;
    } else {
      ShouldNotReachHere();
    }
  }
  return result;
}


bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != nullptr) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate the particular stack frame the handle might
// come from, so we check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != nullptr, "precondition");
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_global_tagged(handle) && is_storage_handle(global_handles(), global_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_weak_global_tagged(handle) && is_storage_handle(weak_global_handles(), weak_global_ptr(handle));
}

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

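// Closure used by verify(): checks that every root in the global and weak
// global storages refers to a well-formed oop (or is null).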
class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::cast(thread)->thread_state() == _thread_in_native);
}

bool JNIHandles::is_same_object(jobject handle1, jobject handle2) {
  oop obj1 = resolve_no_keepalive(handle1);
  oop obj2 = resolve_no_keepalive(handle2);

  bool ret = obj1 == obj2;

  if (EnableValhalla) {
    if (!ret && obj1 != nullptr && obj2 != nullptr && obj1->klass() == obj2->klass() && obj1->klass()->is_inline_klass()) {
      // The two references differ, both are non-null, and both refer to values
      // of the same inline type, so a full substitutability test is required,
      // calling ValueObjectMethods.isSubstitutable() (similar to
      // InterpreterRuntime::is_substitutable).
      JavaThread* THREAD = JavaThread::current();
      Handle ha(THREAD, obj1);
      Handle hb(THREAD, obj2);
      JavaValue result(T_BOOLEAN);
      JavaCallArguments args;
      args.push_oop(ha);
      args.push_oop(hb);
      methodHandle method(THREAD, Universe::is_substitutable_method());
      JavaCalls::call(&result, method, &args, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // Something really bad happened, because isSubstitutable() should not
        // throw exceptions. If it is an error, just let it propagate; if it is
        // an exception, wrap it in an InternalError.
        if (!PENDING_EXCEPTION->is_a(vmClasses::Error_klass())) {
          Handle e(THREAD, PENDING_EXCEPTION);
          CLEAR_PENDING_EXCEPTION;
          THROW_MSG_CAUSE_(vmSymbols::java_lang_InternalError(), "Internal error in substitutability test", e, false);
        }
      }
      ret = result.get_jboolean();
    }
  }

  return ret;
}


int JNIHandleBlock::_blocks_allocated = 0;

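// Oops are at least pointer-aligned, so the low bit of a real oop value is
// always clear; tag_free_list sets it to mark a slot as a free-list link.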
static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
  return value & ~(uintptr_t)1u;
}

// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to null, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
  // The VM thread can allocate a handle block on behalf of another thread during a safepoint.
  assert(thread == nullptr || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != nullptr && thread->free_handle_block() != nullptr) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Allocate new block
    if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
      block = new (std::nothrow) JNIHandleBlock();
      if (block == nullptr) {
        return nullptr;
      }
    } else {
      block = new JNIHandleBlock();
    }
    Atomic::inc(&_blocks_allocated);
    block->zap();
  }
  block->_top = 0;
  block->_next = nullptr;
  block->_pop_frame_link = nullptr;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = nullptr);
  debug_only(block->_free_list = nullptr);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
  assert(thread == nullptr || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that if thread == nullptr, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the free_handle_block.
  // See for instance JavaThread::exit().
  if (thread != nullptr) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = nullptr;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if (freelist != nullptr) {
      while (block->_next != nullptr) block = block->_next;
      block->_next = freelist;
    }
    block = nullptr;
  } else {
    DEBUG_ONLY(block->set_pop_frame_link(nullptr));
    while (block != nullptr) {
      JNIHandleBlock* next = block->_next;
      Atomic::dec(&_blocks_allocated);
      assert(block->pop_frame_link() == nullptr, "pop_frame_link should be null");
      delete block;
      block = next;
    }
  }
  if (pop_frame_link != nullptr) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != nullptr) {
    for (JNIHandleBlock* current = current_chain; current != nullptr;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == nullptr,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


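// Allocate a handle slot from this block chain: first try the current last
// block, then the free list of deleted slots, then an unused trailing block,
// and finally either rebuild the free list or append a fresh block.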
jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != nullptr;
         current = current->_next) {
      assert(current->_last == nullptr, "only first block should have _last set");
      assert(current->_free_list == nullptr,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first clear trailing block are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != nullptr; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = nullptr;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != nullptr) {
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != nullptr) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(caller, obj, alloc_failmode);
  }

  // No space available, we have to rebuild the free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
    if (_last->_next == nullptr) {
      return nullptr;
    }
    _last = _last->_next;
    _allocate_before_rebuild--;
  }
  return allocate_handle(caller, obj, alloc_failmode);  // retry
}

void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == nullptr, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != nullptr; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // this handle was cleared out by a delete call, reuse it
        *handle = _free_list == nullptr ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild the free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


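// Returns true if the handle points into this block's currently used range.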
bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != nullptr; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}