/*
 * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;

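// Called once during VM bootstrap. Creating the storages through
// OopStorageSet registers them with the strong and weak storage sets,
// which is how GC root processing finds and iterates these handles.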
void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak", mtInternal);
}

jobject JNIHandles::make_local(oop obj) {
  return make_local(JavaThread::current(), obj);
}

// Used by NewLocalRef, which requires a NULL return on out-of-memory.
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(!current_thread_in_native(), "must not be in native");
    return thread->active_handles()->allocate_handle(thread, obj, alloc_failmode);
  }
}

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return NULL on allocation failure.
    if (ptr != NULL) {
      assert(*ptr == NULL, "invariant");
      NativeAccess<>::oop_store(ptr, obj());
      res = reinterpret_cast<jobject>(ptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}

jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return NULL on allocation failure.
    if (ptr != NULL) {
      assert(*ptr == NULL, "invariant");
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
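      // Tag the handle by adding weak_tag_value to set its low-order bit;
      // this is what distinguishes a jweak from a regular (strong) jobject.
      // It works because OopStorage slots are pointer-aligned, so the low
      // bit of a slot address is otherwise always zero.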
      char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}

// Resolve some erroneous cases to NULL, rather than treating them as
// possibly unchecked errors.  In particular, deleted handles are
// treated as NULL (though a deleted and later reallocated handle
// isn't detected).
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = NULL;
  if (handle != NULL) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

bool JNIHandles::is_global_weak_cleared(jweak handle) {
  assert(handle != NULL, "precondition");
  assert(is_jweak(handle), "not a weak handle");
  oop* oop_ptr = jweak_ptr(handle);
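  // AS_NO_KEEPALIVE: load the referent without keeping it alive. We only
  // want to observe whether the weak handle has been cleared, not extend
  // the lifetime of the referent.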
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == NULL;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != NULL) {
    assert(!is_jweak(handle), "wrong method for destroying jweak");
    oop* oop_ptr = jobject_ptr(handle);
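    // Clear the slot before releasing it back to the storage, so that
    // concurrent scans of the storage observe NULL rather than a stale oop.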
    NativeAccess<>::oop_store(oop_ptr, (oop)NULL);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jobject handle) {
  if (handle != NULL) {
    assert(is_jweak(handle), "JNI handle not jweak");
    oop* oop_ptr = jweak_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
  assert(handle != NULL, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_jweak(handle)) {
    if (is_storage_handle(weak_global_handles(), jweak_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else {
    switch (global_handles()->allocation_status(jobject_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;                    // Invalid global handle

    case OopStorage::INVALID_ENTRY:
      // Not in global storage.  Might be a local handle.
      if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
        result = JNILocalRefType;
      }
      break;

    default:
      ShouldNotReachHere();
    }
  }
  return result;
}


bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
  assert(handle != NULL, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != NULL) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate any particular stack frame the handle might
// come from, so we'll check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != NULL, "precondition");
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != NULL, "precondition");
  return !is_jweak(handle) && is_storage_handle(global_handles(), jobject_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != NULL, "precondition");
  return is_jweak(handle) && is_storage_handle(weak_global_handles(), jweak_ptr(handle));
}

size_t JNIHandles::global_handle_memory_usage() {
  return global_handles()->total_memory_usage();
}

size_t JNIHandles::weak_global_handle_memory_usage() {
  return weak_global_handles()->total_memory_usage();
}


// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::cast(thread)->thread_state() == _thread_in_native);
}

bool JNIHandles::is_same_object(jobject handle1, jobject handle2) {
  oop obj1 = resolve_no_keepalive(handle1);
  oop obj2 = resolve_no_keepalive(handle2);

  bool ret = obj1 == obj2;

  if (EnableValhalla) {
    if (!ret && obj1 != NULL && obj2 != NULL && obj1->klass() == obj2->klass() && obj1->klass()->is_inline_klass()) {
      // The two references are different, neither is null, and both objects
      // are inline types, so a full substitutability test is required, calling
      // PrimitiveObjectMethods.isSubstitutable() (similar to InterpreterRuntime::is_substitutable).
      JavaThread* THREAD = JavaThread::current();
      Handle ha(THREAD, obj1);
      Handle hb(THREAD, obj2);
      JavaValue result(T_BOOLEAN);
      JavaCallArguments args;
      args.push_oop(ha);
      args.push_oop(hb);
      methodHandle method(THREAD, Universe::is_substitutable_method());
      JavaCalls::call(&result, method, &args, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // Something really bad happened, because isSubstitutable() should not
        // throw exceptions. If it is an error, just let it propagate; if it is
        // an exception, wrap it in an InternalError.
        if (!PENDING_EXCEPTION->is_a(vmClasses::Error_klass())) {
          Handle e(THREAD, PENDING_EXCEPTION);
          CLEAR_PENDING_EXCEPTION;
          THROW_MSG_CAUSE_(vmSymbols::java_lang_InternalError(), "Internal error in substitutability test", e, false);
        }
      }
      ret = result.get_jboolean();
    }
  }

  return ret;
}


int JNIHandleBlock::_blocks_allocated = 0;

static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
  return value & ~(uintptr_t)1u;
}

// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
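// For example, an in-use slot holds an oop whose low bit is clear (object
// addresses are at least 8-byte aligned), while a free-list slot holds
// ((uintptr_t)next | 1), so the tag bit cleanly separates the two cases.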
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to NULL, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
  // The VM thread can allocate a handle block on behalf of another thread during a safepoint.
  assert(thread == NULL || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Allocate new block
    if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
      block = new (std::nothrow) JNIHandleBlock();
      if (block == NULL) {
        return NULL;
      }
    } else {
      block = new JNIHandleBlock();
    }
    Atomic::inc(&_blocks_allocated);
    block->zap();
  }
  block->_top = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  block->_planned_capacity = block_size_in_oops;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that a NULL thread serves as an implicit argument that the block
  // should _not_ be kept on the thread-local free list.
  // See for instance JavaThread::exit().
  if (thread != NULL) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if (freelist != NULL) {
      while (block->_next != NULL) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;
  } else {
    DEBUG_ONLY(block->set_pop_frame_link(NULL));
    while (block != NULL) {
      JNIHandleBlock* next = block->_next;
      Atomic::dec(&_blocks_allocated);
      assert(block->pop_frame_link() == NULL, "pop_frame_link should be NULL");
      delete block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // Once we find a cleared block, all trailing blocks are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != NULL; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
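    // IS_DEST_UNINITIALIZED: the slot holds no previous oop (it was zapped or
    // has never been used), so the GC's pre-write barrier can be elided.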
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
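    // Pop the head of the free list; the slot itself holds the tagged
    // pointer to the next free slot (0 terminates the list).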
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(caller, obj, alloc_failmode);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
    if (_last->_next == NULL) {
      return NULL;
    }
    _last = _last->_next;
    _allocate_before_rebuild--;
  }
  return allocate_handle(caller, obj, alloc_failmode);  // retry
}

void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // this handle was cleared out by a delete call, reuse it
        *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // We should not rebuild the free list if there are unused handles at the end.
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
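  // For example, assuming block_size_in_oops == 32 and 4 full blocks: total is
  // 128. With free == 40, extra == 128 - 2*40 == 48, so we append 2 new blocks
  // before the next rebuild. With free == 70 (more than half), extra is
  // negative and we simply rebuild the free list again next time.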
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (const JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}


size_t JNIHandleBlock::length() const {
  size_t result = 1;
  for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
    result++;
  }
  return result;
}

class CountJNIHandleClosure: public OopClosure {
private:
  int _count;
public:
  CountJNIHandleClosure(): _count(0) {}
  virtual void do_oop(oop* ooph) { _count++; }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};

const size_t JNIHandleBlock::get_number_of_live_handles() {
  CountJNIHandleClosure counter;
  oops_do(&counter);
  return counter.count();
}

// This method is not thread-safe, i.e., must be called while holding a lock on the
// structure.
size_t JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}