/*
 * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;

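// Called during VM initialization (from init_globals() in init.cpp) to create
// the OopStorage instances backing global and weak global handles.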
void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak", mtInternal);
}

jobject JNIHandles::make_local(oop obj) {
  return make_local(JavaThread::current(), obj);
}

// Used by NewLocalRef, which requires NULL on out-of-memory.
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(!current_thread_in_native(), "must not be in native");
    return thread->active_handles()->allocate_handle(thread, obj, alloc_failmode);
  }
}

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return NULL on allocation failure.
    if (ptr != NULL) {
      assert(*ptr == NULL, "invariant");
      NativeAccess<>::oop_store(ptr, obj());
      res = reinterpret_cast<jobject>(ptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}
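
// Sketch of the intended pairing, as used by jni_NewGlobalRef and
// jni_DeleteGlobalRef in jni.cpp:
//   jobject g = JNIHandles::make_global(Handle(THREAD, obj));
//   ... g remains valid across threads and native frames ...
//   JNIHandles::destroy_global(g);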

jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return NULL on allocation failure.
    if (ptr != NULL) {
      assert(*ptr == NULL, "invariant");
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
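      // Adding weak_tag_value tags the pointer's low bit so resolution can
      // tell weak handles from strong ones; see is_jweak() and jweak_ptr()
      // in jniHandles.hpp.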
      char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}

// Resolve some erroneous cases to NULL, rather than treating them as
// possibly unchecked errors.  In particular, deleted handles are
// treated as NULL (though a deleted and later reallocated handle
// isn't detected).
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = NULL;
  if (handle != NULL) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

bool JNIHandles::is_global_weak_cleared(jweak handle) {
  assert(handle != NULL, "precondition");
  assert(is_jweak(handle), "not a weak handle");
  oop* oop_ptr = jweak_ptr(handle);
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == NULL;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != NULL) {
    assert(!is_jweak(handle), "wrong method for destroying jweak");
    oop* oop_ptr = jobject_ptr(handle);
    NativeAccess<>::oop_store(oop_ptr, (oop)NULL);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jobject handle) {
  if (handle != NULL) {
    assert(is_jweak(handle), "JNI handle not jweak");
    oop* oop_ptr = jweak_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


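// Classifies a handle for JNI's GetObjectRefType query
// (jni_GetObjectRefType in jni.cpp delegates here).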
jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
  assert(handle != NULL, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_jweak(handle)) {
    if (is_storage_handle(weak_global_handles(), jweak_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else {
    switch (global_handles()->allocation_status(jobject_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;                    // Invalid global handle

    case OopStorage::INVALID_ENTRY:
      // Not in global storage.  Might be a local handle.
      if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
        result = JNILocalRefType;
      }
      break;

    default:
      ShouldNotReachHere();
    }
  }
  return result;
}


bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
  assert(handle != NULL, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != NULL) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate any particular stack frame the handle might
// come from, so we check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != NULL, "precondition");
  // If there is no java frame, then this must be top level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != NULL, "precondition");
  return !is_jweak(handle) && is_storage_handle(global_handles(), jobject_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != NULL, "precondition");
  return is_jweak(handle) && is_storage_handle(weak_global_handles(), jweak_ptr(handle));
}

size_t JNIHandles::global_handle_memory_usage() {
  return global_handles()->total_memory_usage();
}

size_t JNIHandles::weak_global_handle_memory_usage() {
  return weak_global_handles()->total_memory_usage();
}


// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::cast(thread)->thread_state() == _thread_in_native);
}

int JNIHandleBlock::_blocks_allocated = 0;

static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
  return value & ~(uintptr_t)1u;
}

// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));
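// For illustration: tag_free_list(0x1000) == 0x1001 and
// untag_free_list(0x1001) == 0x1000. Stored oops are always at least
// pointer-aligned, so a set low bit unambiguously marks a freelist link
// rather than an oop.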

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to NULL, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
  // The VM thread can allocate a handle block on behalf of another thread during a safepoint.
  assert(thread == NULL || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Allocate new block
    if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
      block = new (std::nothrow) JNIHandleBlock();
      if (block == NULL) {
        return NULL;
      }
    } else {
      block = new JNIHandleBlock();
    }
    Atomic::inc(&_blocks_allocated);
    block->zap();
  }
  block->_top = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  block->_planned_capacity = block_size_in_oops;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put the returned block at the beginning of the thread-local free list.
  // Note that a NULL thread argument is an implicit request that the block
  // _not_ be kept on the thread's free_handle_block list; see for instance
  // JavaThread::exit().
  if (thread != NULL) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if (freelist != NULL) {
      while (block->_next != NULL) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;
  } else {
    DEBUG_ONLY(block->set_pop_frame_link(NULL));
    while (block != NULL) {
      JNIHandleBlock* next = block->_next;
      Atomic::dec(&_blocks_allocated);
      assert(block->pop_frame_link() == NULL, "pop_frame_link should be NULL");
      delete block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}

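// Handle blocks are walked as part of the owning thread's oops iteration
// (see JavaThread::oops_do), which reaches them via active_handles().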
void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first clear trailing block are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != NULL; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(caller, obj, alloc_failmode);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
    if (_last->_next == NULL) {
      return NULL;
    }
    _last = _last->_next;
    _allocate_before_rebuild--;
  }
  return allocate_handle(caller, obj, alloc_failmode);  // retry
}

void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // this handle was cleared out by a delete call, reuse it
        *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
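  // For example (assuming block_size_in_oops == 32, per jniHandles.hpp): with
  // two full blocks, total == 64; if only 20 slots were free, extra == 64 -
  // 2*20 == 24, so we append (24 + 31) / 32 == 1 new block before the next
  // rebuild.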
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}


size_t JNIHandleBlock::length() const {
  size_t result = 1;
  for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
    result++;
  }
  return result;
}

class CountJNIHandleClosure: public OopClosure {
private:
  int _count;
public:
  CountJNIHandleClosure(): _count(0) {}
  virtual void do_oop(oop* ooph) { _count++; }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};

const size_t JNIHandleBlock::get_number_of_live_handles() {
  CountJNIHandleClosure counter;
  oops_do(&counter);
  return counter.count();
}

// This method is not thread-safe, i.e., must be called while holding a lock on the
// structure.
size_t JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}