/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = nullptr;
OopStorage* JNIHandles::_weak_global_handles = nullptr;

void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak", mtInternal);
}
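
// The two OopStorage instances created above back all global and weak-global
// handles. Registering them via OopStorageSet::create_strong/create_weak is
// what makes the GC visit their entries as strong and weak roots, respectively.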

jobject JNIHandles::make_local(oop obj) {
  return make_local(JavaThread::current(), obj);
}

// Used by NewLocalRef, which requires null on out-of-memory.
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
  if (obj == nullptr) {
    return nullptr;                // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(!current_thread_in_native(), "must not be in native");
    STATIC_ASSERT(TypeTag::local == 0);
    return thread->active_handles()->allocate_handle(thread, obj, alloc_failmode);
  }
}
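
// Local handles carry no tag (see the STATIC_ASSERT above: TypeTag::local is
// zero), so a local jobject is a direct pointer to an oop slot in the thread's
// JNIHandleBlock. An illustrative sketch only; real callers go through
// JNIHandles::resolve(), which applies the required GC barriers:
//
//   jobject h = JNIHandles::make_local(thread, obj);
//   oop o = *reinterpret_cast<oop*>(h);   // what resolve() does, sans barriers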

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return nullptr on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::global;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}
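
// Global (and weak-global) handles are distinguished from locals by adding a
// small tag (TypeTag::global / TypeTag::weak_global) to the low bits of the
// entry address, as done with tptr above. Resolution strips the tag before
// dereferencing; roughly (illustrative, the actual decoding lives in
// jniHandles.inline.hpp):
//
//   oop* entry = reinterpret_cast<oop*>(
//       reinterpret_cast<char*>(handle) - TypeTag::global);
//   oop obj = NativeAccess<>::oop_load(entry);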

jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jweak res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return nullptr on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::weak_global;
      res = reinterpret_cast<jweak>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}
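
// Weak-global handles are stored with phantom reference strength
// (ON_PHANTOM_OOP_REF): the GC may clear the slot once the referent is
// otherwise unreachable, after which resolving the handle yields null. A
// caller can probe for that without keeping the referent alive:
//
//   jweak w = JNIHandles::make_weak_global(h);
//   ...
//   if (JNIHandles::is_weak_global_cleared(w)) {
//     // referent has been collected
//   }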

// Resolve some erroneous cases to null, rather than treating them as
// possibly unchecked errors.  In particular, deleted handles are
// treated as null (though a deleted and later reallocated handle
// isn't detected).
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = nullptr;
  if (handle != nullptr) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

bool JNIHandles::is_weak_global_cleared(jweak handle) {
  assert(handle != nullptr, "precondition");
  oop* oop_ptr = weak_global_ptr(handle);
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == nullptr;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != nullptr) {
    oop* oop_ptr = global_ptr(handle);
    NativeAccess<>::oop_store(oop_ptr, (oop)nullptr);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jweak handle) {
  if (handle != nullptr) {
    oop* oop_ptr = weak_global_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)nullptr);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_weak_global_tagged(handle)) {
    if (is_storage_handle(weak_global_handles(), weak_global_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else if (is_global_tagged(handle)) {
    switch (global_handles()->allocation_status(global_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;                    // Invalid global handle

    default:
      ShouldNotReachHere();
    }
  } else {
    // Not in global storage.  Might be a local handle.
    if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
      result = JNILocalRefType;
    } else {
      ShouldNotReachHere();
    }
  }
  return result;
}
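
// handle_type() backs the JNI GetObjectRefType function: the tag bits pick the
// candidate kind, and the storage / stack checks confirm the handle is actually
// live. For example, for a valid global reference:
//
//   jobject g = env->NewGlobalRef(obj);
//   env->GetObjectRefType(g);   // returns JNIGlobalRefType via handle_type()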


bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != nullptr) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate the particular stack frame the handle might
// come from, so we check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != nullptr, "precondition");
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}
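
// Frame handles are oop slots allocated directly in a thread's native stack
// frames (for example, the handles that native method wrappers create for oop
// arguments) rather than in a JNIHandleBlock, which is why a plain stack
// address-range check suffices here.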


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_global_tagged(handle) && is_storage_handle(global_handles(), global_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_weak_global_tagged(handle) && is_storage_handle(weak_global_handles(), weak_global_ptr(handle));
}

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::cast(thread)->thread_state() == _thread_in_native);
}

int JNIHandleBlock::_blocks_allocated = 0;

static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
  return value & ~(uintptr_t)1u;
}

// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));
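
// The free-list tag is the low address bit: oop-sized slots are at least
// pointer-aligned, so a genuine oop value never has bit 0 set, while a
// free-list link always does. A worked example with illustrative addresses:
//
//   next free slot at 0x7f001230 -> stored link = 0x7f001231 (tagged)
//   is_tagged_free_list(0x7f001231) == true    // low bit set
//   untag_free_list(0x7f001231)     == 0x7f001230
//   is_tagged_free_list((uintptr_t)some_oop) == false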

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to null, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
  // The VM thread can allocate a handle block on behalf of another thread during a safepoint.
  assert(thread == nullptr || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != nullptr && thread->free_handle_block() != nullptr) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Allocate a new block
    if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
      block = new (std::nothrow) JNIHandleBlock();
      if (block == nullptr) {
        return nullptr;
      }
    } else {
      block = new JNIHandleBlock();
    }
    Atomic::inc(&_blocks_allocated);
    block->zap();
  }
  block->_top = 0;
  block->_next = nullptr;
  block->_pop_frame_link = nullptr;
  // _last, _free_list and _allocate_before_rebuild are initialized in allocate_handle
  debug_only(block->_last = nullptr);
  debug_only(block->_free_list = nullptr);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
  assert(thread == nullptr || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put the returned block at the beginning of the thread-local free list.
  // Note that if thread == nullptr, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the thread's free list.
  // See for instance JavaThread::exit().
  if (thread != nullptr) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = nullptr;
    thread->set_free_handle_block(block);

    // Add the original freelist to the end of the chain
    if (freelist != nullptr) {
      while (block->_next != nullptr) block = block->_next;
      block->_next = freelist;
    }
    block = nullptr;
  } else {
    DEBUG_ONLY(block->set_pop_frame_link(nullptr));
    while (block != nullptr) {
      JNIHandleBlock* next = block->_next;
      Atomic::dec(&_blocks_allocated);
      assert(block->pop_frame_link() == nullptr, "pop_frame_link should be null");
      delete block;
      block = next;
    }
  }
  if (pop_frame_link != nullptr) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over the chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != nullptr) {
    for (JNIHandleBlock* current = current_chain; current != nullptr;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == nullptr,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // Traverse heap pointers only, not deleted handles or free-list
        // pointers.
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // The next handle block is valid only if the current block is full.
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


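// Allocation strategy, in order of preference:
//   1. bump-allocate from the last block in the chain (_last->_top),
//   2. reuse a slot from the free list threaded through deleted handles,
//   3. advance _last to an already-chained but unused block,
//   4. rebuild the free list or append a fresh block, then retry.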
jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation, or the initial block got zapped when
    // entering a native function. If we have any following blocks, they are
    // no longer valid.
    for (JNIHandleBlock* current = _next; current != nullptr;
         current = current->_next) {
      assert(current->_last == nullptr, "only first block should have _last set");
      assert(current->_free_list == nullptr,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first cleared block are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != nullptr; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear the initial block
    _free_list = nullptr;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try the last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try the free list
  if (_free_list != nullptr) {
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    *handle = obj;
    return (jobject) handle;
  }

  // Check if an unused block follows the last one
  if (_last->_next != nullptr) {
    // update _last and retry
    _last = _last->_next;
    return allocate_handle(caller, obj, alloc_failmode);
  }

  // No space available; we have to rebuild the free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates the _allocate_before_rebuild counter
  } else {
    _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
    if (_last->_next == nullptr) {
      return nullptr;
    }
    _last = _last->_next;
    _allocate_before_rebuild--;
  }
  return allocate_handle(caller, obj, alloc_failmode);  // retry
}

void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == nullptr, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != nullptr; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // This handle was cleared out by a delete call; reuse it
        *handle = _free_list == nullptr ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // We should not rebuild the free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well; otherwise we append a corresponding number of new blocks before
  // attempting a free-list rebuild again.
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like; compute the number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}
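
// Worked example of the heuristic above (illustrative numbers): with
// block_size_in_oops == 32 and 4 chained blocks, total == 128. If 70 slots
// were free, extra == 128 - 140 < 0, so the very next overflow rebuilds the
// free list again. If only 20 were free, extra == 128 - 40 == 88, so
// _allocate_before_rebuild == (88 + 31) / 32 == 3 new blocks get appended
// before another rebuild is attempted.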


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != nullptr; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}