/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = nullptr;
OopStorage* JNIHandles::_weak_global_handles = nullptr;

void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak", mtInternal);
}

jobject JNIHandles::make_local(oop obj) {
  return make_local(JavaThread::current(), obj);
}

// Used by NewLocalRef which requires null on out-of-memory
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
  if (obj == nullptr) {
    return nullptr;  // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(!current_thread_in_native(), "must not be in native");
    STATIC_ASSERT(TypeTag::local == 0);
    return thread->active_handles()->allocate_handle(thread, obj, alloc_failmode);
  }
}

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
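    // Report the failure as a native (malloc) out-of-memory condition.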
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_stw_gc_active(), "can't extend the root set during GC pause");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return null on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::global;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}

jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_stw_gc_active(), "can't extend the root set during GC pause");
  assert(!current_thread_in_native(), "must not be in native");
  jweak res = nullptr;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return null on allocation failure.
    if (ptr != nullptr) {
      assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
      char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::weak_global;
      res = reinterpret_cast<jweak>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}

// Resolve some erroneous cases to null, rather than treating them as
// possibly unchecked errors. In particular, deleted handles are
// treated as null (though a deleted and later reallocated handle
// isn't detected).
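// The 'true' external_guard template argument below selects the checking
// variant of resolve_impl, which maps those erroneous cases to null
// instead of asserting.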
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = nullptr;
  if (handle != nullptr) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

bool JNIHandles::is_weak_global_cleared(jweak handle) {
  assert(handle != nullptr, "precondition");
  oop* oop_ptr = weak_global_ptr(handle);
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == nullptr;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != nullptr) {
    oop* oop_ptr = global_ptr(handle);
    NativeAccess<>::oop_store(oop_ptr, (oop)nullptr);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jweak handle) {
  if (handle != nullptr) {
    oop* oop_ptr = weak_global_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)nullptr);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_weak_global_tagged(handle)) {
    if (is_storage_handle(weak_global_handles(), weak_global_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else if (is_global_tagged(handle)) {
    switch (global_handles()->allocation_status(global_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;  // Invalid global handle

    default:
      ShouldNotReachHere();
    }
  } else if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
    // Not in global storage. Might be a local handle.
    result = JNILocalRefType;
  }
  return result;
}


bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
  assert(handle != nullptr, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != nullptr) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate any particular stack frame the handle might
// come from, so we'll check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != nullptr, "precondition");
  // If there is no Java frame, then this must be top level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
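  // Otherwise, accept the handle only if it lies within the thread's
  // stack, at or below the last Java stack pointer.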
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_global_tagged(handle) && is_storage_handle(global_handles(), global_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != nullptr, "precondition");
  return is_weak_global_tagged(handle) && is_storage_handle(weak_global_handles(), weak_global_ptr(handle));
}

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::cast(thread)->thread_state() == _thread_in_native);
}

bool JNIHandles::is_same_object(jobject handle1, jobject handle2) {
  oop obj1 = resolve_no_keepalive(handle1);
  oop obj2 = resolve_no_keepalive(handle2);

  bool ret = obj1 == obj2;

  if (EnableValhalla) {
    if (!ret && obj1 != nullptr && obj2 != nullptr && obj1->klass() == obj2->klass() && obj1->klass()->is_inline_klass()) {
      // The two references are different, not null, and both are inline types,
      // so a full substitutability test is required, calling
      // ValueObjectMethods.isSubstitutable() (similarly to
      // InterpreterRuntime::is_substitutable).
      JavaThread* THREAD = JavaThread::current();
      Handle ha(THREAD, obj1);
      Handle hb(THREAD, obj2);
      JavaValue result(T_BOOLEAN);
      JavaCallArguments args;
      args.push_oop(ha);
      args.push_oop(hb);
      methodHandle method(THREAD, Universe::is_substitutable_method());
      JavaCalls::call(&result, method, &args, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // Something really bad happened because isSubstitutable() should not throw exceptions.
        // If it is an error, just let it propagate.
        // If it is an exception, wrap it into an InternalError.
        if (!PENDING_EXCEPTION->is_a(vmClasses::Error_klass())) {
          Handle e(THREAD, PENDING_EXCEPTION);
          CLEAR_PENDING_EXCEPTION;
          THROW_MSG_CAUSE_(vmSymbols::java_lang_InternalError(), "Internal error in substitutability test", e, false);
        }
      }
      ret = result.get_jboolean();
    }
  }

  return ret;
}


int JNIHandleBlock::_blocks_allocated = 0;

static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
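  // Clear the low tag bit to recover the raw freelist pointer.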
  return value & ~(uintptr_t)1u;
}

// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to null, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
  // The VM thread can allocate a handle block on behalf of another thread during a safepoint.
  assert(thread == nullptr || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != nullptr && thread->free_handle_block() != nullptr) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Allocate new block
    if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
      block = new (std::nothrow) JNIHandleBlock();
      if (block == nullptr) {
        return nullptr;
      }
    } else {
      block = new JNIHandleBlock();
    }
    Atomic::inc(&_blocks_allocated);
    block->zap();
  }
  block->_top = 0;
  block->_next = nullptr;
  block->_pop_frame_link = nullptr;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = nullptr);
  debug_only(block->_free_list = nullptr);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
  assert(thread == nullptr || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that if thread == nullptr, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the free_handle_block.
  // See for instance JavaThread::exit().
  if (thread != nullptr) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = nullptr;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if (freelist != nullptr) {
      while (block->_next != nullptr) block = block->_next;
      block->_next = freelist;
    }
    block = nullptr;
  } else {
    DEBUG_ONLY(block->set_pop_frame_link(nullptr));
    while (block != nullptr) {
      JNIHandleBlock* next = block->_next;
      Atomic::dec(&_blocks_allocated);
      assert(block->pop_frame_link() == nullptr, "pop_frame_link should be null");
      delete block;
      block = next;
    }
  }
  if (pop_frame_link != nullptr) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
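    // Releasing recursively walks the chain of frames; the depth is
    // bounded by the number of outstanding unbalanced PushLocalFrame calls.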
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != nullptr) {
    for (JNIHandleBlock* current = current_chain; current != nullptr;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == nullptr,
             "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != nullptr;
         current = current->_next) {
      assert(current->_last == nullptr, "only first block should have _last set");
      assert(current->_free_list == nullptr,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first clear trailing block are already cleared.
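        // In debug builds, check that invariant for the remainder of the chain.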
#ifdef ASSERT
        for (current = current->_next; current != nullptr; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = nullptr;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != nullptr) {
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != nullptr) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(caller, obj, alloc_failmode);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();  // updates _allocate_before_rebuild counter
  } else {
    _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
    if (_last->_next == nullptr) {
      return nullptr;
    }
    _last = _last->_next;
    _allocate_before_rebuild--;
  }
  return allocate_handle(caller, obj, alloc_failmode);  // retry
}

void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == nullptr, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != nullptr; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // this handle was cleared out by a delete call, reuse it
        *handle = _free_list == nullptr ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like; compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != nullptr; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}