/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/templateTable.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/align.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Helper class to access current interpreter state
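// LastFrameAccessor wraps the calling thread's most recent Java frame so that
// the runtime entries below can read and update that frame's method, bcp, mdp
// and monitor area without repeatedly walking the stack.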
class LastFrameAccessor : public StackObj {
  frame _last_frame;
public:
  LastFrameAccessor(JavaThread* current) {
    assert(current == Thread::current(), "sanity");
    _last_frame = current->last_frame();
  }
  bool is_interpreted_frame() const      { return _last_frame.is_interpreted_frame(); }
  Method* method() const                 { return _last_frame.interpreter_frame_method(); }
  address bcp() const                    { return _last_frame.interpreter_frame_bcp(); }
  int     bci() const                    { return _last_frame.interpreter_frame_bci(); }
  address mdp() const                    { return _last_frame.interpreter_frame_mdp(); }

  void set_bcp(address bcp)              { _last_frame.interpreter_frame_set_bcp(bcp); }
  void set_mdp(address dp)               { _last_frame.interpreter_frame_set_mdp(dp); }

  // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
  Bytecodes::Code code() const           { return Bytecodes::code_at(method(), bcp()); }

  Bytecode bytecode() const              { return Bytecode(method(), bcp()); }
  int get_index_u1(Bytecodes::Code bc) const { return bytecode().get_index_u1(bc); }
  int get_index_u2(Bytecodes::Code bc) const { return bytecode().get_index_u2(bc); }
  int get_index_u4(Bytecodes::Code bc) const { return bytecode().get_index_u4(bc); }
  int number_of_dimensions() const       { return bcp()[3]; }

  oop callee_receiver(Symbol* signature) {
    return _last_frame.interpreter_callee_receiver(signature);
  }
  BasicObjectLock* monitor_begin() const {
    return _last_frame.interpreter_frame_monitor_begin();
  }
  BasicObjectLock* monitor_end() const {
    return _last_frame.interpreter_frame_monitor_end();
  }
  BasicObjectLock* next_monitor(BasicObjectLock* current) const {
    return _last_frame.next_monitor_in_interpreter_frame(current);
  }

  frame& get_frame()                     { return _last_frame; }
};

//------------------------------------------------------------------------------------------------------------------------
// State accessors

void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread* current) {
  LastFrameAccessor last_frame(current);
  last_frame.set_bcp(bcp);
  if (ProfileInterpreter) {
    // ProfileTraps uses MDOs independently of ProfileInterpreter.
    // That is why we must check both ProfileInterpreter and mdo != nullptr.
    MethodData* mdo = last_frame.method()->method_data();
    if (mdo != nullptr) {
      NEEDS_CLEANUP;
      last_frame.set_mdp(mdo->bci_to_dp(last_frame.bci()));
    }
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Constants

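// A load of a CONSTANT_Class entry (for example, the Java expression
// `Class<?> c = String.class;` compiles to an ldc of a class constant)
// reaches the entry below while the constant pool entry is still unresolved,
// so the Klass can be resolved and its java.lang.Class mirror handed back
// through vm_result.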
JRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* current, bool wide))
  // access constant pool
  LastFrameAccessor last_frame(current);
  ConstantPool* pool = last_frame.method()->constants();
  int cp_index = wide ? last_frame.get_index_u2(Bytecodes::_ldc_w) : last_frame.get_index_u1(Bytecodes::_ldc);
  constantTag tag = pool->tag_at(cp_index);

  assert (tag.is_unresolved_klass() || tag.is_klass(), "wrong ldc call");
  Klass* klass = pool->klass_at(cp_index, CHECK);
  oop java_class = klass->java_mirror();
  current->set_vm_result(java_class);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* current, Bytecodes::Code bytecode)) {
  assert(bytecode == Bytecodes::_ldc ||
         bytecode == Bytecodes::_ldc_w ||
         bytecode == Bytecodes::_ldc2_w ||
         bytecode == Bytecodes::_fast_aldc ||
         bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
  ResourceMark rm(current);
  const bool is_fast_aldc = (bytecode == Bytecodes::_fast_aldc ||
                             bytecode == Bytecodes::_fast_aldc_w);
  LastFrameAccessor last_frame(current);
  methodHandle m (current, last_frame.method());
  Bytecode_loadconstant ldc(m, last_frame.bci());

  // Double-check the size.  (Condy can have any type.)
  BasicType type = ldc.result_type();
  switch (type2size[type]) {
  case 2: guarantee(bytecode == Bytecodes::_ldc2_w, ""); break;
  case 1: guarantee(bytecode != Bytecodes::_ldc2_w, ""); break;
  default: ShouldNotReachHere();
  }

  // Resolve the constant.  This does not do unboxing.
  // But it does replace Universe::the_null_sentinel by null.
  oop result = ldc.resolve_constant(CHECK);
  assert(result != nullptr || is_fast_aldc, "null result only valid for fast_aldc");

#ifdef ASSERT
  {
    // The bytecode wrappers aren't GC-safe so construct a new one
    Bytecode_loadconstant ldc2(m, last_frame.bci());
    int rindex = ldc2.cache_index();
    if (rindex < 0)
      rindex = m->constants()->cp_to_object_index(ldc2.pool_index());
    if (rindex >= 0) {
      oop coop = m->constants()->resolved_reference_at(rindex);
      oop roop = (result == nullptr ? Universe::the_null_sentinel() : result);
      assert(roop == coop, "expected result for assembly code");
    }
  }
#endif
  current->set_vm_result(result);
  if (!is_fast_aldc) {
    // Tell the interpreter how to unbox the primitive.
    guarantee(java_lang_boxing_object::is_instance(result, type), "");
    int offset = java_lang_boxing_object::value_offset(type);
    intptr_t flags = ((as_TosState(type) << ConstantPoolCache::tos_state_shift)
                      | (offset & ConstantPoolCache::field_index_mask));
    current->set_vm_result_2((Metadata*)flags);
  }
}
JRT_END


//------------------------------------------------------------------------------------------------------------------------
// Allocation

JRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* current, ConstantPool* pool, int index))
  Klass* k = pool->klass_at(index, CHECK);
  InstanceKlass* klass = InstanceKlass::cast(k);

  // Make sure we are not instantiating an abstract klass
  klass->check_valid_for_instantiation(true, CHECK);

  // Make sure klass is initialized
  klass->initialize(CHECK);

  // At this point the class may not be fully initialized
  // because of recursive initialization. If it is fully
  // initialized & has_finalized is not set, we rewrite
  // it into its fast version (Note: no locking is needed
  // here since this is an atomic byte write and can be
  // done more than once).
  //
  // Note: In case of classes with has_finalized we don't
  //       rewrite since that saves us an extra check in
  //       the fast version which then would call the
  //       slow version anyway (and do a call back into
  //       Java).
  //       If we have a breakpoint, then we don't rewrite
  //       because the _breakpoint bytecode would be lost.
  oop obj = klass->allocate_instance(CHECK);
  current->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::uninitialized_static_inline_type_field(JavaThread* current, oopDesc* mirror, ResolvedFieldEntry* entry))
  // The interpreter tries to access an inline static field that has not been initialized.
  // This situation can happen in different scenarios:
  //   1 - if the load or initialization of the field failed during step 8 of
  //       the initialization of the holder of the field, in this case the access to the field
  //       must fail
  //   2 - it can also happen when the initialization of the holder class triggered the initialization of
  //       another class which accesses this field in its static initializer, in this case the
  //       access must succeed to allow circularity
  // The code below tries to load and initialize the field's class again before returning the default value.
  // If the field was not initialized because of an error, an exception should be thrown.
  // If the class is being initialized, the default value is returned.
  assert(entry->is_valid(), "Invalid ResolvedFieldEntry");
  instanceHandle mirror_h(THREAD, (instanceOop)mirror);
  InstanceKlass* klass = entry->field_holder();
  u2 index = entry->field_index();
  assert(klass == java_lang_Class::as_Klass(mirror), "Not the field holder klass");
  assert(klass->field_is_null_free_inline_type(index), "Sanity check");
  if (klass->is_being_initialized() && klass->is_init_thread(THREAD)) {
    int offset = klass->field_offset(index);
    Klass* field_k = klass->get_inline_type_field_klass_or_null(index);
    if (field_k == nullptr) {
      field_k = SystemDictionary::resolve_or_fail(klass->field_signature(index)->fundamental_name(THREAD),
                                                  Handle(THREAD, klass->class_loader()),
                                                  Handle(THREAD, klass->protection_domain()),
                                                  true, CHECK);
      assert(field_k != nullptr, "Should have been loaded or an exception thrown above");
      klass->set_inline_type_field_klass(index, InlineKlass::cast(field_k));
    }
    field_k->initialize(CHECK);
    oop defaultvalue = InlineKlass::cast(field_k)->default_value();
    // It is safe to initialize the static field because 1) the current thread is the initializing thread
    // and is the only one that can access it, and 2) the field is actually not initialized (i.e. null)
    // otherwise the JVM should not be executing this code.
    mirror_h()->obj_field_put(offset, defaultvalue);
    current->set_vm_result(defaultvalue);
  } else {
    assert(klass->is_in_error_state(), "If not initializing, initialization must have failed to get here");
    ResourceMark rm(THREAD);
    const char* desc = "Could not initialize class ";
    const char* className = klass->external_name();
    size_t msglen = strlen(desc) + strlen(className) + 1;
    char* message = NEW_RESOURCE_ARRAY(char, msglen);
    if (nullptr == message) {
      // Out of memory: can't create detailed error message
      THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
    } else {
      jio_snprintf(message, msglen, "%s%s", desc, className);
      THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
    }
  }
JRT_END

JRT_ENTRY(void, InterpreterRuntime::read_flat_field(JavaThread* current, oopDesc* obj, int index, Klass* field_holder))
  Handle obj_h(THREAD, obj);

  assert(oopDesc::is_oop(obj), "Sanity check");

  assert(field_holder->is_instance_klass(), "Sanity check");
  InstanceKlass* klass = InstanceKlass::cast(field_holder);

  assert(klass->field_is_flat(index), "Sanity check");

  InlineKlass* field_vklass = InlineKlass::cast(klass->get_inline_type_field_klass(index));

  oop res = field_vklass->read_flat_field(obj_h(), klass->field_offset(index), CHECK);
  current->set_vm_result(res);
JRT_END

// The protocol to read a nullable flat field is:
// Step 1: read the null marker with a load_acquire barrier to ensure that
//         reordered loads won't try to load the value before the null marker is read
// Step 2: if the null marker value is zero, the field's value is null,
//         otherwise the flat field value can be read like a regular flat field
JRT_ENTRY(void, InterpreterRuntime::read_nullable_flat_field(JavaThread* current, oopDesc* obj, ResolvedFieldEntry* entry))
  assert(oopDesc::is_oop(obj), "Sanity check");
  assert(entry->has_null_marker(), "Otherwise should not get here");
  Handle obj_h(THREAD, obj);

  InstanceKlass* ik = InstanceKlass::cast(obj_h()->klass());
  int field_index = entry->field_index();
  int nm_offset = ik->null_marker_offsets_array()->at(field_index);
  if (obj_h()->byte_field_acquire(nm_offset) == 0) {
    current->set_vm_result(nullptr);
  } else {
    InlineKlass* field_vklass = InlineKlass::cast(ik->get_inline_type_field_klass(field_index));
    oop res = field_vklass->read_flat_field(obj_h(), ik->field_offset(field_index), CHECK);
    current->set_vm_result(res);
  }
JRT_END

// The protocol to write a nullable flat field is:
// If the new field value is null, just write zero to the null marker
// Otherwise:
// Step 1: write the field value like a regular flat field
// Step 2: have a memory barrier to ensure that the whole value content is visible
// Step 3: update the null marker to a non zero value
JRT_ENTRY(void, InterpreterRuntime::write_nullable_flat_field(JavaThread* current, oopDesc* obj, oopDesc* value, ResolvedFieldEntry* entry))
  assert(oopDesc::is_oop(obj), "Sanity check");
  Handle obj_h(THREAD, obj);
  assert(value == nullptr || oopDesc::is_oop(value), "Sanity check");
  Handle val_h(THREAD, value);

  InstanceKlass* ik = InstanceKlass::cast(obj_h()->klass());
  int nm_offset = ik->null_marker_offsets_array()->at(entry->field_index());
  if (val_h() == nullptr) {
    obj_h()->byte_field_put(nm_offset, (jbyte)0);
    return;
  }

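  // Non-null value: store the flat content and make sure the null marker ends up
  // non-zero, following the write protocol described above.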
  InlineKlass* vk = InlineKlass::cast(val_h()->klass());
  if (entry->has_internal_null_marker()) {
    // The interpreter copies values with a bulk operation.
    // To avoid accidentally setting the null marker to "null" during
    // the copying, the null marker is set to non zero in the source object
    if (val_h()->byte_field(vk->get_internal_null_marker_offset()) == 0) {
      val_h()->byte_field_put(vk->get_internal_null_marker_offset(), (jbyte)1);
    }
    vk->write_non_null_flat_field(obj_h(), entry->field_offset(), val_h());
  } else {
    vk->write_non_null_flat_field(obj_h(), entry->field_offset(), val_h());
    OrderAccess::release();
    obj_h()->byte_field_put(nm_offset, (jbyte)1);
  }
JRT_END

JRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* current, BasicType type, jint size))
  oop obj = oopFactory::new_typeArray(type, size, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* current, ConstantPool* pool, int index, jint size))
  Klass* klass = pool->klass_at(index, CHECK);
  arrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
  current->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::value_array_load(JavaThread* current, arrayOopDesc* array, int index))
  flatArrayHandle vah(current, (flatArrayOop)array);
  oop value_holder = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK);
  current->set_vm_result(value_holder);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::value_array_store(JavaThread* current, void* val, arrayOopDesc* array, int index))
  assert(val != nullptr, "can't store null into flat array");
  ((flatArrayOop)array)->value_copy_to_index(cast_to_oop(val), index);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* current, jint* first_size_address))
  // We may want to pass in more arguments - could make this slightly faster
  LastFrameAccessor last_frame(current);
  ConstantPool* constants = last_frame.method()->constants();
  int i = last_frame.get_index_u2(Bytecodes::_multianewarray);
  Klass* klass = constants->klass_at(i, CHECK);
  int nof_dims = last_frame.number_of_dimensions();
  assert(klass->is_klass(), "not a class");
  assert(nof_dims >= 1, "multianewarray rank must be nonzero");

  // We must create an array of jints to pass to multi_allocate.
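  // A small on-stack buffer covers the common case; larger ranks fall back to a
  // resource-allocated array. The dimension sizes are the operands that a source
  // expression such as `new int[2][3]` pushed before the multianewarray bytecode.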
  ResourceMark rm(current);
  const int small_dims = 10;
  jint dim_array[small_dims];
  jint *dims = &dim_array[0];
  if (nof_dims > small_dims) {
    dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims);
  }
  for (int index = 0; index < nof_dims; index++) {
    // offset from first_size_address is addressed as local[index]
    int n = Interpreter::local_offset_in_bytes(index)/jintSize;
    dims[index] = first_size_address[n];
  }
  oop obj = ArrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

JRT_ENTRY(jboolean, InterpreterRuntime::is_substitutable(JavaThread* current, oopDesc* aobj, oopDesc* bobj))
  assert(oopDesc::is_oop(aobj) && oopDesc::is_oop(bobj), "must be valid oops");

  Handle ha(THREAD, aobj);
  Handle hb(THREAD, bobj);
  JavaValue result(T_BOOLEAN);
  JavaCallArguments args;
  args.push_oop(ha);
  args.push_oop(hb);
  methodHandle method(current, Universe::is_substitutable_method());
  method->method_holder()->initialize(CHECK_false); // Ensure class ValueObjectMethods is initialized
  JavaCalls::call(&result, method, &args, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // Something really bad happened because isSubstitutable() should not throw exceptions
    // If it is an error, just let it propagate
    // If it is an exception, wrap it into an InternalError
    if (!PENDING_EXCEPTION->is_a(vmClasses::Error_klass())) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      THROW_MSG_CAUSE_(vmSymbols::java_lang_InternalError(), "Internal error in substitutability test", e, false);
    }
  }
  return result.get_jboolean();
JRT_END

// Quicken instance-of and check-cast bytecodes
JRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* current))
  // Force resolving; quicken the bytecode
  LastFrameAccessor last_frame(current);
  int which = last_frame.get_index_u2(Bytecodes::_checkcast);
  ConstantPool* cpool = last_frame.method()->constants();
  // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
  // program we might have seen an unquick'd bytecode in the interpreter but have another
  // thread quicken the bytecode before we get here.
  // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" );
  Klass* klass = cpool->klass_at(which, CHECK);
  current->set_vm_result_2(klass);
JRT_END


//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void InterpreterRuntime::note_trap_inner(JavaThread* current, int reason,
                                         const methodHandle& trap_method, int trap_bci) {
  if (trap_method.not_null()) {
    MethodData* trap_mdo = trap_method->method_data();
    if (trap_mdo == nullptr) {
      ExceptionMark em(current);
      JavaThread* THREAD = current; // For exception macros.
      Method::build_profiling_method_data(trap_method, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // Only metaspace OOM is expected. No Java code executed.
        assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())),
               "we expect only an OOM error here");
        CLEAR_PENDING_EXCEPTION;
      }
      trap_mdo = trap_method->method_data();
      // and fall through...
    }
    if (trap_mdo != nullptr) {
      // Update per-method count of trap events.  The interpreter
      // is updating the MDO to simulate the effect of compiler traps.
      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
    }
  }
}

// Assume the compiler is (or will be) interested in this event.
// If necessary, create an MDO to hold the information, and record it.
void InterpreterRuntime::note_trap(JavaThread* current, int reason) {
  assert(ProfileTraps, "call me only if profiling");
  LastFrameAccessor last_frame(current);
  methodHandle trap_method(current, last_frame.method());
  int trap_bci = trap_method->bci_from(last_frame.bcp());
  note_trap_inner(current, reason, trap_method, trap_bci);
}

static Handle get_preinitialized_exception(Klass* k, TRAPS) {
  // get klass
  InstanceKlass* klass = InstanceKlass::cast(k);
  assert(klass->is_initialized(),
         "this klass should have been initialized during VM initialization");
  // create instance - do not call constructor since we may have no
  // (java) stack space left (should assert constructor is empty)
  Handle exception;
  oop exception_oop = klass->allocate_instance(CHECK_(exception));
  exception = Handle(THREAD, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  return exception;
}

// Special handling for stack overflow: since we don't have any (java) stack
// space left we use the pre-allocated & pre-initialized StackOverflowError
// klass to create a stack overflow error instance.  We do not call its
// constructor for the same reason (it is empty, anyway).
JRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* current))
  Handle exception = get_preinitialized_exception(
                                 vmClasses::StackOverflowError_klass(),
                                 CHECK);
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  // Remove the ScopedValue bindings in case we got a StackOverflowError
  // while we were trying to manipulate ScopedValue bindings.
  current->clear_scopedValueBindings();
  THROW_HANDLE(exception);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* current))
  Handle exception = get_preinitialized_exception(
                                 vmClasses::StackOverflowError_klass(),
                                 CHECK);
  java_lang_Throwable::set_message(exception(),
                                   Universe::delayed_stack_overflow_error_message());
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  // Remove the ScopedValue bindings in case we got a StackOverflowError
  // while we were trying to manipulate ScopedValue bindings.
  current->clear_scopedValueBindings();
  THROW_HANDLE(exception);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* current, char* name, char* message))
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name);
  if (ProfileTraps) {
    if (s == vmSymbols::java_lang_ArithmeticException()) {
      note_trap(current, Deoptimization::Reason_div0_check);
    } else if (s == vmSymbols::java_lang_NullPointerException()) {
      note_trap(current, Deoptimization::Reason_null_check);
    }
  }
  // create exception
  Handle exception = Exceptions::new_exception(current, s, message);
  current->set_vm_result(exception());
JRT_END


JRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* current, char* name, oopDesc* obj))
  // Produce the error message first because note_trap can safepoint
  ResourceMark rm(current);
  const char* klass_name = obj->klass()->external_name();
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name);
  if (ProfileTraps) {
    if (s == vmSymbols::java_lang_ArrayStoreException()) {
      note_trap(current, Deoptimization::Reason_array_check);
    } else {
      note_trap(current, Deoptimization::Reason_class_check);
    }
  }
  // create exception, with klass name as detail message
  Handle exception = Exceptions::new_exception(current, s, klass_name);
  current->set_vm_result(exception());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* current, arrayOopDesc* a, jint index))
  // Produce the error message first because note_trap can safepoint
  ResourceMark rm(current);
  stringStream ss;
  ss.print("Index %d out of bounds for length %d", index, a->length());

  if (ProfileTraps) {
    note_trap(current, Deoptimization::Reason_range_check);
  }

  THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException(
  JavaThread* current, oopDesc* obj))

  // Produce the error message first because note_trap can safepoint
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(
    current, obj->klass());

  if (ProfileTraps) {
    note_trap(current, Deoptimization::Reason_class_check);
  }

  // create exception
  THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
JRT_END

// exception_handler_for_exception(...) returns the continuation address,
// the exception oop (via TLS) and sets the bci/bcp for the continuation.
// The exception oop is returned to make sure it is preserved over GC (it
// is only on the stack if the exception was thrown explicitly via athrow).
// During this operation, the expression stack contains the values for the
// bci where the exception happened. If the exception was propagated back
// from a call, the expression stack contains the values for the bci at the
// invoke w/o arguments (i.e., as if one were inside the call).
// Note that the implementation of this method assumes it's only called when an exception has actually occurred.
JRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* current, oopDesc* exception))
  // We get here after we have unwound from a callee throwing an exception
  // into the interpreter.
  // Any deferred stack processing is notified of
  // the event via the StackWatermarkSet.
  StackWatermarkSet::after_unwind(current);

  LastFrameAccessor last_frame(current);
  Handle             h_exception(current, exception);
  methodHandle       h_method   (current, last_frame.method());
  constantPoolHandle h_constants(current, h_method->constants());
  bool               should_repeat;
  int                handler_bci;
  int                current_bci = last_frame.bci();

  if (current->frames_to_pop_failed_realloc() > 0) {
    // Allocation of scalar replaced object used in this frame
    // failed. Unconditionally pop the frame.
    current->dec_frames_to_pop_failed_realloc();
    current->set_vm_result(h_exception());
    // If the method is synchronized we already unlocked the monitor
    // during deoptimization so the interpreter needs to skip it when
    // the frame is popped.
    current->set_do_not_unlock_if_synchronized(true);
    return Interpreter::remove_activation_entry();
  }

  // Need to do this check first since when _do_not_unlock_if_synchronized
  // is set, we don't want to trigger any classloading which may make calls
  // into java, or surprisingly find a matching exception handler for bci 0
  // since at this moment the method hasn't been "officially" entered yet.
  if (current->do_not_unlock_if_synchronized()) {
    ResourceMark rm;
    assert(current_bci == 0, "bci isn't zero for do_not_unlock_if_synchronized");
    current->set_vm_result(exception);
    return Interpreter::remove_activation_entry();
  }

  do {
    should_repeat = false;

    // assertions
    assert(h_exception.not_null(), "null exceptions should be handled by athrow");
    // Check that exception is a subclass of Throwable.
    assert(h_exception->is_a(vmClasses::Throwable_klass()),
           "Exception not subclass of Throwable");

    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm(current);
      stringStream tempst;
      tempst.print("interpreter method <%s>\n"
                   " at bci %d for thread " INTPTR_FORMAT " (%s)",
                   h_method->print_value_string(), current_bci, p2i(current), current->name());
      Exceptions::log_exception(h_exception, tempst.as_string());
    }
    // Don't go paging in something which won't be used.
    //     else if (extable->length() == 0) {
    //       // disabled for now - interpreter is not using shortcut yet
    //       // (shortcut is not to call runtime if we have no exception handlers)
    //       // warning("performance bug: should not call runtime if method has no exception handlers");
    //     }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(h_exception);

    // exception handler lookup
    Klass* klass = h_exception->klass();
    handler_bci = Method::fast_exception_handler_bci_for(h_method, klass, current_bci, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // We threw an exception while trying to find the exception handler.
      // Transfer the new exception to the exception handle which will
      // be set into thread local storage, and do another lookup for an
      // exception handler for this exception, this time starting at the
      // BCI of the exception handler which caused the exception to be
      // thrown (bug 4307310).
      h_exception = Handle(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      if (handler_bci >= 0) {
        current_bci = handler_bci;
        should_repeat = true;
      }
    }
  } while (should_repeat == true);

#if INCLUDE_JVMCI
  if (EnableJVMCI && h_method->method_data() != nullptr) {
    ResourceMark rm(current);
    MethodData* mdo = h_method->method_data();

    // Lock to read ProfileData, and ensure lock is not broken by a safepoint
    MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);

    ProfileData* pdata = mdo->allocate_bci_to_data(current_bci, nullptr);
    if (pdata != nullptr && pdata->is_BitData()) {
      BitData* bit_data = (BitData*) pdata;
      bit_data->set_exception_seen();
    }
  }
#endif

  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
  // time throw or a stack unwinding throw and accordingly notify the debugger
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::post_exception_throw(current, h_method(), last_frame.bcp(), h_exception());
  }

  address continuation = nullptr;
  address handler_pc = nullptr;
  if (handler_bci < 0 || !current->stack_overflow_state()->reguard_stack((address) &continuation)) {
    // Forward exception to callee (leaving bci/bcp untouched) because (a) no
    // handler in this method, or (b) after a stack overflow there is not yet
    // enough stack space available to reprotect the stack.
    continuation = Interpreter::remove_activation_entry();
#if COMPILER2_OR_JVMCI
    // Count this for compilation purposes
    h_method->interpreter_throwout_increment(THREAD);
#endif
  } else {
    // handler in this method => change bci/bcp to handler bci/bcp and continue there
    handler_pc = h_method->code_base() + handler_bci;
    h_method->set_exception_handler_entered(handler_bci); // profiling
#ifndef ZERO
    set_bcp_and_mdp(handler_pc, current);
    continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
#else
    continuation = (address)(intptr_t) handler_bci;
#endif
  }

  // notify debugger of an exception catch
  // (this is good for exceptions caught in native methods as well)
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::notice_unwind_due_to_exception(current, h_method(), handler_pc, h_exception(), (handler_pc != nullptr));
  }

  current->set_vm_result(h_exception());
  return continuation;
JRT_END


JRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* current))
  assert(current->has_pending_exception(), "must only be called if there's an exception pending");
  // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
JRT_END


JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* current))
  THROW(vmSymbols::java_lang_AbstractMethodError());
JRT_END

// This method is called from the "abstract_entry" of the interpreter.
// At that point, the arguments have already been removed from the stack
// and therefore we don't have the receiver object at our fingertips. (Though,
// on some platforms the receiver still resides in a register...). Thus,
// we have no choice but to print an error message not containing the receiver
// type.
JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorWithMethod(JavaThread* current,
                                                                        Method* missingMethod))
  ResourceMark rm(current);
  assert(missingMethod != nullptr, "sanity");
  methodHandle m(current, missingMethod);
  LinkResolver::throw_abstract_method_error(m, THREAD);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorVerbose(JavaThread* current,
                                                                     Klass* recvKlass,
                                                                     Method* missingMethod))
  ResourceMark rm(current);
  methodHandle mh = methodHandle(current, missingMethod);
  LinkResolver::throw_abstract_method_error(mh, recvKlass, THREAD);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_InstantiationError(JavaThread* current))
  THROW(vmSymbols::java_lang_InstantiationError());
JRT_END


JRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
  THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(JavaThread* current,
                                                                              Klass* recvKlass,
                                                                              Klass* interfaceKlass))
  ResourceMark rm(current);
  char buf[1000];
  buf[0] = '\0';
  jio_snprintf(buf, sizeof(buf),
               "Class %s does not implement the requested interface %s",
               recvKlass ? recvKlass->external_name() : "nullptr",
               interfaceKlass ? interfaceKlass->external_name() : "nullptr");
  THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_NullPointerException(JavaThread* current))
  THROW(vmSymbols::java_lang_NullPointerException());
JRT_END

//------------------------------------------------------------------------------------------------------------------------
// Fields
//

void InterpreterRuntime::resolve_get_put(JavaThread* current, Bytecodes::Code bytecode) {
  // resolve field
  fieldDescriptor info;
  LastFrameAccessor last_frame(current);
  constantPoolHandle pool(current, last_frame.method()->constants());
  methodHandle m(current, last_frame.method());
  bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_nofast_putfield ||
                    bytecode == Bytecodes::_putstatic);
  bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);

  int field_index = last_frame.get_index_u2(bytecode);
  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_field_access(info, pool, field_index,
                                       m, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  if (pool->resolved_field_entry_at(field_index)->is_resolved(bytecode)) return;

  // compute auxiliary field attributes
  TosState state = as_TosState(info.field_type());

  // Resolution of put instructions on final fields is delayed. That is required so that
  // exceptions are thrown at the correct place (when the instruction is actually invoked).
  // If we do not resolve an instruction in the current pass, leaving the put_code
  // set to zero will cause the next put instruction to the same field to reresolve.

  // Resolution of put instructions to final instance fields with invalid updates (i.e.,
  // to final instance fields with updates originating from a method different than <init>)
  // is inhibited.
  // A putfield instruction targeting an instance final field must throw
  // an IllegalAccessError if the instruction is not in an instance
  // initializer method <init>. If resolution were not inhibited, a putfield
  // in an initializer method could be resolved in the initializer. Subsequent
  // putfield instructions to the same field would then use cached information.
  // As a result, those instructions would not pass through the VM. That is,
  // checks in resolve_field_access() would not be executed for those instructions
  // and the required IllegalAccessError would not be thrown.
  //
  // Also, we need to delay resolving getstatic and putstatic instructions until the
  // class is initialized. This is required so that access to the static
  // field will call the initialization function every time until the class
  // is completely initialized, as described in section 2.17.5 of the JVM Specification.
  InstanceKlass* klass = info.field_holder();
  bool uninitialized_static = is_static && !klass->is_initialized();
  bool has_initialized_final_update = info.field_holder()->major_version() >= 53 &&
                                      info.has_initialized_final_update();
  assert(!(has_initialized_final_update && !info.access_flags().is_final()), "Fields with initialized final updates must be final");

  Bytecodes::Code get_code = (Bytecodes::Code)0;
  Bytecodes::Code put_code = (Bytecodes::Code)0;
  if (!uninitialized_static) {
    if (is_static) {
      get_code = Bytecodes::_getstatic;
    } else {
      get_code = Bytecodes::_getfield;
    }
    if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) {
      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
    }
  }

  ResolvedFieldEntry* entry = pool->resolved_field_entry_at(field_index);
  entry->set_flags(info.access_flags().is_final(), info.access_flags().is_volatile(),
                   info.is_flat(), info.is_null_free_inline_type(),
                   info.has_null_marker(), info.has_internal_null_marker());

  entry->fill_in(info.field_holder(), info.offset(),
                 checked_cast<u2>(info.index()), checked_cast<u1>(state),
                 static_cast<u1>(get_code), static_cast<u1>(put_code));
}


//------------------------------------------------------------------------------------------------------------------------
// Synchronization
//
// The interpreter's synchronization code is factored out so that it can
// be shared by method invocation and synchronized blocks.
//%note synchronization_3

//%note monitor_1
JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* current, BasicObjectLock* elem))
  assert(LockingMode != LM_LIGHTWEIGHT, "Should call monitorenter_obj() when using the new lightweight locking");
#ifdef ASSERT
  current->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  Handle h_obj(current, elem->obj());
  assert(Universe::heap()->is_in_or_null(h_obj()),
         "must be null or an object");
  ObjectSynchronizer::enter(h_obj, elem->lock(), current);
  assert(Universe::heap()->is_in_or_null(elem->obj()),
         "must be null or an object");
#ifdef ASSERT
  current->last_frame().interpreter_frame_verify_monitor(elem);
#endif
JRT_END

// NOTE: We provide a separate implementation for the new lightweight locking to work around a limitation
// of registers in x86_32. This entry point accepts an oop instead of a BasicObjectLock*.
// The problem is that we would need to preserve the register that holds the BasicObjectLock,
// but we are using that register to hold the thread. We don't have enough registers to
// also keep the BasicObjectLock, but we don't really need it anyway, we only need
// the object. See also InterpreterMacroAssembler::lock_object().
// As soon as legacy stack-locking goes away we could remove the other monitorenter() entry
// point, and only use oop-accepting entries (same for monitorexit() below).
JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter_obj(JavaThread* current, oopDesc* obj))
  assert(LockingMode == LM_LIGHTWEIGHT, "Should call monitorenter() when not using the new lightweight locking");
  Handle h_obj(current, cast_to_oop(obj));
  assert(Universe::heap()->is_in_or_null(h_obj()),
         "must be null or an object");
  ObjectSynchronizer::enter(h_obj, nullptr, current);
  return;
JRT_END

JRT_LEAF(void, InterpreterRuntime::monitorexit(BasicObjectLock* elem))
  oop obj = elem->obj();
  assert(Universe::heap()->is_in(obj), "must be an object");
  // The object could become unlocked through a JNI call, which we have no other checks for.
  // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
  if (obj->is_unlocked()) {
    if (CheckJNICalls) {
      fatal("Object has been unlocked by JNI");
    }
    return;
  }
  ObjectSynchronizer::exit(obj, elem->lock(), JavaThread::current());
  // Free entry. If it is not cleared, the exception handling code will try to unlock the monitor
  // again at method exit or in the case of an exception.
  elem->set_obj(nullptr);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* current))
  THROW(vmSymbols::java_lang_IllegalMonitorStateException());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* current))
  // Returns an IllegalMonitorStateException to install into the current thread. The
  // pending_exception flag is cleared so normal exception handling does not
  // trigger. Any currently installed exception will be overwritten. This
  // method will be called during an exception unwind.

  assert(!HAS_PENDING_EXCEPTION, "no pending exception");
  Handle exception(current, current->vm_result());
  assert(exception() != nullptr, "vm result should be set");
  current->set_vm_result(nullptr); // clear vm result before continuing (may cause memory leaks and assert failures)
  exception = get_preinitialized_exception(vmClasses::IllegalMonitorStateException_klass(), CATCH);
  current->set_vm_result(exception());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_identity_exception(JavaThread* current, oopDesc* obj))
  Klass* klass = cast_to_oop(obj)->klass();
  ResourceMark rm(THREAD);
  const char* desc = "Cannot synchronize on an instance of value class ";
  const char* className = klass->external_name();
  size_t msglen = strlen(desc) + strlen(className) + 1;
  char* message = NEW_RESOURCE_ARRAY(char, msglen);
  if (nullptr == message) {
    // Out of memory: can't create detailed error message
    THROW_MSG(vmSymbols::java_lang_IdentityException(), className);
  } else {
    jio_snprintf(message, msglen, "%s%s", desc, className);
    THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
  }
JRT_END

//------------------------------------------------------------------------------------------------------------------------
// Invokes

JRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* current, Method* method, address bcp))
  return method->orig_bytecode_at(method->bci_from(bcp));
JRT_END

JRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* current, Method* method, address bcp, Bytecodes::Code new_code))
  method->set_orig_bytecode_at(method->bci_from(bcp), new_code);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* current, Method* method, address bcp))
  JvmtiExport::post_raw_breakpoint(current, method, bcp);
JRT_END

void InterpreterRuntime::resolve_invoke(JavaThread* current, Bytecodes::Code bytecode) {
  LastFrameAccessor last_frame(current);
  // extract receiver from the outgoing argument list if necessary
  Handle receiver(current, nullptr);
  if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface ||
      bytecode == Bytecodes::_invokespecial) {
    ResourceMark rm(current);
    methodHandle m (current, last_frame.method());
    Bytecode_invoke call(m, last_frame.bci());
    Symbol* signature = call.signature();
    receiver = Handle(current, last_frame.callee_receiver(signature));

    assert(Universe::heap()->is_in_or_null(receiver()),
           "sanity check");
    assert(receiver.is_null() ||
           !Universe::heap()->is_in(receiver->klass()),
           "sanity check");
  }

  // resolve method
  CallInfo info;
  constantPoolHandle pool(current, last_frame.method()->constants());
  ConstantPoolCache* cache = pool->cache();

  methodHandle resolved_method;

  int method_index = last_frame.get_index_u2(bytecode);
  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_invoke(info, receiver, pool,
                                 method_index, bytecode,
                                 THREAD);

    if (HAS_PENDING_EXCEPTION) {
      if (ProfileTraps && PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_NullPointerException()) {
        // Preserve the original exception across the call to note_trap()
        PreserveExceptionMark pm(current);
        // Recording the trap will help the compiler to potentially recognize this exception as "hot"
        note_trap(current, Deoptimization::Reason_null_check);
      }
      return;
    }

    if (JvmtiExport::can_hotswap_or_post_breakpoint() && info.resolved_method()->is_old()) {
      resolved_method = methodHandle(current, info.resolved_method()->get_new_method());
    } else {
      resolved_method = methodHandle(current, info.resolved_method());
    }
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  if (cache->resolved_method_entry_at(method_index)->is_resolved(bytecode)) return;

#ifdef ASSERT
  if (bytecode == Bytecodes::_invokeinterface) {
    if (resolved_method->method_holder() == vmClasses::Object_klass()) {
      // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
      // (see also CallInfo::set_interface for details)
      assert(info.call_kind() == CallInfo::vtable_call ||
             info.call_kind() == CallInfo::direct_call, "");
      assert(resolved_method->is_final() || info.has_vtable_index(),
             "should have been set already");
    } else if (!resolved_method->has_itable_index()) {
      // Resolved something like CharSequence.toString.  Use vtable not itable.
      assert(info.call_kind() != CallInfo::itable_call, "");
    } else {
      // Setup itable entry
      assert(info.call_kind() == CallInfo::itable_call, "");
      int index = resolved_method->itable_index();
      assert(info.itable_index() == index, "");
    }
  } else if (bytecode == Bytecodes::_invokespecial) {
    assert(info.call_kind() == CallInfo::direct_call, "must be direct call");
  } else {
    assert(info.call_kind() == CallInfo::direct_call ||
           info.call_kind() == CallInfo::vtable_call, "");
  }
#endif
  // Get sender and only set cpCache entry to resolved if it is not an
  // interface.  The receiver for invokespecial calls within interface
  // methods must be checked for every call.
  InstanceKlass* sender = pool->pool_holder();

  switch (info.call_kind()) {
  case CallInfo::direct_call:
    cache->set_direct_call(bytecode, method_index, resolved_method, sender->is_interface());
    break;
  case CallInfo::vtable_call:
    cache->set_vtable_call(bytecode, method_index, resolved_method, info.vtable_index());
    break;
  case CallInfo::itable_call:
    cache->set_itable_call(
      bytecode,
      method_index,
      info.resolved_klass(),
      resolved_method,
      info.itable_index());
    break;
  default: ShouldNotReachHere();
  }
}


// First time execution: Resolve symbols, create a permanent MethodType object.
void InterpreterRuntime::resolve_invokehandle(JavaThread* current) {
  const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
  LastFrameAccessor last_frame(current);

  // resolve method
  CallInfo info;
  constantPoolHandle pool(current, last_frame.method()->constants());
  int method_index = last_frame.get_index_u2(bytecode);
  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 method_index, bytecode,
                                 CHECK);
  } // end JvmtiHideSingleStepping

  pool->cache()->set_method_handle(method_index, info);
}

// First time execution: Resolve symbols, create a permanent CallSite object.
void InterpreterRuntime::resolve_invokedynamic(JavaThread* current) {
  LastFrameAccessor last_frame(current);
  const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;

  // resolve method
  CallInfo info;
  constantPoolHandle pool(current, last_frame.method()->constants());
  int index = last_frame.get_index_u4(bytecode);
  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 index, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  pool->cache()->set_dynamic_call(info, pool->decode_invokedynamic_index(index));
}

// This function is the interface to the assembly code. It returns the resolved
// cpCache entry.  This doesn't safepoint, but the helper routines safepoint.
// This function will check for redefinition!
JRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(JavaThread* current, Bytecodes::Code bytecode)) {
  switch (bytecode) {
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    resolve_get_put(current, bytecode);
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    resolve_invoke(current, bytecode);
    break;
  case Bytecodes::_invokehandle:
    resolve_invokehandle(current);
    break;
  case Bytecodes::_invokedynamic:
    resolve_invokedynamic(current);
    break;
  default:
    fatal("unexpected bytecode: %s", Bytecodes::name(bytecode));
    break;
  }
}
JRT_END

//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous


nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* current, address branch_bcp) {
  // Enable WXWrite: the function is called directly by interpreter.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));

  // frequency_counter_overflow_inner can throw async exception.
  nmethod* nm = frequency_counter_overflow_inner(current, branch_bcp);
  assert(branch_bcp != nullptr || nm == nullptr, "always returns null for non OSR requests");
  if (branch_bcp != nullptr && nm != nullptr) {
    // This was a successful request for an OSR nmethod.  Because
    // frequency_counter_overflow_inner ends with a safepoint check,
    // nm could have been unloaded so look it up again.  It's unsafe
    // to examine nm directly since it might have been freed and used
    // for something else.
    LastFrameAccessor last_frame(current);
    Method* method = last_frame.method();
    int bci = method->bci_from(last_frame.bcp());
    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (nm != nullptr && bs_nm != nullptr) {
      // in case the transition passed a safepoint we need to barrier this again
      if (!bs_nm->nmethod_osr_entry_barrier(nm)) {
        nm = nullptr;
      }
    }
  }
  if (nm != nullptr && current->is_interp_only_mode()) {
    // Normally we never get an nm if is_interp_only_mode() is true, because
    // policy()->event has a check for this and won't compile the method when
    // true. However, it's possible for is_interp_only_mode() to become true
    // during the compilation. We don't want to return the nm in that case
    // because we want to continue to execute interpreted.
    nm = nullptr;
  }
#ifndef PRODUCT
  if (TraceOnStackReplacement) {
    if (nm != nullptr) {
      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", p2i(nm->osr_entry()));
      nm->print();
    }
  }
#endif
  return nm;
}

JRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* current, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(current);

  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(current, last_frame.method());
  const int branch_bci = branch_bcp != nullptr ? method->bci_from(branch_bcp) : InvocationEntryBci;
  const int bci        = branch_bcp != nullptr ?
                         method->bci_from(last_frame.bcp()) : InvocationEntryBci;

  nmethod* osr_nm = CompilationPolicy::event(method, method, branch_bci, bci, CompLevel_none, nullptr, CHECK_NULL);

  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (osr_nm != nullptr && bs_nm != nullptr) {
    if (!bs_nm->nmethod_osr_entry_barrier(osr_nm)) {
      osr_nm = nullptr;
    }
  }
  return osr_nm;
JRT_END

JRT_LEAF(jint, InterpreterRuntime::bcp_to_di(Method* method, address cur_bcp))
  assert(ProfileInterpreter, "must be profiling interpreter");
  int bci = method->bci_from(cur_bcp);
  MethodData* mdo = method->method_data();
  if (mdo == nullptr) return 0;
  return mdo->bci_to_di(bci);
JRT_END

#ifdef ASSERT
JRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, address mdp))
  assert(ProfileInterpreter, "must be profiling interpreter");

  MethodData* mdo = method->method_data();
  assert(mdo != nullptr, "must not be null");

  int bci = method->bci_from(bcp);

  address mdp2 = mdo->bci_to_dp(bci);
  if (mdp != mdp2) {
    ResourceMark rm;
    tty->print_cr("FAILED verify : actual mdp %p   expected mdp %p @ bci %d", mdp, mdp2, bci);
    int current_di = mdo->dp_to_di(mdp);
    int expected_di = mdo->dp_to_di(mdp2);
    tty->print_cr("  actual di %d   expected di %d", current_di, expected_di);
    int expected_approx_bci = mdo->data_at(expected_di)->bci();
    int approx_bci = -1;
    if (current_di >= 0) {
      approx_bci = mdo->data_at(current_di)->bci();
    }
    tty->print_cr("  actual bci is %d  expected bci %d", approx_bci, expected_approx_bci);
    mdo->print_on(tty);
    method->print_codes();
  }
  assert(mdp == mdp2, "wrong mdp");
JRT_END
#endif // ASSERT

JRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* current, int return_bci))
  assert(ProfileInterpreter, "must be profiling interpreter");
  ResourceMark rm(current);
  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  MethodData* h_mdo = last_frame.method()->method_data();

  // Grab a lock to ensure atomic access to setting the return bci and
  // the displacement.  This can block and GC, invalidating all naked oops.
  MutexLocker ml(RetData_lock);

  // ProfileData is essentially a wrapper around a derived oop, so we
  // need to take the lock before making any ProfileData structures.
  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(last_frame.mdp()));
  guarantee(data != nullptr, "profile data must be valid");
  RetData* rdata = data->as_RetData();
  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
  last_frame.set_mdp(new_mdp);
JRT_END

JRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* current, Method* m))
  return Method::build_method_counters(current, m);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* current))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.
JRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* current))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.

  // JRT_END does an implicit safepoint check, hence we are guaranteed to block
  // if this is called during a safepoint

  if (JvmtiExport::should_post_single_step()) {
    // This function is called by the interpreter when single stepping. Such single
    // stepping could unwind a frame. Then, it is important that we process any frames
    // that we might return into.
    StackWatermarkSet::before_unwind(current);

    // We are called during regular safepoints and when the VM is
    // single stepping. If any thread is marked for single stepping,
    // then we may have JVMTI work to do.
    LastFrameAccessor last_frame(current);
    JvmtiExport::at_single_stepping_point(current, last_frame.method(), last_frame.bcp());
  }
JRT_END

JRT_LEAF(void, InterpreterRuntime::at_unwind(JavaThread* current))
  assert(current == JavaThread::current(), "pre-condition");
  // This function is called by the interpreter when the return poll found a reason
  // to call the VM. The reason could be that we are returning into a not yet safe
  // to access frame. We handle that below.
  // Note that this path does not check for single stepping, because we do not want
  // to single step when unwinding frames for an exception being thrown. Instead,
  // such single stepping code will use the safepoint table, which will use the
  // InterpreterRuntime::at_safepoint callback.
  StackWatermarkSet::before_unwind(current);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread* current, oopDesc* obj,
                                                      ResolvedFieldEntry *entry))

  assert(entry->is_valid(), "Invalid ResolvedFieldEntry");
  // check the access_flags for the field in the klass

  InstanceKlass* ik = entry->field_holder();
  int index = entry->field_index();
  if (!ik->field_status(index).is_access_watched()) return;

  bool is_static = (obj == nullptr);
  bool is_flat = entry->is_flat();
  HandleMark hm(current);

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(current, obj);
  }
  InstanceKlass* field_holder = entry->field_holder();
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(field_holder, entry->field_offset(), is_static, is_flat);
  LastFrameAccessor last_frame(current);
  JvmtiExport::post_field_access(current, last_frame.method(), last_frame.bcp(), field_holder, h_obj, fid);
JRT_END

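// Post a JVMTI field modification event if a modification watch is set on the field.
// The new value arrives as a raw jvalue; on 32-bit platforms longs and doubles are
// first reassembled from two interpreter stack slots.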
JRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread* current, oopDesc* obj,
                                                            ResolvedFieldEntry *entry, jvalue *value))

  assert(entry->is_valid(), "Invalid ResolvedFieldEntry");
  InstanceKlass* ik = entry->field_holder();

  // check the access_flags for the field in the klass
  int index = entry->field_index();
  // bail out if field modifications are not watched
  if (!ik->field_status(index).is_modification_watched()) return;

  char sig_type = '\0';

  switch((TosState)entry->tos_state()) {
    case btos: sig_type = JVM_SIGNATURE_BYTE;    break;
    case ztos: sig_type = JVM_SIGNATURE_BOOLEAN; break;
    case ctos: sig_type = JVM_SIGNATURE_CHAR;    break;
    case stos: sig_type = JVM_SIGNATURE_SHORT;   break;
    case itos: sig_type = JVM_SIGNATURE_INT;     break;
    case ftos: sig_type = JVM_SIGNATURE_FLOAT;   break;
    case atos: sig_type = JVM_SIGNATURE_CLASS;   break;
    case ltos: sig_type = JVM_SIGNATURE_LONG;    break;
    case dtos: sig_type = JVM_SIGNATURE_DOUBLE;  break;
    default:  ShouldNotReachHere(); return;
  }

  bool is_static = (obj == nullptr);
  bool is_flat = entry->is_flat();

  HandleMark hm(current);
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, entry->field_offset(), is_static, is_flat);
  jvalue fvalue;
#ifdef _LP64
  fvalue = *value;
#else
  // Long/double values are stored unaligned and also noncontiguously with
  // tagged stacks. We can't just do a simple assignment even in the non-
  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
  // We assume that the two halves of longs/doubles are stored in interpreter
  // stack slots in platform-endian order.
  jlong_accessor u;
  jint* newval = (jint*)value;
  u.words[0] = newval[0];
  u.words[1] = newval[Interpreter::stackElementWords]; // skip if tag
  fvalue.j = u.long_value;
#endif // _LP64

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(current, obj);
  }

  LastFrameAccessor last_frame(current);
  JvmtiExport::post_raw_field_modification(current, last_frame.method(), last_frame.bcp(), ik, h_obj,
                                           fid, sig_type, &fvalue);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread* current))
  LastFrameAccessor last_frame(current);
  JvmtiExport::post_method_entry(current, last_frame.method(), last_frame.get_frame());
JRT_END


// This is a JRT_BLOCK_ENTRY because we have to stash away the return oop
// before transitioning to VM, and restore it after transitioning back
// to Java. The return oop at the top-of-stack is not walked by the GC.
JRT_BLOCK_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread* current))
  LastFrameAccessor last_frame(current);
  JvmtiExport::post_method_exit(current, last_frame.method(), last_frame.get_frame());
JRT_END

JRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
{
  return (Interpreter::contains(Continuation::get_top_return_pc_post_barrier(JavaThread::current(), pc)) ? 1 : 0);
}
JRT_END


// Implementation of SignatureHandlerLibrary

#ifndef SHARING_FAST_NATIVE_FINGERPRINTS
// Dummy definition (else normalization method is defined in CPU
// dependent code)
uint64_t InterpreterRuntime::normalize_fast_native_fingerprint(uint64_t fingerprint) {
  return fingerprint;
}
#endif

address SignatureHandlerLibrary::set_handler_blob() {
  BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
  if (handler_blob == nullptr) {
    return nullptr;
  }
  address handler = handler_blob->code_begin();
  _handler_blob = handler_blob;
  _handler = handler;
  return handler;
}

void SignatureHandlerLibrary::initialize() {
  if (_fingerprints != nullptr) {
    return;
  }
  if (set_handler_blob() == nullptr) {
    vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers");
  }

  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
                                      SignatureHandlerLibrary::buffer_size);
  _buffer = bb->code_begin();

  _fingerprints = new (mtCode) GrowableArray<uint64_t>(32, mtCode);
  _handlers     = new (mtCode) GrowableArray<address>(32, mtCode);
}

address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
  address handler = _handler;
  int insts_size = buffer->pure_insts_size();
  if (handler + insts_size > _handler_blob->code_end()) {
    // get a new handler blob
    handler = set_handler_blob();
  }
  if (handler != nullptr) {
    memcpy(handler, buffer->insts_begin(), insts_size);
    pd_set_handler(handler);
    ICache::invalidate_range(handler, insts_size);
    _handler = handler + insts_size;
  }
  return handler;
}

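// Install a signature handler for 'method' if it does not have one yet: either a
// customized handler generated from the method's signature fingerprint and cached in
// _fingerprints/_handlers, or the generic slow signature handler as a fallback.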
generated)", 1540 _handlers->length(), 1541 (method->is_static() ? "static" : "receiver"), 1542 method->name_and_sig_as_C_string(), 1543 fingerprint, 1544 buffer.insts_size()); 1545 if (buffer.insts_size() > 0) { 1546 Disassembler::decode(handler, handler + buffer.insts_size(), tty 1547 NOT_PRODUCT(COMMA &buffer.asm_remarks())); 1548 } 1549 #ifndef PRODUCT 1550 address rh_begin = Interpreter::result_handler(method()->result_type()); 1551 if (CodeCache::contains(rh_begin)) { 1552 // else it might be special platform dependent values 1553 tty->print_cr(" --- associated result handler ---"); 1554 address rh_end = rh_begin; 1555 while (*(int*)rh_end != 0) { 1556 rh_end += sizeof(int); 1557 } 1558 Disassembler::decode(rh_begin, rh_end); 1559 } else { 1560 tty->print_cr(" associated result handler: " PTR_FORMAT, p2i(rh_begin)); 1561 } 1562 #endif 1563 } 1564 // add handler to library 1565 _fingerprints->append(fingerprint); 1566 _handlers->append(handler); 1567 // set handler index 1568 assert(_fingerprints->length() == _handlers->length(), "sanity check"); 1569 handler_index = _fingerprints->length() - 1; 1570 } 1571 } 1572 // Set handler under SignatureHandlerLibrary_lock 1573 if (handler_index < 0) { 1574 // use generic signature handler 1575 method->set_signature_handler(Interpreter::slow_signature_handler()); 1576 } else { 1577 // set handler 1578 method->set_signature_handler(_handlers->at(handler_index)); 1579 } 1580 } else { 1581 DEBUG_ONLY(JavaThread::current()->check_possible_safepoint()); 1582 // use generic signature handler 1583 method->set_signature_handler(Interpreter::slow_signature_handler()); 1584 } 1585 } 1586 #ifdef ASSERT 1587 int handler_index = -1; 1588 int fingerprint_index = -2; 1589 { 1590 // '_handlers' and '_fingerprints' are 'GrowableArray's and are NOT synchronized 1591 // in any way if accessed from multiple threads. To avoid races with another 1592 // thread which may change the arrays in the above, mutex protected block, we 1593 // have to protect this read access here with the same mutex as well! 
void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
  int handler_index = -1;
  // use customized signature handler
  MutexLocker mu(SignatureHandlerLibrary_lock);
  // make sure data structure is initialized
  initialize();
  fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
  handler_index = _fingerprints->find(fingerprint);
  // create handler if necessary
  if (handler_index < 0) {
    if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
      tty->cr();
      tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
                    _handlers->length(),
                    p2i(handler),
                    fingerprint);
    }
    _fingerprints->append(fingerprint);
    _handlers->append(handler);
  } else {
    if (PrintSignatureHandlers) {
      tty->cr();
      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: " PTR_FORMAT ", new : " PTR_FORMAT ")",
                    _handlers->length(),
                    fingerprint,
                    p2i(_handlers->at(handler_index)),
                    p2i(handler));
    }
  }
}


BufferBlob*              SignatureHandlerLibrary::_handler_blob = nullptr;
address                  SignatureHandlerLibrary::_handler      = nullptr;
GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = nullptr;
GrowableArray<address>*  SignatureHandlerLibrary::_handlers     = nullptr;
address                  SignatureHandlerLibrary::_buffer       = nullptr;


JRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* current, Method* method))
  methodHandle m(current, method);
  assert(m->is_native(), "sanity check");
  // lookup native function entry point if it doesn't exist
  if (!m->has_native_function()) {
    NativeLookup::lookup(m, CHECK);
  }
  // make sure signature handler is installed
  SignatureHandlerLibrary::add(m);
  // The interpreter entry point checks the signature handler first,
  // before trying to fetch the native entry point and klass mirror.
  // We must set the signature handler last, so that multiple processors
  // preparing the same method will be sure to see non-null entry & mirror.
JRT_END

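// Move the outgoing arguments of the current invoke bytecode from src_address to
// dest_address. The number of bytes copied is the argument size of the call (plus one
// stack element for the receiver, if any) in interpreter stack slots.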
#if defined(IA32) || defined(AMD64) || defined(ARM)
JRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* current, void* src_address, void* dest_address))
  assert(current == JavaThread::current(), "pre-condition");
  if (src_address == dest_address) {
    return;
  }
  ResourceMark rm;
  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "");
  jint bci = last_frame.bci();
  methodHandle mh(current, last_frame.method());
  Bytecode_invoke invoke(mh, bci);
  ArgumentSizeComputer asc(invoke.signature());
  int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
JRT_END
#endif

#if INCLUDE_JVMTI
// This is support for the JVMTI PopFrame interface.
// Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
// and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
// The member_name argument is a saved reference (in local#0) to the member_name.
// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle.
// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated.
JRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* current, address member_name,
                                                            Method* method, address bcp))
  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
  if (code != Bytecodes::_invokestatic) {
    return;
  }
  ConstantPool* cpool = method->constants();
  int cp_index = Bytes::get_native_u2(bcp + 1);
  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index, code));
  Symbol* mname = cpool->name_ref_at(cp_index, code);

  if (MethodHandles::has_member_arg(cname, mname)) {
    oop member_name_oop = cast_to_oop(member_name);
    if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
      // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
      member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
    }
    current->set_vm_result(member_name_oop);
  } else {
    current->set_vm_result(nullptr);
  }
JRT_END
#endif // INCLUDE_JVMTI

#ifndef PRODUCT
// This must be a JRT_LEAF function because the interpreter must save registers on x86 to
// call this, which changes rsp and makes the interpreter's expression stack not walkable.
// The generated code still uses call_VM because that will set up the frame pointer for
// bcp and method.
JRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* current, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  assert(current == JavaThread::current(), "pre-condition");
  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "must be an interpreted frame");
  methodHandle mh(current, last_frame.method());
  BytecodeTracer::trace_interpreter(mh, last_frame.bcp(), tos, tos2);
  return preserve_this_value;
JRT_END
#endif // !PRODUCT