/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"


// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char* name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}

// Implementation of Runtime1

CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char* Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};

#ifndef PRODUCT
// statistics
uint Runtime1::_generic_arraycopystub_cnt = 0;
uint Runtime1::_arraycopy_slowcase_cnt = 0;
uint Runtime1::_arraycopy_checkcast_cnt = 0;
uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
uint Runtime1::_new_type_array_slowcase_cnt = 0;
uint Runtime1::_new_object_array_slowcase_cnt = 0;
uint Runtime1::_new_instance_slowcase_cnt = 0;
uint Runtime1::_new_multi_array_slowcase_cnt = 0;
uint Runtime1::_monitorenter_slowcase_cnt = 0;
uint Runtime1::_monitorexit_slowcase_cnt = 0;
uint Runtime1::_patch_code_slowcase_cnt = 0;
uint Runtime1::_throw_range_check_exception_count = 0;
uint Runtime1::_throw_index_exception_count = 0;
uint Runtime1::_throw_div0_exception_count = 0;
uint Runtime1::_throw_null_pointer_exception_count = 0;
uint Runtime1::_throw_class_cast_exception_count = 0;
uint Runtime1::_throw_incompatible_class_change_error_count = 0;
uint Runtime1::_throw_count = 0;

static uint _byte_arraycopy_stub_cnt = 0;
static uint _short_arraycopy_stub_cnt = 0;
static uint _int_arraycopy_stub_cnt = 0;
static uint _long_arraycopy_stub_cnt = 0;
static uint _oop_arraycopy_stub_cnt = 0;

address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_stub_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
  default:
    ShouldNotReachHere();
    return nullptr;
  }
}


#endif

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted(JavaThread* current) {
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller(JavaThread* current) {
  if (!caller_is_deopted(current)) {
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame runtime_frame = current->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");
  }
}

class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 private:
  Runtime1::StubID _id;
 public:
  StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    return Runtime1::generate_code_for(_id, sasm);
  }
};

CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  OopMapSet* oop_maps;
  int frame_size;
  bool must_gc_arguments;

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
  // generate code for runtime stub
  oop_maps = cl->generate_code(sasm);
  assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");
  assert(!expect_oop_map || oop_maps != nullptr, "must have an oopmap");

  // align so printing shows nops instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();

  frame_size = sasm->frame_size();
  must_gc_arguments = sasm->must_gc_arguments();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 frame_size,
                                                 oop_maps,
                                                 must_gc_arguments);
  assert(blob != nullptr, "blob must exist");
  return blob;
}

void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  bool expect_oop_map = true;
#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
  // These stubs don't need to have an oopmap
  case dtrace_object_alloc_id:
  case slow_subtype_check_id:
  case fpu2long_stub_id:
  case unwind_exception_id:
  case counter_overflow_id:
    expect_oop_map = false;
    break;
  default:
    break;
  }
#endif
  StubIDStubAssemblerCodeGenClosure cl(id);
  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
  // install blob
  _blobs[id] = blob;
}

void Runtime1::initialize(BufferBlob* blob) {
  init_counters();
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != nullptr) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
  BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
  bs->generate_c1_runtime_stubs(blob);
}

CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}


const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}

const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef JFR_HAVE_INTRINSICS
  FUNCTION_CASE(entry, JfrTime::time_function());
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
  FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
  FUNCTION_CASE(entry, StubRoutines::dexp());
  FUNCTION_CASE(entry, StubRoutines::dlog());
  FUNCTION_CASE(entry, StubRoutines::dlog10());
  FUNCTION_CASE(entry, StubRoutines::dpow());
  FUNCTION_CASE(entry, StubRoutines::dsin());
  FUNCTION_CASE(entry, StubRoutines::dcos());
  FUNCTION_CASE(entry, StubRoutines::dtan());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}


JRT_ENTRY_PROF(void, Runtime1, new_instance, Runtime1::new_instance(JavaThread* current, Klass* klass))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_instance_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_type_array, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare, but this runtime patch is stressful to deoptimization,
  // so force a deopt here to stress that path.
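  // (Editor's note, hedged: DeoptimizeALot is a develop/stress flag, so the
  // block below is inert in normal product runs.)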
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_object_array, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare, but this runtime patch is stressful to deoptimization,
  // so force a deopt here to stress that path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_multi_array, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
  ResourceMark rm(current);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END


// counter_overflow() is called from within C1-compiled methods. The enclosing
// method is the method associated with the top activation record. The inlinee
// method (which may be inlined into the enclosing method) is passed as an
// argument; to make this possible it is embedded in the code as a constant.
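//
// A worked example (an editor's sketch, not from the original source): if the
// overflowing branch is an _ifeq at branch_bci whose two-byte displacement
// decodes to -12, the switch below reads that displacement from the raw
// bytecode stream and computes bci = branch_bci - 12, i.e. the loop header
// that the back edge targets; that bci is what the compilation policy sees.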
static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
  nmethod* osr_nm = nullptr;
  methodHandle method(current, m);

  RegisterMap map(current,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame fr = current->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != nullptr && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(current, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
  }
  osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
  return osr_nm;
}

JRT_BLOCK_ENTRY_PROF(address, Runtime1, counter_overflow, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(current, bci, method);
    if (osr_nm != nullptr) {
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = current->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(current, fr.id());
    }
  JRT_BLOCK_END
  return nullptr;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the VM. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top-frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return to is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob's
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
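//
// (Summary, added for orientation and hedged: the helper below returns one of
// a handler pc found in the nmethod's exception cache, a freshly computed
// handler pc that may then be added to that cache, nullptr to force an unwind
// when the stack guard pages could not be re-enabled, or the deopt blob's
// unpack_with_exception entry when JVMTI post-on-exceptions forces a
// deoptimization.)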
JRT_ENTRY_NO_ASYNC_PROF(static address, Runtime1, exception_handler_for_pc_helper, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  current->set_is_method_handle_return(false);

  Handle exception(current, ex);

  // This function is called when we are about to throw an exception. Therefore,
  // we have to poll the stack watermark barrier to make sure that not yet safe
  // stack frames are made safe before returning into them.
  if (current->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {
    // The Runtime1::handle_exception_from_callee_id handler is invoked after the
    // frame has been unwound. It instead builds its own stub frame, to call the
    // runtime. But the throwing frame has already been unwound here.
    StackWatermarkSet::after_unwind(current);
  }

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::include,
                    RegisterMap::WalkContinuation::skip);
    frame exception_frame = current->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
  assert(exception.not_null(), "null exceptions should be handled by throw_exception");
  // Check that exception is a subclass of Throwable
  assert(exception->is_a(vmClasses::Throwable_klass()),
         "Exception not subclass of Throwable");

  // debugging support
  // tracing
  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm; // print_value_string
    stringStream tempst;
    assert(nm->method() != nullptr, "Unexpected null method()");
    tempst.print("C1 compiled method <%s>\n"
                 " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                 nm->method()->print_value_string(), p2i(pc), p2i(current));
    Exceptions::log_exception(exception, tempst.freeze());
  }
  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

  // Check the stack guard pages and re-enable them if necessary and there is
  // enough space on the stack to do so. Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here. If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site. This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::include,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame stub_frame = current->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
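    // (Hedged note: after the deopt below, the throwing frame is re-executed
    // in the interpreter, which posts the JVMTI throw/catch events itself.)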
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != nullptr) {
      // Set flag if return address is a method handle call site.
      current->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method. Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation as null).
  address continuation = nullptr;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    current->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    current->set_exception_oop(exception());
    current->set_exception_pc(pc);

    // The exception cache is used only for non-implicit exceptions.
    // Update the exception cache only when another exception did not
    // happen during the computation of the compiled exception handler.
    // Checking for exception oop equality is not sufficient because
    // some exceptions are pre-allocated and reused.
    if (continuation != nullptr && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  current->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  current->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(current), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from the exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
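// (Sketch of the expected flow, hedged: the exception stub stashes the
// exception oop and pc in the thread, calls this method while nominally still
// "in Java", and then jumps to whatever continuation is returned.)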
address Runtime1::exception_handler_for_pc(JavaThread* current) {
  oop exception = current->exception_oop();
  address pc = current->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(NoHandleMark nhm);
  nmethod* nm = nullptr;
  address continuation = nullptr;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != nullptr && caller_is_deopted(current)) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != nullptr, "no handler found");
  return continuation;
}


JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_range_check_exception_count++;
  }
#endif
  const int len = 35;
  assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
  char message[2 * jintAsStringSize + len];
  os::snprintf_checked(message, sizeof(message), "Index %d out of bounds for length %d", index, a->length());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_index_exception_count++;
  }
#endif
  char message[16];
  os::snprintf_checked(message, sizeof(message), "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_div0_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_null_pointer_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_class_cast_exception_count++;
  }
#endif
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END


JRT_BLOCK_ENTRY_PROF(void, Runtime1, monitorenter, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  if (LockingMode == LM_MONITOR) {
    lock->set_obj(obj);
  }
  assert(LockingMode == LM_LIGHTWEIGHT || obj == lock->obj(), "must match");
  SharedRuntime::monitor_enter_helper(obj, LockingMode == LM_LIGHTWEIGHT ? nullptr : lock->lock(), current);
JRT_END


JRT_LEAF_PROF(void, Runtime1, monitorexit, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
  assert(current == JavaThread::current(), "pre-condition");
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorexit_slowcase_cnt++;
  }
#endif
  assert(current->last_Java_sp(), "last_Java_sp must be set");
  oop obj = lock->obj();
  assert(oopDesc::is_oop(obj), "must be null or an object");
  SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY_PROF(void, Runtime1, deoptimize, Runtime1::deoptimize(JavaThread* current, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != nullptr, "Sanity check");
  methodHandle method(current, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant()) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
        if (trap_mdo != nullptr) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END


#ifndef DEOPTIMIZE_WHEN_PATCHING

static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), caller,
                                     Bytecodes::java_code(code), true /*initialize_class*/, CHECK_NULL);
  return result.field_holder();
}


//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated. It handles
// references to classes, fields and forcing of initialization. Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code.
// The case for a static field is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread. The
// current version uses the latter.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//             ;; patch body
//             movl <const>, reg           (for class constants)
//        <or> movl [reg1 + <const>], reg  (for field offsets)
//        <or> movl reg, [reg1 + <const>]  (for field offsets)
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way. The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized. getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it. This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class. It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized, the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub. Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below. The
// initializing class will continue on its way. Once the class is
// fully_initialized, the initializing_thread of the class becomes
// null, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.

// NB:
//
// Patchable instruction sequences inherently exhibit race conditions,
// where thread A is patching an instruction at the same time thread B
// is executing it. The algorithms we use ensure that any observation
// that B can make on any intermediate states during A's patching will
// always end up with a correct outcome.
// This is easiest if there are
// few or no intermediate states. (Some inline caches have two
// related instructions that must be patched in tandem. For those,
// intermediate states seem to be unavoidable, but we will get the
// right answer from all possible observation orders.)
//
// When patching the entry instruction at the head of a method, or a
// linkable call instruction inside of a method, we try very hard to
// use a patch sequence which executes as a single memory transaction.
// This means, in practice, that when thread A patches an instruction,
// it should patch a 32-bit or 64-bit word that somehow overlaps the
// instruction or is contained in it. We believe that memory hardware
// will never break up such a word write, if it is naturally aligned
// for the word being written. We also know that some CPUs work very
// hard to create atomic updates even of naturally unaligned words,
// but we don't want to bet the farm on this always working.
//
// Therefore, if there is any chance of a race condition, we try to
// patch only naturally aligned words, as single, full-word writes.

JRT_ENTRY_PROF(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  ResourceMark rm(current);
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(current, vfst.method());
  // Note that caller_method->code() may not be same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  Klass* init_klass = nullptr; // klass needed by load_klass_patching code
  Klass* load_klass = nullptr; // klass needed by load_klass_patching code
  Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
  Handle appendix(current, nullptr); // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(current, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method,
                                       Bytecodes::java_code(code), true /*initialize_class*/, CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile, then at compile time it
    // must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference.
    // The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets. load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, so force deoptimization.
    // We need to only cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature. At this point, we are still going
    // for the deoptimization for consistency against volatile
    // accesses.

    patch_field_type = result.field_type();
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));

  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = nullptr;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = klass;
          mirror = Handle(current, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(current, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    load_klass = k;
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(current, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    switch (bc) {
      case Bytecodes::_invokehandle: {
        ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
        appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
        break;
      }
      case Bytecodes::_invokedynamic: {
        appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(current, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLocker ml_patch(current, Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted(current)) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // the jump has not been patched yet
        // The jump destination is slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;
        if (TracePatching) {
          ttyLocker ttyl;
          tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
                        p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
          nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
          assert(caller_code != nullptr, "nmethod not found");

          // NOTE we use pc() not original_pc() because we already know they are
          // identical otherwise we'd have never entered this block of code

          const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
          assert(map != nullptr, "null check");
          map->print();
          tty->cr();

          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
        }
        // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
        bool do_patch = true;
        if (stub_id == Runtime1::access_field_patching_id) {
          // The offset may not be correct if the class was not loaded at code generation time.
          // Set it now.
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete. In this case the
          // patch site is set up so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass)->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            // patch the instruction <move reg, klass>
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);

            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == Runtime1::load_klass_patching_id) {
              assert(load_klass != nullptr, "klass not set");
              n_copy->set_data((intx) (load_klass));
            } else {
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == Runtime1::load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }

        if (do_patch) {
          // replace instructions
          // first replace the tail, then the call
#ifdef ARM
          if ((load_klass_or_mirror_patch_id ||
               stub_id == Runtime1::load_appendix_patching_id) &&
              nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            address addr = nullptr;
            assert(nm != nullptr, "invalid nmethod_pc");
            RelocIterator mds(nm, copy_buff, copy_buff + 1);
            while (mds.next()) {
              if (mds.type() == relocInfo::oop_type) {
                assert(stub_id == Runtime1::load_mirror_patching_id ||
                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                oop_Relocation* r = mds.oop_reloc();
                addr = (address)r->oop_addr();
                break;
              } else if (mds.type() == relocInfo::metadata_type) {
                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
                metadata_Relocation* r = mds.metadata_reloc();
                addr = (address)r->metadata_addr();
                break;
              }
            }
            assert(addr != nullptr, "metadata relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_pc_relative_offset(addr, instr_pc);
          }
#endif

          for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);

          if (load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == Runtime1::load_klass_patching_id) ?
                                   relocInfo::metadata_type :
                                   relocInfo::oop_type;
            // update relocInfo to metadata
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != nullptr, "invalid nmethod_pc");

            // The old patch site is now a move instruction so update
            // the reloc info so that it will get updated during
            // future GCs.
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
          }

        } else {
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
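  // (Hedged note: the (re)registration below is what makes the collector
  // aware of the oop we just patched in; the details are GC-specific.)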
  {
    MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != nullptr, "only nmethods can contain non-perm oops");

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END

#else // DEOPTIMIZE_WHEN_PATCHING

static bool is_patching_needed(JavaThread* current, Runtime1::StubID stub_id) {
  if (stub_id == Runtime1::load_klass_patching_id ||
      stub_id == Runtime1::load_mirror_patching_id) {
    // last java frame on stack
    vframeStream vfst(current, true);
    assert(!vfst.at_end(), "Java frame must exist");

    methodHandle caller_method(current, vfst.method());
    int bci = vfst.bci();
    Bytecodes::Code code = caller_method()->java_code_at(bci);

    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast: {
        Bytecode bc(caller_method(), caller_method->bcp_from(bci));
        constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
        if (tag.is_unresolved_klass_in_error()) {
          return false; // throws resolution error
        }
        break;
      }

      default: break;
    }
  }
  return true;
}

PROF_ENTRY(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  // Enable WXWrite: the function is called by c1 stub as a runtime function
  // (see another implementation above).
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));

  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);

  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "Wrong frame type");

  if (is_patching_needed(current, stub_id)) {
    // Make sure the nmethod is invalidated, i.e. made not entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant();
    }
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
  postcond(caller_is_deopted(current));
PROF_END

#endif // DEOPTIMIZE_WHEN_PATCHING

// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::move_klass_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, load_klass_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted(current);
}

int Runtime1::move_mirror_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, load_mirror_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted(current);
}

int Runtime1::move_appendix_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, load_appendix_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted(current);
}

// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::access_field_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  // Handles created in this function will be deleted by the
  // HandleMarkCleaner in the transition to the VM.
  NoHandleMark nhm;
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, access_field_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted(current);
}


JRT_LEAF_PROF_NO_THREAD(void, Runtime1, trace_block_entry, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END


JRT_LEAF_PROF_NO_THREAD(int, Runtime1, is_instance_of, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // had to return int instead of bool, otherwise there may be a mismatch
  // between the C calling convention and the Java one.
  // e.g., on x86, GCC may clear only %al when returning a bool false, but
  // JVM takes the whole %eax as the return value, which may misinterpret
  // the return value as a boolean true.

  assert(mirror != nullptr, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
  return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;

JRT_ENTRY_PROF(void, Runtime1, predicate_failed_trap, Runtime1::predicate_failed_trap(JavaThread* current))
  ResourceMark rm;

  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != nullptr, "no more nmethod?");
  nm->make_not_entrant();

  methodHandle m(current, nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_profiling_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // Only metaspace OOM is expected. No Java code executed.
      assert(PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass()), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

  if (mdo != nullptr) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(current);
    Method* inlinee = vfst.method();
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT,
                  ss1.freeze(), vfst.bci(), ss2.freeze(), p2i(caller_frame.pc()));
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());
JRT_END

// Check exception if AbortVMOnException flag set
JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
  ResourceMark rm;
  const char* message = nullptr;
  if (ex->is_a(vmClasses::Throwable_klass())) {
    oop msg = java_lang_Throwable::message(ex);
    if (msg != nullptr) {
      message = java_lang_String::as_utf8_string(msg);
    }
  }
  Exceptions::debug_check_abort(ex->klass()->external_name(), message);
JRT_END

#define DO_COUNTERS(macro) \
  macro(Runtime1, new_instance) \
  macro(Runtime1, new_type_array) \
  macro(Runtime1, new_object_array) \
  macro(Runtime1, new_multi_array) \
  macro(Runtime1, counter_overflow) \
  macro(Runtime1, exception_handler_for_pc_helper) \
  macro(Runtime1, monitorenter) \
  macro(Runtime1, monitorexit) \
  macro(Runtime1, deoptimize) \
  macro(Runtime1, is_instance_of) \
  macro(Runtime1, predicate_failed_trap) \
  macro(Runtime1, patch_code)

#define INIT_COUNTER(sub, name) \
  NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

void Runtime1::init_counters() {
  assert(CompilerConfig::is_c1_enabled(), "");

  if (UsePerfData) {
    EXCEPTION_MARK;

    DO_COUNTERS(INIT_COUNTER)

    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("Runtime1::init_counters() failed unexpectedly");
    }
  }
}
#undef INIT_COUNTER
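
// For reference, a single expansion produced by DO_COUNTERS(INIT_COUNTER)
// above looks approximately like this (first table entry shown; the string
// literals are pasted together by the preprocessor):
//
//   NEWPERFTICKCOUNTERS(_perf_Runtime1_new_instance_timer, SUN_CI,
//                       "Runtime1::new_instance");
//   NEWPERFEVENTCOUNTER(_perf_Runtime1_new_instance_count, SUN_CI,
//                       "Runtime1::new_instance_count");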

#define PRINT_COUNTER(sub, name) { \
  if (_perf_##sub##_##name##_count != nullptr) { \
    jlong count = _perf_##sub##_##name##_count->get_value(); \
    if (count > 0) { \
      st->print_cr("  %-30s = %4ldms (elapsed) %4ldms (thread) (%5ld events)", #sub "::" #name, \
                   _perf_##sub##_##name##_timer->elapsed_counter_value_ms(), \
                   _perf_##sub##_##name##_timer->thread_counter_value_ms(), \
                   count); \
    }}}


void Runtime1::print_counters_on(outputStream* st) {
  if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c1_enabled()) {
    DO_COUNTERS(PRINT_COUNTER)
  } else {
    st->print_cr("  Runtime1: no info (%s is disabled)",
                 (!CompilerConfig::is_c1_enabled() ? "C1" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
  }
}

#undef PRINT_COUNTER
#undef DO_COUNTERS

#ifndef PRODUCT
void Runtime1::print_statistics_on(outputStream* st) {
  st->print_cr("C1 Runtime statistics:");
  st->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
  st->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
  st->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
  st->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
  st->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
  st->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
  st->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
  st->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
  st->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
  st->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
  st->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
  st->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
  st->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
  st->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);

  st->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
  st->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
  st->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
  st->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
  st->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
  st->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
  st->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);

  st->print_cr(" _throw_range_check_exception_count:           %u", _throw_range_check_exception_count);
  st->print_cr(" _throw_index_exception_count:                 %u", _throw_index_exception_count);
  st->print_cr(" _throw_div0_exception_count:                  %u", _throw_div0_exception_count);
  st->print_cr(" _throw_null_pointer_exception_count:          %u", _throw_null_pointer_exception_count);
  st->print_cr(" _throw_class_cast_exception_count:            %u", _throw_class_cast_exception_count);
  st->print_cr(" _throw_incompatible_class_change_error_count: %u", _throw_incompatible_class_change_error_count);
  st->print_cr(" _throw_count:                                 %u", _throw_count);

  SharedRuntime::print_ic_miss_histogram_on(st);
  st->cr();
}
#endif // PRODUCT
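
// For reference, a line produced by PRINT_COUNTER in print_counters_on above
// looks roughly like this (illustrative values, not measured output):
//
//   Runtime1::new_instance         =   12ms (elapsed)   10ms (thread) (  345 events)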