1 /* 2 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/codeBuffer.hpp" 27 #include "c1/c1_CodeStubs.hpp" 28 #include "c1/c1_Defs.hpp" 29 #include "c1/c1_FrameMap.hpp" 30 #include "c1/c1_LIRAssembler.hpp" 31 #include "c1/c1_MacroAssembler.hpp" 32 #include "c1/c1_Runtime1.hpp" 33 #include "classfile/javaClasses.inline.hpp" 34 #include "classfile/vmClasses.hpp" 35 #include "classfile/vmSymbols.hpp" 36 #include "code/codeBlob.hpp" 37 #include "code/compiledIC.hpp" 38 #include "code/pcDesc.hpp" 39 #include "code/scopeDesc.hpp" 40 #include "code/vtableStubs.hpp" 41 #include "compiler/compilationPolicy.hpp" 42 #include "compiler/compilerDefinitions.inline.hpp" 43 #include "compiler/disassembler.hpp" 44 #include "compiler/oopMap.hpp" 45 #include "gc/shared/barrierSet.hpp" 46 #include "gc/shared/c1/barrierSetC1.hpp" 47 #include "gc/shared/collectedHeap.hpp" 48 #include "interpreter/bytecode.hpp" 49 #include "interpreter/interpreter.hpp" 50 #include "jfr/support/jfrIntrinsics.hpp" 51 #include "logging/log.hpp" 52 #include "memory/allocation.inline.hpp" 53 #include "memory/oopFactory.hpp" 54 #include "memory/resourceArea.hpp" 55 #include "memory/universe.hpp" 56 #include "oops/access.inline.hpp" 57 #include "oops/klass.inline.hpp" 58 #include "oops/objArrayOop.inline.hpp" 59 #include "oops/objArrayKlass.hpp" 60 #include "oops/oop.inline.hpp" 61 #include "prims/jvmtiExport.hpp" 62 #include "runtime/atomic.hpp" 63 #include "runtime/fieldDescriptor.inline.hpp" 64 #include "runtime/frame.inline.hpp" 65 #include "runtime/handles.inline.hpp" 66 #include "runtime/interfaceSupport.inline.hpp" 67 #include "runtime/javaCalls.hpp" 68 #include "runtime/perfData.inline.hpp" 69 #include "runtime/sharedRuntime.hpp" 70 #include "runtime/stackWatermarkSet.hpp" 71 #include "runtime/stubRoutines.hpp" 72 #include "runtime/threadCritical.hpp" 73 #include "runtime/vframe.inline.hpp" 74 #include "runtime/vframeArray.hpp" 75 #include "runtime/vm_version.hpp" 76 #include "services/management.hpp" 77 #include "utilities/copy.hpp" 78 #include "utilities/events.hpp" 79 80 81 // Implementation of StubAssembler 82 83 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) { 84 _name = name; 85 _must_gc_arguments = false; 86 _frame_size = no_frame_size; 87 _num_rt_args = 0; 88 _stub_id = stub_id; 89 } 90 91 92 void StubAssembler::set_info(const char* name, bool must_gc_arguments) { 93 _name = name; 94 
_must_gc_arguments = must_gc_arguments; 95 } 96 97 98 void StubAssembler::set_frame_size(int size) { 99 if (_frame_size == no_frame_size) { 100 _frame_size = size; 101 } 102 assert(_frame_size == size, "can't change the frame size"); 103 } 104 105 106 void StubAssembler::set_num_rt_args(int args) { 107 if (_num_rt_args == 0) { 108 _num_rt_args = args; 109 } 110 assert(_num_rt_args == args, "can't change the number of args"); 111 } 112 113 // Implementation of Runtime1 114 115 CodeBlob* Runtime1::_blobs[(int)C1StubId::NUM_STUBIDS]; 116 117 #define C1_BLOB_NAME_DEFINE(name) "C1 Runtime " # name "_blob", 118 const char *Runtime1::_blob_names[] = { 119 C1_STUBS_DO(C1_BLOB_NAME_DEFINE) 120 }; 121 #undef C1_STUB_NAME_DEFINE 122 123 #ifndef PRODUCT 124 // statistics 125 uint Runtime1::_generic_arraycopystub_cnt = 0; 126 uint Runtime1::_arraycopy_slowcase_cnt = 0; 127 uint Runtime1::_arraycopy_checkcast_cnt = 0; 128 uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0; 129 uint Runtime1::_new_type_array_slowcase_cnt = 0; 130 uint Runtime1::_new_object_array_slowcase_cnt = 0; 131 uint Runtime1::_new_instance_slowcase_cnt = 0; 132 uint Runtime1::_new_multi_array_slowcase_cnt = 0; 133 uint Runtime1::_monitorenter_slowcase_cnt = 0; 134 uint Runtime1::_monitorexit_slowcase_cnt = 0; 135 uint Runtime1::_patch_code_slowcase_cnt = 0; 136 uint Runtime1::_throw_range_check_exception_count = 0; 137 uint Runtime1::_throw_index_exception_count = 0; 138 uint Runtime1::_throw_div0_exception_count = 0; 139 uint Runtime1::_throw_null_pointer_exception_count = 0; 140 uint Runtime1::_throw_class_cast_exception_count = 0; 141 uint Runtime1::_throw_incompatible_class_change_error_count = 0; 142 uint Runtime1::_throw_count = 0; 143 144 static uint _byte_arraycopy_stub_cnt = 0; 145 static uint _short_arraycopy_stub_cnt = 0; 146 static uint _int_arraycopy_stub_cnt = 0; 147 static uint _long_arraycopy_stub_cnt = 0; 148 static uint _oop_arraycopy_stub_cnt = 0; 149 150 address Runtime1::arraycopy_count_address(BasicType type) { 151 switch (type) { 152 case T_BOOLEAN: 153 case T_BYTE: return (address)&_byte_arraycopy_stub_cnt; 154 case T_CHAR: 155 case T_SHORT: return (address)&_short_arraycopy_stub_cnt; 156 case T_FLOAT: 157 case T_INT: return (address)&_int_arraycopy_stub_cnt; 158 case T_DOUBLE: 159 case T_LONG: return (address)&_long_arraycopy_stub_cnt; 160 case T_ARRAY: 161 case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt; 162 default: 163 ShouldNotReachHere(); 164 return nullptr; 165 } 166 } 167 168 169 #endif 170 171 // Simple helper to see if the caller of a runtime stub which 172 // entered the VM has been deoptimized 173 174 static bool caller_is_deopted(JavaThread* current) { 175 RegisterMap reg_map(current, 176 RegisterMap::UpdateMap::skip, 177 RegisterMap::ProcessFrames::include, 178 RegisterMap::WalkContinuation::skip); 179 frame runtime_frame = current->last_frame(); 180 frame caller_frame = runtime_frame.sender(®_map); 181 assert(caller_frame.is_compiled_frame(), "must be compiled"); 182 return caller_frame.is_deoptimized_frame(); 183 } 184 185 // Stress deoptimization 186 static void deopt_caller(JavaThread* current) { 187 if (!caller_is_deopted(current)) { 188 RegisterMap reg_map(current, 189 RegisterMap::UpdateMap::skip, 190 RegisterMap::ProcessFrames::include, 191 RegisterMap::WalkContinuation::skip); 192 frame runtime_frame = current->last_frame(); 193 frame caller_frame = runtime_frame.sender(®_map); 194 Deoptimization::deoptimize_frame(current, caller_frame.id()); 195 
assert(caller_is_deopted(current), "Must be deoptimized"); 196 } 197 } 198 199 class C1StubIdStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure { 200 private: 201 C1StubId _id; 202 public: 203 C1StubIdStubAssemblerCodeGenClosure(C1StubId id) : _id(id) {} 204 virtual OopMapSet* generate_code(StubAssembler* sasm) { 205 return Runtime1::generate_code_for(_id, sasm); 206 } 207 }; 208 209 CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) { 210 ResourceMark rm; 211 // create code buffer for code storage 212 CodeBuffer code(buffer_blob); 213 214 OopMapSet* oop_maps; 215 int frame_size; 216 bool must_gc_arguments; 217 218 Compilation::setup_code_buffer(&code, 0); 219 220 // create assembler for code generation 221 StubAssembler* sasm = new StubAssembler(&code, name, (int)id); 222 // generate code for runtime stub 223 oop_maps = cl->generate_code(sasm); 224 assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size, 225 "if stub has an oop map it must have a valid frame size"); 226 assert(!expect_oop_map || oop_maps != nullptr, "must have an oopmap"); 227 228 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned) 229 sasm->align(BytesPerWord); 230 // make sure all code is in code buffer 231 sasm->flush(); 232 233 frame_size = sasm->frame_size(); 234 must_gc_arguments = sasm->must_gc_arguments(); 235 // create blob - distinguish a few special cases 236 CodeBlob* blob = RuntimeStub::new_runtime_stub(name, 237 &code, 238 CodeOffsets::frame_never_safe, 239 frame_size, 240 oop_maps, 241 must_gc_arguments); 242 assert(blob != nullptr, "blob must exist"); 243 return blob; 244 } 245 246 void Runtime1::generate_blob_for(BufferBlob* buffer_blob, C1StubId id) { 247 assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id"); 248 bool expect_oop_map = true; 249 #ifdef ASSERT 250 // Make sure that stubs that need oopmaps have them 251 switch (id) { 252 // These stubs don't need to have an oopmap 253 case C1StubId::dtrace_object_alloc_id: 254 case C1StubId::slow_subtype_check_id: 255 case C1StubId::fpu2long_stub_id: 256 case C1StubId::unwind_exception_id: 257 case C1StubId::counter_overflow_id: 258 expect_oop_map = false; 259 break; 260 default: 261 break; 262 } 263 #endif 264 C1StubIdStubAssemblerCodeGenClosure cl(id); 265 CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl); 266 // install blob 267 _blobs[(int)id] = blob; 268 } 269 270 void Runtime1::initialize(BufferBlob* blob) { 271 init_counters(); 272 // platform-dependent initialization 273 initialize_pd(); 274 // generate stubs 275 int limit = (int)C1StubId::NUM_STUBIDS; 276 for (int id = 0; id < limit; id++) generate_blob_for(blob, (C1StubId)id); 277 // printing 278 #ifndef PRODUCT 279 if (PrintSimpleStubs) { 280 ResourceMark rm; 281 for (int id = 0; id < limit; id++) { 282 _blobs[id]->print(); 283 if (_blobs[id]->oop_maps() != nullptr) { 284 _blobs[id]->oop_maps()->print(); 285 } 286 } 287 } 288 #endif 289 BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1(); 290 bs->generate_c1_runtime_stubs(blob); 291 } 292 293 CodeBlob* Runtime1::blob_for(C1StubId id) { 294 assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id"); 295 return _blobs[(int)id]; 296 } 297 298 299 const char* Runtime1::name_for(C1StubId id) { 300 assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id"); 301 return 
_blob_names[(int)id]; 302 } 303 304 const char* Runtime1::name_for_address(address entry) { 305 int limit = (int)C1StubId::NUM_STUBIDS; 306 for (int i = 0; i < limit; i++) { 307 C1StubId id = (C1StubId)i; 308 if (entry == entry_for(id)) return name_for(id); 309 } 310 311 #define FUNCTION_CASE(a, f) \ 312 if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f 313 314 FUNCTION_CASE(entry, os::javaTimeMillis); 315 FUNCTION_CASE(entry, os::javaTimeNanos); 316 FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end); 317 FUNCTION_CASE(entry, SharedRuntime::d2f); 318 FUNCTION_CASE(entry, SharedRuntime::d2i); 319 FUNCTION_CASE(entry, SharedRuntime::d2l); 320 FUNCTION_CASE(entry, SharedRuntime::dcos); 321 FUNCTION_CASE(entry, SharedRuntime::dexp); 322 FUNCTION_CASE(entry, SharedRuntime::dlog); 323 FUNCTION_CASE(entry, SharedRuntime::dlog10); 324 FUNCTION_CASE(entry, SharedRuntime::dpow); 325 FUNCTION_CASE(entry, SharedRuntime::drem); 326 FUNCTION_CASE(entry, SharedRuntime::dsin); 327 FUNCTION_CASE(entry, SharedRuntime::dtan); 328 FUNCTION_CASE(entry, SharedRuntime::f2i); 329 FUNCTION_CASE(entry, SharedRuntime::f2l); 330 FUNCTION_CASE(entry, SharedRuntime::frem); 331 FUNCTION_CASE(entry, SharedRuntime::l2d); 332 FUNCTION_CASE(entry, SharedRuntime::l2f); 333 FUNCTION_CASE(entry, SharedRuntime::ldiv); 334 FUNCTION_CASE(entry, SharedRuntime::lmul); 335 FUNCTION_CASE(entry, SharedRuntime::lrem); 336 FUNCTION_CASE(entry, SharedRuntime::lrem); 337 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry); 338 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit); 339 FUNCTION_CASE(entry, is_instance_of); 340 FUNCTION_CASE(entry, trace_block_entry); 341 #ifdef JFR_HAVE_INTRINSICS 342 FUNCTION_CASE(entry, JfrTime::time_function()); 343 #endif 344 FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32()); 345 FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C()); 346 FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch()); 347 FUNCTION_CASE(entry, StubRoutines::dexp()); 348 FUNCTION_CASE(entry, StubRoutines::dlog()); 349 FUNCTION_CASE(entry, StubRoutines::dlog10()); 350 FUNCTION_CASE(entry, StubRoutines::dpow()); 351 FUNCTION_CASE(entry, StubRoutines::dsin()); 352 FUNCTION_CASE(entry, StubRoutines::dcos()); 353 FUNCTION_CASE(entry, StubRoutines::dtan()); 354 FUNCTION_CASE(entry, StubRoutines::dtanh()); 355 356 #undef FUNCTION_CASE 357 358 // Soft float adds more runtime names. 359 return pd_name_for_address(entry); 360 } 361 362 363 JRT_ENTRY_PROF(void, Runtime1, new_instance, Runtime1::new_instance(JavaThread* current, Klass* klass)) 364 #ifndef PRODUCT 365 if (PrintC1Statistics) { 366 _new_instance_slowcase_cnt++; 367 } 368 #endif 369 assert(klass->is_klass(), "not a class"); 370 Handle holder(current, klass->klass_holder()); // keep the klass alive 371 InstanceKlass* h = InstanceKlass::cast(klass); 372 h->check_valid_for_instantiation(true, CHECK); 373 // make sure klass is initialized 374 h->initialize(CHECK); 375 // allocate instance and return via TLS 376 oop obj = h->allocate_instance(CHECK); 377 current->set_vm_result(obj); 378 JRT_END 379 380 381 JRT_ENTRY_PROF(void, Runtime1, new_type_array, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length)) 382 #ifndef PRODUCT 383 if (PrintC1Statistics) { 384 _new_type_array_slowcase_cnt++; 385 } 386 #endif 387 // Note: no handle for klass needed since they are not used 388 // anymore after new_typeArray() and no GC can happen before. 389 // (This may have to change if this code changes!) 
390 assert(klass->is_klass(), "not a class"); 391 BasicType elt_type = TypeArrayKlass::cast(klass)->element_type(); 392 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK); 393 current->set_vm_result(obj); 394 // This is pretty rare but this runtime patch is stressful to deoptimization 395 // if we deoptimize here so force a deopt to stress the path. 396 if (DeoptimizeALot) { 397 deopt_caller(current); 398 } 399 400 JRT_END 401 402 403 JRT_ENTRY_PROF(void, Runtime1, new_object_array, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length)) 404 #ifndef PRODUCT 405 if (PrintC1Statistics) { 406 _new_object_array_slowcase_cnt++; 407 } 408 #endif 409 // Note: no handle for klass needed since they are not used 410 // anymore after new_objArray() and no GC can happen before. 411 // (This may have to change if this code changes!) 412 assert(array_klass->is_klass(), "not a class"); 413 Handle holder(current, array_klass->klass_holder()); // keep the klass alive 414 Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass(); 415 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); 416 current->set_vm_result(obj); 417 // This is pretty rare but this runtime patch is stressful to deoptimization 418 // if we deoptimize here so force a deopt to stress the path. 419 if (DeoptimizeALot) { 420 deopt_caller(current); 421 } 422 JRT_END 423 424 425 JRT_ENTRY_PROF(void, Runtime1, new_multi_array, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims)) 426 #ifndef PRODUCT 427 if (PrintC1Statistics) { 428 _new_multi_array_slowcase_cnt++; 429 } 430 #endif 431 assert(klass->is_klass(), "not a class"); 432 assert(rank >= 1, "rank must be nonzero"); 433 Handle holder(current, klass->klass_holder()); // keep the klass alive 434 oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); 435 current->set_vm_result(obj); 436 JRT_END 437 438 439 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id)) 440 tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id); 441 JRT_END 442 443 444 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj)) 445 ResourceMark rm(current); 446 const char* klass_name = obj->klass()->external_name(); 447 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name); 448 JRT_END 449 450 451 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method 452 // associated with the top activation record. The inlinee (that is possibly included in the enclosing 453 // method) method is passed as an argument. In order to do that it is embedded in the code as 454 // a constant. 
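// Illustrative sketch only (not the actual emitted code; the instruction names and
// layout below are schematic and vary by platform): a C1 counter-overflow check
// conceptually compiles to something like
//
//   increment [invocation/backedge counter]
//   cmp       [counter], <overflow limit>
//   jl        continue
//   mov       reg, <Method* constant>      ;; the (possibly inlined) method, embedded as a constant
//   call      Runtime1::counter_overflow(branch_bci, reg)
// continue:
//   ...
//
// which is why the runtime receives both the branch bci and the Method* even though
// only the enclosing method can be recovered from the caller frame.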
455 static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) { 456 nmethod* osr_nm = nullptr; 457 methodHandle method(current, m); 458 459 RegisterMap map(current, 460 RegisterMap::UpdateMap::skip, 461 RegisterMap::ProcessFrames::include, 462 RegisterMap::WalkContinuation::skip); 463 frame fr = current->last_frame().sender(&map); 464 nmethod* nm = (nmethod*) fr.cb(); 465 assert(nm!= nullptr && nm->is_nmethod(), "Sanity check"); 466 methodHandle enclosing_method(current, nm->method()); 467 468 CompLevel level = (CompLevel)nm->comp_level(); 469 int bci = InvocationEntryBci; 470 if (branch_bci != InvocationEntryBci) { 471 // Compute destination bci 472 address pc = method()->code_base() + branch_bci; 473 Bytecodes::Code branch = Bytecodes::code_at(method(), pc); 474 int offset = 0; 475 switch (branch) { 476 case Bytecodes::_if_icmplt: case Bytecodes::_iflt: 477 case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt: 478 case Bytecodes::_if_icmple: case Bytecodes::_ifle: 479 case Bytecodes::_if_icmpge: case Bytecodes::_ifge: 480 case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq: 481 case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne: 482 case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto: 483 offset = (int16_t)Bytes::get_Java_u2(pc + 1); 484 break; 485 case Bytecodes::_goto_w: 486 offset = Bytes::get_Java_u4(pc + 1); 487 break; 488 default: ; 489 } 490 bci = branch_bci + offset; 491 } 492 osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current); 493 return osr_nm; 494 } 495 496 JRT_BLOCK_ENTRY_PROF(address, Runtime1, counter_overflow, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method)) 497 nmethod* osr_nm; 498 JRT_BLOCK 499 osr_nm = counter_overflow_helper(current, bci, method); 500 if (osr_nm != nullptr) { 501 RegisterMap map(current, 502 RegisterMap::UpdateMap::skip, 503 RegisterMap::ProcessFrames::include, 504 RegisterMap::WalkContinuation::skip); 505 frame fr = current->last_frame().sender(&map); 506 Deoptimization::deoptimize_frame(current, fr.id()); 507 } 508 JRT_BLOCK_END 509 return nullptr; 510 JRT_END 511 512 extern void vm_exit(int code); 513 514 // Enter this method from compiled code handler below. This is where we transition 515 // to VM mode. This is done as a helper routine so that the method called directly 516 // from compiled code does not have to transition to VM. This allows the entry 517 // method to see if the nmethod that we have just looked up a handler for has 518 // been deoptimized while we were in the vm. This simplifies the assembly code 519 // cpu directories. 520 // 521 // We are entering here from exception stub (via the entry method below) 522 // If there is a compiled exception handler in this method, we will continue there; 523 // otherwise we will unwind the stack and continue at the caller of top frame method 524 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to 525 // control the area where we can allow a safepoint. After we exit the safepoint area we can 526 // check to see if the handler we are going to return is now in a nmethod that has 527 // been deoptimized. If that is the case we return the deopt blob 528 // unpack_with_exception entry instead. This makes life for the exception blob easier 529 // because making that same check and diverting is painful from assembly language. 
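// Illustrative summary of the lookup performed below (simplified):
//   1. find the nmethod containing pc; if pc is a deopt pc, switch to the
//      deoptimized caller frame's pc
//   2. if JVMTI can post exception events, deoptimize the caller and return the
//      deopt blob's unpack_with_exception_in_tls entry
//   3. otherwise, with stack guard pages enabled, try the nmethod's ExceptionCache
//      (used only for exceptions at call sites)
//   4. otherwise compute the handler via SharedRuntime::compute_compiled_exc_handler
//      and cache it, unless a recursive exception occurred during the lookup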
530 JRT_ENTRY_NO_ASYNC_PROF(static address, Runtime1, exception_handler_for_pc_helper, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm)) 531 // Reset method handle flag. 532 current->set_is_method_handle_return(false); 533 534 Handle exception(current, ex); 535 536 // This function is called when we are about to throw an exception. Therefore, 537 // we have to poll the stack watermark barrier to make sure that not yet safe 538 // stack frames are made safe before returning into them. 539 if (current->last_frame().cb() == Runtime1::blob_for(C1StubId::handle_exception_from_callee_id)) { 540 // The C1StubId::handle_exception_from_callee_id handler is invoked after the 541 // frame has been unwound. It instead builds its own stub frame, to call the 542 // runtime. But the throwing frame has already been unwound here. 543 StackWatermarkSet::after_unwind(current); 544 } 545 546 nm = CodeCache::find_nmethod(pc); 547 assert(nm != nullptr, "this is not an nmethod"); 548 // Adjust the pc as needed/ 549 if (nm->is_deopt_pc(pc)) { 550 RegisterMap map(current, 551 RegisterMap::UpdateMap::skip, 552 RegisterMap::ProcessFrames::include, 553 RegisterMap::WalkContinuation::skip); 554 frame exception_frame = current->last_frame().sender(&map); 555 // if the frame isn't deopted then pc must not correspond to the caller of last_frame 556 assert(exception_frame.is_deoptimized_frame(), "must be deopted"); 557 pc = exception_frame.pc(); 558 } 559 assert(exception.not_null(), "null exceptions should be handled by throw_exception"); 560 // Check that exception is a subclass of Throwable 561 assert(exception->is_a(vmClasses::Throwable_klass()), 562 "Exception not subclass of Throwable"); 563 564 // debugging support 565 // tracing 566 if (log_is_enabled(Info, exceptions)) { 567 ResourceMark rm; // print_value_string 568 stringStream tempst; 569 assert(nm->method() != nullptr, "Unexpected null method()"); 570 tempst.print("C1 compiled method <%s>\n" 571 " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT, 572 nm->method()->print_value_string(), p2i(pc), p2i(current)); 573 Exceptions::log_exception(exception, tempst.freeze()); 574 } 575 // for AbortVMOnException flag 576 Exceptions::debug_check_abort(exception); 577 578 // Check the stack guard pages and re-enable them if necessary and there is 579 // enough space on the stack to do so. Use fast exceptions only if the guard 580 // pages are enabled. 581 bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed(); 582 583 if (JvmtiExport::can_post_on_exceptions()) { 584 // To ensure correct notification of exception catches and throws 585 // we have to deoptimize here. If we attempted to notify the 586 // catches and throws during this exception lookup it's possible 587 // we could deoptimize on the way out of the VM and end back in 588 // the interpreter at the throw site. This would result in double 589 // notifications since the interpreter would also notify about 590 // these same catches and throws as it unwound the frame. 591 592 RegisterMap reg_map(current, 593 RegisterMap::UpdateMap::include, 594 RegisterMap::ProcessFrames::include, 595 RegisterMap::WalkContinuation::skip); 596 frame stub_frame = current->last_frame(); 597 frame caller_frame = stub_frame.sender(®_map); 598 599 // We don't really want to deoptimize the nmethod itself since we 600 // can actually continue in the exception handler ourselves but I 601 // don't see an easy way to have the desired effect. 
602 Deoptimization::deoptimize_frame(current, caller_frame.id()); 603 assert(caller_is_deopted(current), "Must be deoptimized"); 604 605 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); 606 } 607 608 // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions 609 if (guard_pages_enabled) { 610 address fast_continuation = nm->handler_for_exception_and_pc(exception, pc); 611 if (fast_continuation != nullptr) { 612 // Set flag if return address is a method handle call site. 613 current->set_is_method_handle_return(nm->is_method_handle_return(pc)); 614 return fast_continuation; 615 } 616 } 617 618 // If the stack guard pages are enabled, check whether there is a handler in 619 // the current method. Otherwise (guard pages disabled), force an unwind and 620 // skip the exception cache update (i.e., just leave continuation as null). 621 address continuation = nullptr; 622 if (guard_pages_enabled) { 623 624 // New exception handling mechanism can support inlined methods 625 // with exception handlers since the mappings are from PC to PC 626 627 // Clear out the exception oop and pc since looking up an 628 // exception handler can cause class loading, which might throw an 629 // exception and those fields are expected to be clear during 630 // normal bytecode execution. 631 current->clear_exception_oop_and_pc(); 632 633 bool recursive_exception = false; 634 continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception); 635 // If an exception was thrown during exception dispatch, the exception oop may have changed 636 current->set_exception_oop(exception()); 637 current->set_exception_pc(pc); 638 639 // the exception cache is used only by non-implicit exceptions 640 // Update the exception cache only when there didn't happen 641 // another exception during the computation of the compiled 642 // exception handler. Checking for exception oop equality is not 643 // sufficient because some exceptions are pre-allocated and reused. 644 if (continuation != nullptr && !recursive_exception) { 645 nm->add_handler_for_exception_and_pc(exception, pc, continuation); 646 } 647 } 648 649 current->set_vm_result(exception()); 650 // Set flag if return address is a method handle call site. 651 current->set_is_method_handle_return(nm->is_method_handle_return(pc)); 652 653 if (log_is_enabled(Info, exceptions)) { 654 ResourceMark rm; 655 log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT 656 " for exception thrown at PC " PTR_FORMAT, 657 p2i(current), p2i(continuation), p2i(pc)); 658 } 659 660 return continuation; 661 JRT_END 662 663 // Enter this method from compiled code only if there is a Java exception handler 664 // in the method handling the exception. 665 // We are entering here from exception stub. We don't do a normal VM transition here. 666 // We do it in a helper. This is so we can check to see if the nmethod we have just 667 // searched for an exception handler has been deoptimized in the meantime. 
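// Simplified control flow (names as in this file; the assembly stubs live in the
// cpu-specific directories):
//
//   compiled code throws
//     -> handle_exception stub (assembly)
//        -> Runtime1::exception_handler_for_pc       ;; still "in Java", no handles
//             -> exception_handler_for_pc_helper     ;; JRT entry, may safepoint
//             <- handler pc (or unwind continuation)
//           if the caller nmethod was deoptimized meanwhile, return the deopt
//           blob's unpack_with_exception_in_tls entry instead
//        <- continuation pc
//     -> stub jumps to the continuation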
668 address Runtime1::exception_handler_for_pc(JavaThread* current) { 669 oop exception = current->exception_oop(); 670 address pc = current->exception_pc(); 671 // Still in Java mode 672 DEBUG_ONLY(NoHandleMark nhm); 673 nmethod* nm = nullptr; 674 address continuation = nullptr; 675 { 676 // Enter VM mode by calling the helper 677 ResetNoHandleMark rnhm; 678 continuation = exception_handler_for_pc_helper(current, exception, pc, nm); 679 } 680 // Back in JAVA, use no oops DON'T safepoint 681 682 // Now check to see if the nmethod we were called from is now deoptimized. 683 // If so we must return to the deopt blob and deoptimize the nmethod 684 if (nm != nullptr && caller_is_deopted(current)) { 685 continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); 686 } 687 688 assert(continuation != nullptr, "no handler found"); 689 return continuation; 690 } 691 692 693 JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a)) 694 #ifndef PRODUCT 695 if (PrintC1Statistics) { 696 _throw_range_check_exception_count++; 697 } 698 #endif 699 const int len = 35; 700 assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message."); 701 char message[2 * jintAsStringSize + len]; 702 os::snprintf_checked(message, sizeof(message), "Index %d out of bounds for length %d", index, a->length()); 703 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message); 704 JRT_END 705 706 707 JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index)) 708 #ifndef PRODUCT 709 if (PrintC1Statistics) { 710 _throw_index_exception_count++; 711 } 712 #endif 713 char message[16]; 714 os::snprintf_checked(message, sizeof(message), "%d", index); 715 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message); 716 JRT_END 717 718 719 JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current)) 720 #ifndef PRODUCT 721 if (PrintC1Statistics) { 722 _throw_div0_exception_count++; 723 } 724 #endif 725 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero"); 726 JRT_END 727 728 729 JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current)) 730 #ifndef PRODUCT 731 if (PrintC1Statistics) { 732 _throw_null_pointer_exception_count++; 733 } 734 #endif 735 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException()); 736 JRT_END 737 738 739 JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object)) 740 #ifndef PRODUCT 741 if (PrintC1Statistics) { 742 _throw_class_cast_exception_count++; 743 } 744 #endif 745 ResourceMark rm(current); 746 char* message = SharedRuntime::generate_class_cast_message(current, object->klass()); 747 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message); 748 JRT_END 749 750 751 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current)) 752 #ifndef PRODUCT 753 if (PrintC1Statistics) { 754 _throw_incompatible_class_change_error_count++; 755 } 756 #endif 757 ResourceMark rm(current); 758 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError()); 759 JRT_END 760 761 762 JRT_BLOCK_ENTRY_PROF(void, Runtime1, monitorenter, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock)) 763 #ifndef 
PRODUCT 764 if (PrintC1Statistics) { 765 _monitorenter_slowcase_cnt++; 766 } 767 #endif 768 if (LockingMode == LM_MONITOR) { 769 lock->set_obj(obj); 770 } 771 assert(obj == lock->obj(), "must match"); 772 SharedRuntime::monitor_enter_helper(obj, lock->lock(), current); 773 JRT_END 774 775 776 JRT_LEAF_PROF(void, Runtime1, monitorexit, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock)) 777 assert(current == JavaThread::current(), "pre-condition"); 778 #ifndef PRODUCT 779 if (PrintC1Statistics) { 780 _monitorexit_slowcase_cnt++; 781 } 782 #endif 783 assert(current->last_Java_sp(), "last_Java_sp must be set"); 784 oop obj = lock->obj(); 785 assert(oopDesc::is_oop(obj), "must be null or an object"); 786 SharedRuntime::monitor_exit_helper(obj, lock->lock(), current); 787 JRT_END 788 789 // Cf. OptoRuntime::deoptimize_caller_frame 790 JRT_ENTRY_PROF(void, Runtime1, deoptimize, Runtime1::deoptimize(JavaThread* current, jint trap_request)) 791 // Called from within the owner thread, so no need for safepoint 792 RegisterMap reg_map(current, 793 RegisterMap::UpdateMap::skip, 794 RegisterMap::ProcessFrames::include, 795 RegisterMap::WalkContinuation::skip); 796 frame stub_frame = current->last_frame(); 797 assert(stub_frame.is_runtime_frame(), "Sanity check"); 798 frame caller_frame = stub_frame.sender(®_map); 799 nmethod* nm = caller_frame.cb()->as_nmethod_or_null(); 800 assert(nm != nullptr, "Sanity check"); 801 methodHandle method(current, nm->method()); 802 assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same"); 803 Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request); 804 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request); 805 806 if (action == Deoptimization::Action_make_not_entrant) { 807 if (nm->make_not_entrant()) { 808 if (reason == Deoptimization::Reason_tenured) { 809 MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/); 810 if (trap_mdo != nullptr) { 811 trap_mdo->inc_tenure_traps(); 812 } 813 } 814 } 815 } 816 817 // Deoptimize the caller frame. 818 Deoptimization::deoptimize_frame(current, caller_frame.id()); 819 // Return to the now deoptimized frame. 820 JRT_END 821 822 823 #ifndef DEOPTIMIZE_WHEN_PATCHING 824 825 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) { 826 Bytecode_field field_access(caller, bci); 827 // This can be static or non-static field access 828 Bytecodes::Code code = field_access.code(); 829 830 // We must load class, initialize class and resolve the field 831 fieldDescriptor result; // initialize class if needed 832 constantPoolHandle constants(THREAD, caller->constants()); 833 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, 834 Bytecodes::java_code(code), true /*initialize_class*/, CHECK_NULL); 835 return result.field_holder(); 836 } 837 838 839 // 840 // This routine patches sites where a class wasn't loaded or 841 // initialized at the time the code was generated. It handles 842 // references to classes, fields and forcing of initialization. Most 843 // of the cases are straightforward and involving simply forcing 844 // resolution of a class, rewriting the instruction stream with the 845 // needed constant and replacing the call in this function with the 846 // patched code. 
The case for static field is more complicated since 847 // the thread which is in the process of initializing a class can 848 // access its static fields but other threads can't, so the code 849 // either has to deoptimize when this case is detected or execute a 850 // check that the current thread is the initializing thread. 851 // 852 // 853 // Patches basically look like this: 854 // 855 // 856 // patch_site: jmp patch stub ;; will be patched 857 // continue: ... 858 // ... 859 // ... 860 // ... 861 // 862 // They have a stub which looks like this: 863 // 864 // ;; patch body 865 // movl <const>, reg (for class constants) 866 // <or> movl [reg1 + <const>], reg (for field offsets) 867 // <or> movl reg, [reg1 + <const>] (for field offsets) 868 // <being_init offset> <bytes to copy> <bytes to skip> 869 // patch_stub: call Runtime1::patch_code (through a runtime stub) 870 // jmp patch_site 871 // 872 // 873 // A normal patch is done by rewriting the patch body, usually a move, 874 // and then copying it into place over top of the jmp instruction 875 // being careful to flush caches and doing it in an MP-safe way. The 876 // constants following the patch body are used to find various pieces 877 // of the patch relative to the call site for Runtime1::patch_code. 878 // The case for getstatic and putstatic is more complicated because 879 // getstatic and putstatic have special semantics when executing while 880 // the class is being initialized. getstatic/putstatic on a class 881 // which is being_initialized may be executed by the initializing 882 // thread but other threads have to block when they execute it. This 883 // is accomplished in compiled code by executing a test of the current 884 // thread against the initializing thread of the class. It's emitted 885 // as boilerplate in their stub which allows the patched code to be 886 // executed before it's copied back into the main body of the nmethod. 887 // 888 // being_init: get_thread(<tmp reg>) 889 // cmpl [reg1 + <init_thread_offset>], <tmp reg> 890 // jne patch_stub 891 // movl [reg1 + <const>], reg (for field offsets) <or> 892 // movl reg, [reg1 + <const>] (for field offsets) 893 // jmp continue 894 // <being_init offset> <bytes to copy> <bytes to skip> 895 // patch_stub: jmp Runtime1::patch_code (through a runtime stub) 896 // jmp patch_site 897 // 898 // If the class is being initialized the patch body is rewritten and 899 // the patch site is rewritten to jump to being_init, instead of 900 // patch_stub. Whenever this code is executed it checks the current 901 // thread against the initializing thread so other threads will enter 902 // the runtime and end up blocked waiting for the class to finish 903 // initializing inside the calls to resolve_field below. The 904 // initializing thread will continue on its way. Once the class is 905 // fully_initialized, the initializing thread of the class becomes 906 // null, so the next thread to execute this code will fail the test, 907 // call into patch_code and complete the patching process by copying 908 // the patch body back into the main part of the nmethod and resume 909 // executing. 910 911 // NB: 912 // 913 // Patchable instruction sequences inherently exhibit race conditions, 914 // where thread A is patching an instruction at the same time thread B 915 // is executing it. The algorithms we use ensure that any observation 916 // that B can make on any intermediate states during A's patching will 917 // always end up with a correct outcome.
This is easiest if there are 918 // few or no intermediate states. (Some inline caches have two 919 // related instructions that must be patched in tandem. For those, 920 // intermediate states seem to be unavoidable, but we will get the 921 // right answer from all possible observation orders.) 922 // 923 // When patching the entry instruction at the head of a method, or a 924 // linkable call instruction inside of a method, we try very hard to 925 // use a patch sequence which executes as a single memory transaction. 926 // This means, in practice, that when thread A patches an instruction, 927 // it should patch a 32-bit or 64-bit word that somehow overlaps the 928 // instruction or is contained in it. We believe that memory hardware 929 // will never break up such a word write, if it is naturally aligned 930 // for the word being written. We also know that some CPUs work very 931 // hard to create atomic updates even of naturally unaligned words, 932 // but we don't want to bet the farm on this always working. 933 // 934 // Therefore, if there is any chance of a race condition, we try to 935 // patch only naturally aligned words, as single, full-word writes. 936 937 JRT_ENTRY_PROF(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, C1StubId stub_id)) 938 #ifndef PRODUCT 939 if (PrintC1Statistics) { 940 _patch_code_slowcase_cnt++; 941 } 942 #endif 943 944 ResourceMark rm(current); 945 RegisterMap reg_map(current, 946 RegisterMap::UpdateMap::skip, 947 RegisterMap::ProcessFrames::include, 948 RegisterMap::WalkContinuation::skip); 949 frame runtime_frame = current->last_frame(); 950 frame caller_frame = runtime_frame.sender(&reg_map); 951 952 // last java frame on stack 953 vframeStream vfst(current, true); 954 assert(!vfst.at_end(), "Java frame must exist"); 955 956 methodHandle caller_method(current, vfst.method()); 957 // Note that caller_method->code() may not be the same as caller_code because of OSR's 958 // Note also that in the presence of inlining it is not guaranteed 959 // that caller_method() == caller_code->method() 960 961 int bci = vfst.bci(); 962 Bytecodes::Code code = caller_method()->java_code_at(bci); 963 964 // this is used by assertions in the access_field_patching_id 965 BasicType patch_field_type = T_ILLEGAL; 966 bool deoptimize_for_volatile = false; 967 bool deoptimize_for_atomic = false; 968 int patch_field_offset = -1; 969 Klass* init_klass = nullptr; // klass needed by load_klass_patching code 970 Klass* load_klass = nullptr; // klass needed by load_klass_patching code 971 Handle mirror(current, nullptr); // oop needed by load_mirror_patching code 972 Handle appendix(current, nullptr); // oop needed by appendix_patching code 973 bool load_klass_or_mirror_patch_id = 974 (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id); 975 976 if (stub_id == C1StubId::access_field_patching_id) { 977 978 Bytecode_field field_access(caller_method, bci); 979 fieldDescriptor result; // initialize class if needed 980 Bytecodes::Code code = field_access.code(); 981 constantPoolHandle constants(current, caller_method->constants()); 982 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, 983 Bytecodes::java_code(code), true /*initialize_class*/, CHECK); 984 patch_field_offset = result.offset(); 985 986 // If we're patching a field which is volatile then at compile time it 987 // must not have been known to be volatile, so the generated code 988 // isn't correct for a volatile reference.
The nmethod has to be 989 // deoptimized so that the code can be regenerated correctly. 990 // This check is only needed for access_field_patching since this 991 // is the path for patching field offsets. load_klass is only 992 // used for patching references to oops which don't need special 993 // handling in the volatile case. 994 995 deoptimize_for_volatile = result.access_flags().is_volatile(); 996 997 // If we are patching a field which should be atomic, then 998 // the generated code is not correct either, force deoptimizing. 999 // We need to only cover T_LONG and T_DOUBLE fields, as we can 1000 // break access atomicity only for them. 1001 1002 // Strictly speaking, the deoptimization on 64-bit platforms 1003 // is unnecessary, and T_LONG stores on 32-bit platforms need 1004 // to be handled by special patching code when AlwaysAtomicAccesses 1005 // becomes product feature. At this point, we are still going 1006 // for the deoptimization for consistency against volatile 1007 // accesses. 1008 1009 patch_field_type = result.field_type(); 1010 deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)); 1011 1012 } else if (load_klass_or_mirror_patch_id) { 1013 Klass* k = nullptr; 1014 switch (code) { 1015 case Bytecodes::_putstatic: 1016 case Bytecodes::_getstatic: 1017 { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK); 1018 init_klass = klass; 1019 mirror = Handle(current, klass->java_mirror()); 1020 } 1021 break; 1022 case Bytecodes::_new: 1023 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci)); 1024 k = caller_method->constants()->klass_at(bnew.index(), CHECK); 1025 } 1026 break; 1027 case Bytecodes::_multianewarray: 1028 { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci)); 1029 k = caller_method->constants()->klass_at(mna.index(), CHECK); 1030 } 1031 break; 1032 case Bytecodes::_instanceof: 1033 { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci)); 1034 k = caller_method->constants()->klass_at(io.index(), CHECK); 1035 } 1036 break; 1037 case Bytecodes::_checkcast: 1038 { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci)); 1039 k = caller_method->constants()->klass_at(cc.index(), CHECK); 1040 } 1041 break; 1042 case Bytecodes::_anewarray: 1043 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci)); 1044 Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK); 1045 k = ek->array_klass(CHECK); 1046 } 1047 break; 1048 case Bytecodes::_ldc: 1049 case Bytecodes::_ldc_w: 1050 case Bytecodes::_ldc2_w: 1051 { 1052 Bytecode_loadconstant cc(caller_method, bci); 1053 oop m = cc.resolve_constant(CHECK); 1054 mirror = Handle(current, m); 1055 } 1056 break; 1057 default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id"); 1058 } 1059 load_klass = k; 1060 } else if (stub_id == C1StubId::load_appendix_patching_id) { 1061 Bytecode_invoke bytecode(caller_method, bci); 1062 Bytecodes::Code bc = bytecode.invoke_code(); 1063 1064 CallInfo info; 1065 constantPoolHandle pool(current, caller_method->constants()); 1066 int index = bytecode.index(); 1067 LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK); 1068 switch (bc) { 1069 case Bytecodes::_invokehandle: { 1070 ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info); 1071 appendix = Handle(current, pool->cache()->appendix_if_resolved(entry)); 1072 break; 1073 } 1074 case Bytecodes::_invokedynamic: { 1075 appendix = 
Handle(current, pool->cache()->set_dynamic_call(info, index)); 1076 break; 1077 } 1078 default: fatal("unexpected bytecode for load_appendix_patching_id"); 1079 } 1080 } else { 1081 ShouldNotReachHere(); 1082 } 1083 1084 if (deoptimize_for_volatile || deoptimize_for_atomic) { 1085 // At compile time we assumed the field wasn't volatile/atomic but after 1086 // loading it turns out it was volatile/atomic so we have to throw the 1087 // compiled code out and let it be regenerated. 1088 if (TracePatching) { 1089 if (deoptimize_for_volatile) { 1090 tty->print_cr("Deoptimizing for patching volatile field reference"); 1091 } 1092 if (deoptimize_for_atomic) { 1093 tty->print_cr("Deoptimizing for patching atomic field reference"); 1094 } 1095 } 1096 1097 // It's possible the nmethod was invalidated in the last 1098 // safepoint, but if it's still alive then make it not_entrant. 1099 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); 1100 if (nm != nullptr) { 1101 nm->make_not_entrant(); 1102 } 1103 1104 Deoptimization::deoptimize_frame(current, caller_frame.id()); 1105 1106 // Return to the now deoptimized frame. 1107 } 1108 1109 // Now copy code back 1110 1111 { 1112 MutexLocker ml_patch (current, Patching_lock, Mutex::_no_safepoint_check_flag); 1113 // 1114 // Deoptimization may have happened while we waited for the lock. 1115 // In that case we don't bother to do any patching we just return 1116 // and let the deopt happen 1117 if (!caller_is_deopted(current)) { 1118 NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc()); 1119 address instr_pc = jump->jump_destination(); 1120 NativeInstruction* ni = nativeInstruction_at(instr_pc); 1121 if (ni->is_jump() ) { 1122 // the jump has not been patched yet 1123 // The jump destination is slow case and therefore not part of the stubs 1124 // (stubs are only for StaticCalls) 1125 1126 // format of buffer 1127 // .... 1128 // instr byte 0 <-- copy_buff 1129 // instr byte 1 1130 // .. 1131 // instr byte n-1 1132 // n 1133 // .... <-- call destination 1134 1135 address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset(); 1136 unsigned char* byte_count = (unsigned char*) (stub_location - 1); 1137 unsigned char* byte_skip = (unsigned char*) (stub_location - 2); 1138 unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3); 1139 address copy_buff = stub_location - *byte_skip - *byte_count; 1140 address being_initialized_entry = stub_location - *being_initialized_entry_offset; 1141 if (TracePatching) { 1142 ttyLocker ttyl; 1143 tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT " (%s)", Bytecodes::name(code), bci, 1144 p2i(instr_pc), (stub_id == C1StubId::access_field_patching_id) ? 
"field" : "klass"); 1145 nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc()); 1146 assert(caller_code != nullptr, "nmethod not found"); 1147 1148 // NOTE we use pc() not original_pc() because we already know they are 1149 // identical otherwise we'd have never entered this block of code 1150 1151 const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc()); 1152 assert(map != nullptr, "null check"); 1153 map->print(); 1154 tty->cr(); 1155 1156 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); 1157 } 1158 // depending on the code below, do_patch says whether to copy the patch body back into the nmethod 1159 bool do_patch = true; 1160 if (stub_id == C1StubId::access_field_patching_id) { 1161 // The offset may not be correct if the class was not loaded at code generation time. 1162 // Set it now. 1163 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff); 1164 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type"); 1165 assert(patch_field_offset >= 0, "illegal offset"); 1166 n_move->add_offset_in_bytes(patch_field_offset); 1167 } else if (load_klass_or_mirror_patch_id) { 1168 // If a getstatic or putstatic is referencing a klass which 1169 // isn't fully initialized, the patch body isn't copied into 1170 // place until initialization is complete. In this case the 1171 // patch site is setup so that any threads besides the 1172 // initializing thread are forced to come into the VM and 1173 // block. 1174 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) || 1175 InstanceKlass::cast(init_klass)->is_initialized(); 1176 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc); 1177 if (jump->jump_destination() == being_initialized_entry) { 1178 assert(do_patch == true, "initialization must be complete at this point"); 1179 } else { 1180 // patch the instruction <move reg, klass> 1181 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); 1182 1183 assert(n_copy->data() == 0 || 1184 n_copy->data() == (intptr_t)Universe::non_oop_word(), 1185 "illegal init value"); 1186 if (stub_id == C1StubId::load_klass_patching_id) { 1187 assert(load_klass != nullptr, "klass not set"); 1188 n_copy->set_data((intx) (load_klass)); 1189 } else { 1190 // Don't need a G1 pre-barrier here since we assert above that data isn't an oop. 
1191 n_copy->set_data(cast_from_oop<intx>(mirror())); 1192 } 1193 1194 if (TracePatching) { 1195 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); 1196 } 1197 } 1198 } else if (stub_id == C1StubId::load_appendix_patching_id) { 1199 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); 1200 assert(n_copy->data() == 0 || 1201 n_copy->data() == (intptr_t)Universe::non_oop_word(), 1202 "illegal init value"); 1203 n_copy->set_data(cast_from_oop<intx>(appendix())); 1204 1205 if (TracePatching) { 1206 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); 1207 } 1208 } else { 1209 ShouldNotReachHere(); 1210 } 1211 1212 if (do_patch) { 1213 // replace instructions 1214 // first replace the tail, then the call 1215 #ifdef ARM 1216 if((load_klass_or_mirror_patch_id || 1217 stub_id == C1StubId::load_appendix_patching_id) && 1218 nativeMovConstReg_at(copy_buff)->is_pc_relative()) { 1219 nmethod* nm = CodeCache::find_nmethod(instr_pc); 1220 address addr = nullptr; 1221 assert(nm != nullptr, "invalid nmethod_pc"); 1222 RelocIterator mds(nm, copy_buff, copy_buff + 1); 1223 while (mds.next()) { 1224 if (mds.type() == relocInfo::oop_type) { 1225 assert(stub_id == C1StubId::load_mirror_patching_id || 1226 stub_id == C1StubId::load_appendix_patching_id, "wrong stub id"); 1227 oop_Relocation* r = mds.oop_reloc(); 1228 addr = (address)r->oop_addr(); 1229 break; 1230 } else if (mds.type() == relocInfo::metadata_type) { 1231 assert(stub_id == C1StubId::load_klass_patching_id, "wrong stub id"); 1232 metadata_Relocation* r = mds.metadata_reloc(); 1233 addr = (address)r->metadata_addr(); 1234 break; 1235 } 1236 } 1237 assert(addr != nullptr, "metadata relocation must exist"); 1238 copy_buff -= *byte_count; 1239 NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff); 1240 n_copy2->set_pc_relative_offset(addr, instr_pc); 1241 } 1242 #endif 1243 1244 for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) { 1245 address ptr = copy_buff + i; 1246 int a_byte = (*ptr) & 0xFF; 1247 address dst = instr_pc + i; 1248 *(unsigned char*)dst = (unsigned char) a_byte; 1249 } 1250 ICache::invalidate_range(instr_pc, *byte_count); 1251 NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff); 1252 1253 if (load_klass_or_mirror_patch_id || 1254 stub_id == C1StubId::load_appendix_patching_id) { 1255 relocInfo::relocType rtype = 1256 (stub_id == C1StubId::load_klass_patching_id) ? 1257 relocInfo::metadata_type : 1258 relocInfo::oop_type; 1259 // update relocInfo to metadata 1260 nmethod* nm = CodeCache::find_nmethod(instr_pc); 1261 assert(nm != nullptr, "invalid nmethod_pc"); 1262 1263 // The old patch site is now a move instruction so update 1264 // the reloc info so that it will get updated during 1265 // future GCs. 1266 RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1)); 1267 relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc, 1268 relocInfo::none, rtype); 1269 } 1270 1271 } else { 1272 ICache::invalidate_range(copy_buff, *byte_count); 1273 NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry); 1274 } 1275 } 1276 } 1277 } 1278 1279 // If we are patching in a non-perm oop, make sure the nmethod 1280 // is on the right list. 
1281 { 1282 MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag); 1283 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); 1284 guarantee(nm != nullptr, "only nmethods can contain non-perm oops"); 1285 1286 // Since we've patched some oops in the nmethod, 1287 // (re)register it with the heap. 1288 Universe::heap()->register_nmethod(nm); 1289 } 1290 JRT_END 1291 1292 #else // DEOPTIMIZE_WHEN_PATCHING 1293 1294 static bool is_patching_needed(JavaThread* current, C1StubId stub_id) { 1295 if (stub_id == C1StubId::load_klass_patching_id || 1296 stub_id == C1StubId::load_mirror_patching_id) { 1297 // last java frame on stack 1298 vframeStream vfst(current, true); 1299 assert(!vfst.at_end(), "Java frame must exist"); 1300 1301 methodHandle caller_method(current, vfst.method()); 1302 int bci = vfst.bci(); 1303 Bytecodes::Code code = caller_method()->java_code_at(bci); 1304 1305 switch (code) { 1306 case Bytecodes::_new: 1307 case Bytecodes::_anewarray: 1308 case Bytecodes::_multianewarray: 1309 case Bytecodes::_instanceof: 1310 case Bytecodes::_checkcast: { 1311 Bytecode bc(caller_method(), caller_method->bcp_from(bci)); 1312 constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code)); 1313 if (tag.is_unresolved_klass_in_error()) { 1314 return false; // throws resolution error 1315 } 1316 break; 1317 } 1318 1319 default: break; 1320 } 1321 } 1322 return true; 1323 } 1324 1325 PROF_ENTRY(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, C1StubId stub_id)) 1326 #ifndef PRODUCT 1327 if (PrintC1Statistics) { 1328 _patch_code_slowcase_cnt++; 1329 } 1330 #endif 1331 1332 // Enable WXWrite: the function is called by c1 stub as a runtime function 1333 // (see another implementation above). 1334 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current)); 1335 1336 if (TracePatching) { 1337 tty->print_cr("Deoptimizing because patch is needed"); 1338 } 1339 1340 RegisterMap reg_map(current, 1341 RegisterMap::UpdateMap::skip, 1342 RegisterMap::ProcessFrames::include, 1343 RegisterMap::WalkContinuation::skip); 1344 1345 frame runtime_frame = current->last_frame(); 1346 frame caller_frame = runtime_frame.sender(®_map); 1347 assert(caller_frame.is_compiled_frame(), "Wrong frame type"); 1348 1349 if (is_patching_needed(current, stub_id)) { 1350 // Make sure the nmethod is invalidated, i.e. made not entrant. 1351 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); 1352 if (nm != nullptr) { 1353 nm->make_not_entrant(); 1354 } 1355 } 1356 1357 Deoptimization::deoptimize_frame(current, caller_frame.id()); 1358 // Return to the now deoptimized frame. 1359 postcond(caller_is_deopted(current)); 1360 PROF_END 1361 1362 #endif // DEOPTIMIZE_WHEN_PATCHING 1363 1364 // Entry point for compiled code. We want to patch a nmethod. 1365 // We don't do a normal VM transition here because we want to 1366 // know after the patching is complete and any safepoint(s) are taken 1367 // if the calling nmethod was deoptimized. We do this by calling a 1368 // helper method which does the normal VM transition and when it 1369 // completes we can check for deoptimization. This simplifies the 1370 // assembly code in the cpu directories. 
1371 // 1372 int Runtime1::move_klass_patching(JavaThread* current) { 1373 // 1374 // NOTE: we are still in Java 1375 // 1376 debug_only(NoHandleMark nhm;) 1377 { 1378 // Enter VM mode 1379 ResetNoHandleMark rnhm; 1380 patch_code(current, C1StubId::load_klass_patching_id); 1381 } 1382 // Back in JAVA, use no oops DON'T safepoint 1383 1384 // Return true if calling code is deoptimized 1385 1386 return caller_is_deopted(current); 1387 } 1388 1389 int Runtime1::move_mirror_patching(JavaThread* current) { 1390 // 1391 // NOTE: we are still in Java 1392 // 1393 debug_only(NoHandleMark nhm;) 1394 { 1395 // Enter VM mode 1396 ResetNoHandleMark rnhm; 1397 patch_code(current, C1StubId::load_mirror_patching_id); 1398 } 1399 // Back in JAVA, use no oops DON'T safepoint 1400 1401 // Return true if calling code is deoptimized 1402 1403 return caller_is_deopted(current); 1404 } 1405 1406 int Runtime1::move_appendix_patching(JavaThread* current) { 1407 // 1408 // NOTE: we are still in Java 1409 // 1410 debug_only(NoHandleMark nhm;) 1411 { 1412 // Enter VM mode 1413 ResetNoHandleMark rnhm; 1414 patch_code(current, C1StubId::load_appendix_patching_id); 1415 } 1416 // Back in JAVA, use no oops DON'T safepoint 1417 1418 // Return true if calling code is deoptimized 1419 1420 return caller_is_deopted(current); 1421 } 1422 1423 // Entry point for compiled code. We want to patch a nmethod. 1424 // We don't do a normal VM transition here because we want to 1425 // know after the patching is complete and any safepoint(s) are taken 1426 // if the calling nmethod was deoptimized. We do this by calling a 1427 // helper method which does the normal VM transition and when it 1428 // completes we can check for deoptimization. This simplifies the 1429 // assembly code in the cpu directories. 1430 // 1431 int Runtime1::access_field_patching(JavaThread* current) { 1432 // 1433 // NOTE: we are still in Java 1434 // 1435 // Handles created in this function will be deleted by the 1436 // HandleMarkCleaner in the transition to the VM. 1437 NoHandleMark nhm; 1438 { 1439 // Enter VM mode 1440 ResetNoHandleMark rnhm; 1441 patch_code(current, C1StubId::access_field_patching_id); 1442 } 1443 // Back in JAVA, use no oops DON'T safepoint 1444 1445 // Return true if calling code is deoptimized 1446 1447 return caller_is_deopted(current); 1448 } 1449 1450 1451 JRT_LEAF_PROF_NO_THREAD(void, Runtime1, trace_block_entry, Runtime1::trace_block_entry(jint block_id)) 1452 // for now we just print out the block id 1453 tty->print("%d ", block_id); 1454 JRT_END 1455 1456 1457 JRT_LEAF_PROF_NO_THREAD(int, Runtime1, is_instance_of, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj)) 1458 // had to return int instead of bool, otherwise there may be a mismatch 1459 // between the C calling convention and the Java one. 1460 // e.g., on x86, GCC may clear only %al when returning a bool false, but 1461 // JVM takes the whole %eax as the return value, which may misinterpret 1462 // the return value as a boolean true. 1463 1464 assert(mirror != nullptr, "should null-check on mirror before calling"); 1465 Klass* k = java_lang_Class::as_Klass(mirror); 1466 return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 
JRT_ENTRY_PROF(void, Runtime1, predicate_failed_trap, Runtime1::predicate_failed_trap(JavaThread* current))
  ResourceMark rm;

  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != nullptr, "no more nmethod?");
  nm->make_not_entrant();

  methodHandle m(current, nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_profiling_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // Only metaspace OOM is expected. No Java code executed.
      assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

  if (mdo != nullptr) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(current);
    Method* inlinee = vfst.method();
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.freeze(), vfst.bci(), ss2.freeze(), p2i(caller_frame.pc()));
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());

JRT_END

// Check exception if AbortVMOnException flag set
JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
  ResourceMark rm;
  const char* message = nullptr;
  if (ex->is_a(vmClasses::Throwable_klass())) {
    oop msg = java_lang_Throwable::message(ex);
    if (msg != nullptr) {
      message = java_lang_String::as_utf8_string(msg);
    }
  }
  Exceptions::debug_check_abort(ex->klass()->external_name(), message);
JRT_END

#define DO_COUNTERS(macro) \
  macro(Runtime1, new_instance) \
  macro(Runtime1, new_type_array) \
  macro(Runtime1, new_object_array) \
  macro(Runtime1, new_multi_array) \
  macro(Runtime1, counter_overflow) \
  macro(Runtime1, exception_handler_for_pc_helper) \
  macro(Runtime1, monitorenter) \
  macro(Runtime1, monitorexit) \
  macro(Runtime1, deoptimize) \
  macro(Runtime1, is_instance_of) \
  macro(Runtime1, predicate_failed_trap) \
  macro(Runtime1, patch_code)

#define INIT_COUNTER(sub, name) \
  NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

void Runtime1::init_counters() {
  assert(CompilerConfig::is_c1_enabled(), "");

  if (UsePerfData) {
    EXCEPTION_MARK;

    DO_COUNTERS(INIT_COUNTER)

    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("Runtime1::init_counters() failed unexpectedly");
    }
  }
}
#undef INIT_COUNTER
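// For illustration only (a sketch of the macro expansion, not additional code
// that is compiled here): for the new_instance entry, DO_COUNTERS(INIT_COUNTER)
// expands roughly to
//
//   NEWPERFTICKCOUNTERS(_perf_Runtime1_new_instance_timer, SUN_CI, "Runtime1::new_instance");
//   NEWPERFEVENTCOUNTER(_perf_Runtime1_new_instance_count, SUN_CI, "Runtime1::new_instance_count");
//
// i.e. one elapsed/thread-time tick counter and one event counter per runtime
// entry listed in DO_COUNTERS. PRINT_COUNTER below walks the same list when
// ProfileRuntimeCalls output is requested.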
#define PRINT_COUNTER(sub, name) { \
  if (_perf_##sub##_##name##_count != nullptr) { \
    jlong count = _perf_##sub##_##name##_count->get_value(); \
    if (count > 0) { \
      st->print_cr(" %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
                   _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
                   _perf_##sub##_##name##_timer->thread_counter_value_us(), \
                   count); \
    }}}


void Runtime1::print_counters_on(outputStream* st) {
  if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c1_enabled()) {
    DO_COUNTERS(PRINT_COUNTER)
  } else {
    st->print_cr(" Runtime1: no info (%s is disabled)",
                 (!CompilerConfig::is_c1_enabled() ? "C1" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
  }
}

#undef PRINT_COUNTER
#undef DO_COUNTERS

#ifndef PRODUCT
void Runtime1::print_statistics_on(outputStream* st) {
  st->print_cr("C1 Runtime statistics:");
  st->print_cr(" _resolve_invoke_virtual_cnt: %u", SharedRuntime::_resolve_virtual_ctr);
  st->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
  st->print_cr(" _resolve_invoke_static_cnt: %u", SharedRuntime::_resolve_static_ctr);
  st->print_cr(" _handle_wrong_method_cnt: %u", SharedRuntime::_wrong_method_ctr);
  st->print_cr(" _ic_miss_cnt: %u", SharedRuntime::_ic_miss_ctr);
  st->print_cr(" _generic_arraycopystub_cnt: %u", _generic_arraycopystub_cnt);
  st->print_cr(" _byte_arraycopy_cnt: %u", _byte_arraycopy_stub_cnt);
  st->print_cr(" _short_arraycopy_cnt: %u", _short_arraycopy_stub_cnt);
  st->print_cr(" _int_arraycopy_cnt: %u", _int_arraycopy_stub_cnt);
  st->print_cr(" _long_arraycopy_cnt: %u", _long_arraycopy_stub_cnt);
  st->print_cr(" _oop_arraycopy_cnt: %u", _oop_arraycopy_stub_cnt);
  st->print_cr(" _arraycopy_slowcase_cnt: %u", _arraycopy_slowcase_cnt);
  st->print_cr(" _arraycopy_checkcast_cnt: %u", _arraycopy_checkcast_cnt);
  st->print_cr(" _arraycopy_checkcast_attempt_cnt: %u", _arraycopy_checkcast_attempt_cnt);

  st->print_cr(" _new_type_array_slowcase_cnt: %u", _new_type_array_slowcase_cnt);
  st->print_cr(" _new_object_array_slowcase_cnt: %u", _new_object_array_slowcase_cnt);
  st->print_cr(" _new_instance_slowcase_cnt: %u", _new_instance_slowcase_cnt);
  st->print_cr(" _new_multi_array_slowcase_cnt: %u", _new_multi_array_slowcase_cnt);
  st->print_cr(" _monitorenter_slowcase_cnt: %u", _monitorenter_slowcase_cnt);
  st->print_cr(" _monitorexit_slowcase_cnt: %u", _monitorexit_slowcase_cnt);
  st->print_cr(" _patch_code_slowcase_cnt: %u", _patch_code_slowcase_cnt);

  st->print_cr(" _throw_range_check_exception_count: %u", _throw_range_check_exception_count);
  st->print_cr(" _throw_index_exception_count: %u", _throw_index_exception_count);
  st->print_cr(" _throw_div0_exception_count: %u", _throw_div0_exception_count);
  st->print_cr(" _throw_null_pointer_exception_count: %u", _throw_null_pointer_exception_count);
  st->print_cr(" _throw_class_cast_exception_count: %u", _throw_class_cast_exception_count);
  st->print_cr(" _throw_incompatible_class_change_error_count: %u", _throw_incompatible_class_change_error_count);
  st->print_cr(" _throw_count: %u", _throw_count);

  SharedRuntime::print_ic_miss_histogram_on(st);
  st->cr();
}
#endif // PRODUCT