/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_SHAREDRUNTIME_HPP
#define SHARE_RUNTIME_SHAREDRUNTIME_HPP

#include "asm/codeBuffer.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/signature.hpp"
#include "utilities/macros.hpp"

class AdapterHandlerEntry;
class AdapterFingerPrint;
class vframeStream;
class SigEntry;

// Runtime is the base class for various runtime interfaces
// (InterpreterRuntime, CompilerRuntime, etc.). It provides
// shared functionality such as exception forwarding (C++ to
// Java exceptions), locking/unlocking mechanisms, statistical
// information, etc.

class SharedRuntime: AllStatic {
  friend class VMStructs;

 private:
  // Shared stub locations

  static RuntimeStub*        _wrong_method_blob;
  static RuntimeStub*        _wrong_method_abstract_blob;
  static RuntimeStub*        _ic_miss_blob;
  static RuntimeStub*        _resolve_opt_virtual_call_blob;
  static RuntimeStub*        _resolve_virtual_call_blob;
  static RuntimeStub*        _resolve_static_call_blob;

  static DeoptimizationBlob* _deopt_blob;

  static SafepointBlob*      _polling_page_vectors_safepoint_handler_blob;
  static SafepointBlob*      _polling_page_safepoint_handler_blob;
  static SafepointBlob*      _polling_page_return_handler_blob;

#ifdef COMPILER2
  static UncommonTrapBlob*   _uncommon_trap_blob;
#endif // COMPILER2

  static nmethod*            _cont_doYield_stub;

#ifndef PRODUCT
  // Counters
  static int64_t _nof_megamorphic_calls;   // total # of megamorphic calls (through vtable)
#endif // !PRODUCT

 private:
  enum { POLL_AT_RETURN, POLL_AT_LOOP, POLL_AT_VECTOR_LOOP };
  static SafepointBlob* generate_handler_blob(address call_ptr, int poll_type);
  static RuntimeStub*   generate_resolve_blob(address destination, const char* name);

 public:
  static void generate_stubs(void);

  // max bytes for each dtrace string parameter
  enum { max_dtrace_string_size = 256 };

  // The following arithmetic routines are used on platforms that do
  // not have machine instructions to implement their functionality.
  // Do not remove these.
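
  // Illustrative sketch (not part of this interface): these entry points must
  // preserve Java arithmetic semantics even where the hardware would trap. For
  // example, a conforming 64-bit remainder could look like the comment below;
  // the helper name and parameter names are hypothetical.
  //
  //   jlong java_lrem(jlong dividend, jlong divisor) {
  //     if (divisor == -1) {
  //       return 0;   // Java defines x % -1 == 0; avoids the min_jlong / -1 hardware trap
  //     }
  //     return dividend % divisor;   // C++ and Java both truncate toward zero
  //   }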

  // long arithmetic
  static jlong   lmul(jlong y, jlong x);
  static jlong   ldiv(jlong y, jlong x);
  static jlong   lrem(jlong y, jlong x);

  // float and double remainder
  static jfloat  frem(jfloat x, jfloat y);
  static jdouble drem(jdouble x, jdouble y);


#ifdef _WIN64
  // Workaround for fmod issue in the Windows x64 CRT
  static double fmod_winx64(double x, double y);
#endif

#ifdef __SOFTFP__
  static jfloat  fadd(jfloat x, jfloat y);
  static jfloat  fsub(jfloat x, jfloat y);
  static jfloat  fmul(jfloat x, jfloat y);
  static jfloat  fdiv(jfloat x, jfloat y);

  static jdouble dadd(jdouble x, jdouble y);
  static jdouble dsub(jdouble x, jdouble y);
  static jdouble dmul(jdouble x, jdouble y);
  static jdouble ddiv(jdouble x, jdouble y);
#endif // __SOFTFP__

  // float conversion (needs to set appropriate rounding mode)
  static jint    f2i (jfloat  x);
  static jlong   f2l (jfloat  x);
  static jint    d2i (jdouble x);
  static jlong   d2l (jdouble x);
  static jfloat  d2f (jdouble x);
  static jfloat  l2f (jlong   x);
  static jdouble l2d (jlong   x);
  static jfloat  i2f (jint    x);

#ifdef __SOFTFP__
  static jdouble i2d (jint x);
  static jdouble f2d (jfloat x);
#endif // __SOFTFP__

  // double trigonometric and transcendental functions
  static jdouble dsin(jdouble x);
  static jdouble dcos(jdouble x);
  static jdouble dtan(jdouble x);
  static jdouble dlog(jdouble x);
  static jdouble dlog10(jdouble x);
  static jdouble dexp(jdouble x);
  static jdouble dpow(jdouble x, jdouble y);

#if defined(__SOFTFP__) || defined(E500V2)
  static double dabs(double f);
#endif

#if defined(__SOFTFP__) || defined(PPC)
  static double dsqrt(double f);
#endif

  // Montgomery multiplication
  static void montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                  jint len, jlong inv, jint *m_ints);
  static void montgomery_square(jint *a_ints, jint *n_ints,
                                jint len, jlong inv, jint *m_ints);
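
  // Illustrative note (an assumption based on the standard Montgomery algorithm,
  // not a statement of this routine's exact contract): with R = 2^(32*len),
  // montgomery_multiply computes m = a * b * R^-1 mod n and montgomery_square
  // computes m = a^2 * R^-1 mod n, where a_ints/b_ints/n_ints/m_ints are the
  // operands as len-element jint arrays and inv is the precomputed negative
  // inverse of n modulo the word size. A hypothetical caller sketch:
  //
  //   // given jint a[len], b[len], n[len], m[len] and the precomputed inv:
  //   SharedRuntime::montgomery_multiply(a, b, n, len, inv, m);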

#ifdef __SOFTFP__
  // C++ compiler generates soft float instructions as well as passing
  // float and double in registers.
  static int  fcmpl(float x, float y);
  static int  fcmpg(float x, float y);
  static int  dcmpl(double x, double y);
  static int  dcmpg(double x, double y);

  static int unordered_fcmplt(float x, float y);
  static int unordered_dcmplt(double x, double y);
  static int unordered_fcmple(float x, float y);
  static int unordered_dcmple(double x, double y);
  static int unordered_fcmpge(float x, float y);
  static int unordered_dcmpge(double x, double y);
  static int unordered_fcmpgt(float x, float y);
  static int unordered_dcmpgt(double x, double y);

  static float  fneg(float f);
  static double dneg(double f);
#endif

  // exception handling across interpreter/compiler boundaries
  static address raw_exception_handler_for_return_address(JavaThread* current, address return_address);
  static address exception_handler_for_return_address(JavaThread* current, address return_address);

  // exception handling and implicit exceptions
  static address compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                              bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred);
  enum ImplicitExceptionKind {
    IMPLICIT_NULL,
    IMPLICIT_DIVIDE_BY_ZERO,
    STACK_OVERFLOW
  };
  static void    throw_AbstractMethodError(JavaThread* current);
  static void    throw_IncompatibleClassChangeError(JavaThread* current);
  static void    throw_ArithmeticException(JavaThread* current);
  static void    throw_NullPointerException(JavaThread* current);
  static void    throw_NullPointerException_at_call(JavaThread* current);
  static void    throw_StackOverflowError(JavaThread* current);
  static void    throw_delayed_StackOverflowError(JavaThread* current);
  static void    throw_StackOverflowError_common(JavaThread* current, bool delayed);
  static address continuation_for_implicit_exception(JavaThread* current,
                                                     address faulting_pc,
                                                     ImplicitExceptionKind exception_kind);
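
  // Illustrative note (simplified, not the exact control flow): when compiled
  // code takes a hardware fault for an implicit check, the platform signal
  // handler typically asks for a continuation along these lines, using the
  // names declared above:
  //
  //   address cont = SharedRuntime::continuation_for_implicit_exception(
  //                      current, faulting_pc, SharedRuntime::IMPLICIT_NULL);
  //   // execution resumes at 'cont', which dispatches to code that builds and
  //   // throws the corresponding NullPointerException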

  // Post-slow-path-allocation, pre-initializing-stores step for
  // implementing e.g. ReduceInitialCardMarks
  static void on_slowpath_allocation_exit(JavaThread* current);

  static void enable_stack_reserved_zone(JavaThread* current);
  static frame look_for_reserved_stack_annotated_method(JavaThread* current, frame fr);

  // Shared stub locations
  static address get_poll_stub(address pc);

  static address get_ic_miss_stub() {
    assert(_ic_miss_blob != nullptr, "oops");
    return _ic_miss_blob->entry_point();
  }

  static address get_handle_wrong_method_stub() {
    assert(_wrong_method_blob != nullptr, "oops");
    return _wrong_method_blob->entry_point();
  }

  static address get_handle_wrong_method_abstract_stub() {
    assert(_wrong_method_abstract_blob != nullptr, "oops");
    return _wrong_method_abstract_blob->entry_point();
  }

#ifdef COMPILER2
  static void generate_uncommon_trap_blob(void);
  static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; }
#endif // COMPILER2

  static address get_resolve_opt_virtual_call_stub() {
    assert(_resolve_opt_virtual_call_blob != nullptr, "oops");
    return _resolve_opt_virtual_call_blob->entry_point();
  }
  static address get_resolve_virtual_call_stub() {
    assert(_resolve_virtual_call_blob != nullptr, "oops");
    return _resolve_virtual_call_blob->entry_point();
  }
  static address get_resolve_static_call_stub() {
    assert(_resolve_static_call_blob != nullptr, "oops");
    return _resolve_static_call_blob->entry_point();
  }

  static SafepointBlob* polling_page_return_handler_blob()            { return _polling_page_return_handler_blob; }
  static SafepointBlob* polling_page_safepoint_handler_blob()         { return _polling_page_safepoint_handler_blob; }
  static SafepointBlob* polling_page_vectors_safepoint_handler_blob() { return _polling_page_vectors_safepoint_handler_blob; }

  static nmethod* cont_doYield_stub() {
    assert(_cont_doYield_stub != nullptr, "oops");
    return _cont_doYield_stub;
  }

  // Counters
#ifndef PRODUCT
  static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; }
#endif // PRODUCT

  // Helper routine for full-speed JVMTI exception throwing support
  static void throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception);
  static void throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message = nullptr);

#if INCLUDE_JVMTI
  // Functions for JVMTI notifications
  static void notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current);
  static void notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current);
  static void notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current);
  static void notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current);
#endif

  // RedefineClasses() tracing support for obsolete method entry
  static int rc_trace_method_entry(JavaThread* thread, Method* m);

  // To be used as the entry point for unresolved native methods.
  static address native_method_throw_unsatisfied_link_error_entry();

  static void register_finalizer(JavaThread* thread, oopDesc* obj);

  // dtrace notifications
  static int dtrace_object_alloc(oopDesc* o);
  static int dtrace_object_alloc(JavaThread* thread, oopDesc* o);
  static int dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size);
  static int dtrace_method_entry(JavaThread* thread, Method* m);
  static int dtrace_method_exit(JavaThread* thread, Method* m);

  // Utility method for retrieving the Java thread id; returns 0 if the
  // thread is not a well-formed Java thread.
  static jlong get_java_tid(JavaThread* thread);


  // used by native wrappers to re-enable yellow if overflow happened in native code
  static void reguard_yellow_pages();

  // Fill in the "X cannot be cast to a Y" message for ClassCastException
  //
  // @param thr the current thread
  // @param caster_klass the class of the object we are casting
  // @return the dynamically allocated exception message (must be freed
  // by the caller using a resource mark)
  //
  // BCP must refer to the current 'checkcast' opcode for the frame
  // on top of the stack.
  // The caller (or one of its callers) must use a ResourceMark
  // in order to correctly free the result.
  //
  static char* generate_class_cast_message(JavaThread* thr, Klass* caster_klass);

  // Fill in the "X cannot be cast to a Y" message for ClassCastException
  //
  // @param caster_klass the class of the object we are casting
  // @param target_klass the class we are attempting to cast to
  // @return the dynamically allocated exception message (must be freed
  // by the caller using a resource mark)
  //
  // This version does not require access to the frame, so it can be called
  // from interpreted code.
  // The caller (or one of its callers) must use a ResourceMark
  // in order to correctly free the result.
  //
  static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = nullptr);

  static char* generate_identity_exception_message(JavaThread* thr, Klass* klass);

  // Resolves a call site - may patch in the destination of the call into the
  // compiled code.
  static methodHandle resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS);

 private:
  // deopt blob
  static void generate_deopt_blob(void);

  static bool handle_ic_miss_helper_internal(Handle receiver, nmethod* caller_nm, const frame& caller_frame,
                                             methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info,
                                             bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS);

 public:
  static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; }

  // Resets a call site in compiled code so it will get resolved again.
  static methodHandle reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS);

  // In the code prolog, if the klass comparison fails, the inline cache
  // misses and the call site is patched to megamorphic
  static methodHandle handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS);
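
  // Illustrative note (simplified summary, not normative): an inline-cached call
  // site typically moves from "clean" (unresolved) to monomorphic (one cached
  // receiver klass); on a miss against a different receiver klass,
  // handle_ic_miss_helper re-resolves the callee and may patch the site to the
  // megamorphic state so later calls dispatch through the vtable/itable stub
  // instead of missing again.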

  // Find the method that called us.
  static methodHandle find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS);

  static void monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* thread);

  static void monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current);

  static address entry_for_handle_wrong_method(methodHandle callee_method, bool is_static_call, bool is_optimized, bool caller_is_c1) {
    assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
    assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
    assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
    if (caller_is_c1) {
      return callee_method->verified_inline_code_entry();
    } else if (is_static_call || is_optimized) {
      return callee_method->verified_code_entry();
    } else {
      return callee_method->verified_inline_ro_code_entry();
    }
  }

 private:
  static Handle find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);
  static Handle find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);

  static Method* extract_attached_method(vframeStream& vfst);

#if defined(X86) && defined(COMPILER1)
  // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
  static void inline_check_hashcode_from_object_header(MacroAssembler* masm, const methodHandle& method, Register obj_reg, Register result);
#endif // X86 && COMPILER1

 public:

  // Read the array of BasicTypes from a Java signature, and compute where
  // compiled Java code would like to put the results. Values in reg_lo and
  // reg_hi refer to 4-byte quantities. Values less than SharedInfo::stack0 are
  // registers, those above refer to 4-byte stack slots. All stack slots are
  // based off of the window top. SharedInfo::stack0 refers to the first usable
  // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
  // 4 bytes higher.
  // The return value is the maximum number of VMReg stack slots the convention will use.
  static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
  static int java_calling_convention(const GrowableArray<SigEntry>* sig, VMRegPair* regs) {
    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig->length());
    int total_args_passed = SigEntry::fill_sig_bt(sig, sig_bt);
    return java_calling_convention(sig_bt, regs, total_args_passed);
  }
  static int java_return_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
  static const uint java_return_convention_max_int;
  static const uint java_return_convention_max_float;

  static void check_member_name_argument_is_last_argument(const methodHandle& method,
                                                           const BasicType* sig_bt,
                                                           const VMRegPair* regs) NOT_DEBUG_RETURN;
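
  // Illustrative sketch of the common java_calling_convention use (hypothetical
  // values): longs and doubles occupy two BasicType slots, with T_VOID as the
  // second half.
  //
  //   BasicType sig_bt[] = { T_OBJECT, T_INT, T_LONG, T_VOID };   // receiver, int, long
  //   VMRegPair regs[4];
  //   int comp_args_on_stack = java_calling_convention(sig_bt, regs, 4);
  //   // each regs[i] now names a register or a 4-byte stack slot for that half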

  // Ditto except for calling C
  //
  // C argument in register AND stack slot.
  // Some architectures require that an argument must be passed in a register
  // AND in a stack slot. These architectures provide a second VMRegPair array
  // to be filled by the c_calling_convention method. On other architectures,
  // null is being passed as the second VMRegPair array, so arguments are either
  // passed in a register OR in a stack slot.
  static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed);

  static int vector_calling_convention(VMRegPair *regs,
                                       uint num_bits,
                                       uint total_args_passed);

  // Generate I2C and C2I adapters. These adapters are simple argument marshalling
  // blobs. Unlike adapters in the tiger and earlier releases the code in these
  // blobs does not create a new frame and is therefore virtually invisible
  // to the stack-walking code. In general these blobs extend the caller's stack
  // as needed for the conversion of argument locations.

  // When calling a c2i blob the code will always call the interpreter even if
  // by the time we reach the blob there is compiled code available. This allows
  // the blob to pass the incoming stack pointer (the sender sp) in a known
  // location for the interpreter to record. This is used by the frame code
  // to correct the sender code to match up with the stack pointer when the
  // thread left the compiled code. In addition it allows the interpreter
  // to remove the space the c2i adapter allocated to do its argument conversion.

  // Although a c2i blob will always run interpreted even if compiled code is
  // present, if we see that compiled code is present the compiled call site
  // will be patched/re-resolved so that later calls will run compiled.

  // Additionally a c2i blob needs to have an unverified entry because it can be
  // reached in situations where the call site is an inline cache site and may go
  // megamorphic.

  // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
  // that the interpreter, before it does any call dispatch, will record the current
  // stack pointer in the interpreter frame. On return it will restore the stack
  // pointer as needed. This means the i2c adapter code doesn't need any special
  // handshaking path with compiled code to keep the stack walking correct.

  static AdapterHandlerEntry* generate_i2c2i_adapters(MacroAssembler *masm,
                                                      int comp_args_on_stack,
                                                      const GrowableArray<SigEntry>* sig,
                                                      const VMRegPair* regs,
                                                      const GrowableArray<SigEntry>* sig_cc,
                                                      const VMRegPair* regs_cc,
                                                      const GrowableArray<SigEntry>* sig_cc_ro,
                                                      const VMRegPair* regs_cc_ro,
                                                      AdapterFingerPrint* fingerprint,
                                                      AdapterBlob*& new_adapter,
                                                      bool allocate_code_blob);

  static void gen_i2c_adapter(MacroAssembler *_masm,
                              int comp_args_on_stack,
                              const GrowableArray<SigEntry>* sig,
                              const VMRegPair *regs);

  // OSR support

  // OSR_migration_begin will extract the jvm state from an interpreter
  // frame (locals, monitors) and store the data in a piece of C heap
  // storage. This then allows the interpreter frame to be removed from the
  // stack and the OSR nmethod to be called. That method is called with a
  // pointer to the C heap storage. This pointer is the return value from
  // OSR_migration_begin.

  static intptr_t* OSR_migration_begin(JavaThread *thread);

  // OSR_migration_end is a trivial routine. It is called after the compiled
  // method has extracted the jvm state from the C heap that OSR_migration_begin
  // created. Its entire job is simply to free this storage.
  static void OSR_migration_end(intptr_t* buf);
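
  // Illustrative flow (simplified summary): on a hot backedge the interpreter
  // packs its state and hands it to the OSR nmethod, roughly:
  //
  //   intptr_t* buf = SharedRuntime::OSR_migration_begin(thread); // copy locals/monitors to C heap
  //   // ... pop the interpreter frame and jump to the OSR nmethod's entry,
  //   //     passing 'buf'; the compiled prolog unpacks the state ...
  //   SharedRuntime::OSR_migration_end(buf);                      // compiled code frees the buffer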

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size);
  static VMReg name_for_receiver();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved. On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  static uint in_preserve_stack_slots();

  // Is the vector's size (in bytes) bigger than the size saved by default?
  // For example, on x86 the 16-byte XMM registers are saved by default.
  static bool is_wide_vector(int size);

  // Save and restore a native result
  static void    save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
  static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);

  // Generate a native wrapper for a given method. The method takes arguments
  // in the Java compiled code convention, marshals them to the native
  // convention (handlizes oops, etc.), transitions to native, makes the call,
  // returns to java state (possibly blocking), unhandlizes any result and
  // returns.
  //
  // The wrapper may contain special-case code if the given method
  // is a compiled method handle adapter, such as _invokeBasic, _linkToVirtual, etc.
  static nmethod* generate_native_wrapper(MacroAssembler* masm,
                                          const methodHandle& method,
                                          int compile_id,
                                          BasicType* sig_bt,
                                          VMRegPair* regs,
                                          BasicType ret_type);

  // A compiled caller has just called the interpreter, but compiled code
  // exists. Patch the caller so it no longer calls into the interpreter.
  static void fixup_callers_callsite(Method* moop, address ret_pc);
  static bool should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb);

  // Slow-path Locking and Unlocking
  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
  static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);

  // Resolving of calls
  static address resolve_static_call_C     (JavaThread* current);
  static address resolve_virtual_call_C    (JavaThread* current);
  static address resolve_opt_virtual_call_C(JavaThread* current);

  static void load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res);
  static void store_inline_type_fields_to_buf(JavaThread* current, intptr_t res);
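
  // Illustrative note (simplified summary): resolve_static_call_C,
  // resolve_virtual_call_C and resolve_opt_virtual_call_C are reached through
  // the corresponding resolve stubs when a compiled call site is still
  // unresolved. They resolve the callee and typically patch the call site so
  // that later calls go straight to the resolved entry instead of re-entering
  // the runtime.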

  // arraycopy, the non-leaf version.  (See StubRoutines for all the leaf calls.)
  static void slow_arraycopy_C(oopDesc* src, jint src_pos,
                               oopDesc* dest, jint dest_pos,
                               jint length, JavaThread* thread);

  // handle ic miss with caller being compiled code
  // wrong method handling (inline cache misses)
  static address handle_wrong_method(JavaThread* current);
  static address handle_wrong_method_abstract(JavaThread* current);
  static address handle_wrong_method_ic_miss(JavaThread* current);
  static void allocate_inline_types(JavaThread* current, Method* callee, bool allocate_receiver);
  static oop allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS);

  static address handle_unsafe_access(JavaThread* thread, address next_pc);

  static BufferedInlineTypeBlob* generate_buffered_inline_type_adapter(const InlineKlass* vk);

#ifndef PRODUCT

  // Collect and print inline cache miss statistics
 private:
  enum { maxICmiss_count = 100 };
  static int     _ICmiss_index;                  // length of IC miss histogram
  static int     _ICmiss_count[maxICmiss_count]; // miss counts
  static address _ICmiss_at[maxICmiss_count];    // miss addresses
  static void trace_ic_miss(address at);

 public:
  static uint _ic_miss_ctr;                      // total # of IC misses
  static uint _wrong_method_ctr;
  static uint _resolve_static_ctr;
  static uint _resolve_virtual_ctr;
  static uint _resolve_opt_virtual_ctr;
  static uint _implicit_null_throws;
  static uint _implicit_div0_throws;

  static uint _jbyte_array_copy_ctr;        // Slow-path byte array copy
  static uint _jshort_array_copy_ctr;       // Slow-path short array copy
  static uint _jint_array_copy_ctr;         // Slow-path int array copy
  static uint _jlong_array_copy_ctr;        // Slow-path long array copy
  static uint _oop_array_copy_ctr;          // Slow-path oop array copy
  static uint _checkcast_array_copy_ctr;    // Slow-path oop array copy, with cast
  static uint _unsafe_array_copy_ctr;       // Slow-path includes alignment checks
  static uint _generic_array_copy_ctr;      // Slow-path includes type decoding
  static uint _slow_array_copy_ctr;         // Slow-path failed out to a method call

  static uint _new_instance_ctr;            // 'new' object requires GC
  static uint _new_array_ctr;               // 'new' array requires GC
  static uint _multi2_ctr, _multi3_ctr, _multi4_ctr, _multi5_ctr;
  static uint _find_handler_ctr;            // find exception handler
  static uint _rethrow_ctr;                 // rethrow exception
  static uint _mon_enter_stub_ctr;          // monitor enter stub
  static uint _mon_exit_stub_ctr;           // monitor exit stub
  static uint _mon_enter_ctr;               // monitor enter slow
  static uint _mon_exit_ctr;                // monitor exit slow
  static uint _partial_subtype_ctr;         // StubRoutines::partial_subtype_check

  // Statistics code
  // stats for "normal" compiled calls (non-interface)
  static int64_t _nof_normal_calls;               // total # of calls
  static int64_t _nof_inlined_calls;              // total # of inlined normal calls
  static int64_t _nof_static_calls;               // total # of calls to static methods or super methods (invokespecial)
  static int64_t _nof_inlined_static_calls;       // total # of inlined static calls
  // stats for compiled interface calls
  static int64_t _nof_interface_calls;            // total # of compiled calls
  static int64_t _nof_inlined_interface_calls;    // total # of inlined interface calls

 public: // for compiler
  static address nof_normal_calls_addr()            { return (address)&_nof_normal_calls; }
  static address nof_inlined_calls_addr()           { return (address)&_nof_inlined_calls; }
  static address nof_static_calls_addr()            { return (address)&_nof_static_calls; }
  static address nof_inlined_static_calls_addr()    { return (address)&_nof_inlined_static_calls; }
  static address nof_interface_calls_addr()         { return (address)&_nof_interface_calls; }
  static address nof_inlined_interface_calls_addr() { return (address)&_nof_inlined_interface_calls; }
  static void print_call_statistics(uint64_t comp_total);
  static void print_ic_miss_histogram();

#endif // PRODUCT

  static void print_statistics() PRODUCT_RETURN;
};


// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
//
// This library manages argument marshaling adapters and native wrappers.
// There are 2 flavors of adapters: I2C and C2I.
//
// The I2C flavor takes a stock interpreted call setup, marshals the
// arguments for a Java-compiled call, and jumps to Rmethod->code()->
// code_begin(). It is broken to call it without an nmethod assigned.
// The usual behavior is to lift any register arguments up out of the
// stack and possibly re-pack the extra arguments to be contiguous.
// I2C adapters will save what the interpreter's stack pointer will be
// after arguments are popped, then adjust the interpreter's frame
// size to force alignment and possibly to repack the arguments.
// After re-packing, it jumps to the compiled code start. There are
// no safepoints in this adapter code and a GC cannot happen while
// marshaling is in progress.
//
// The C2I flavor takes a stock compiled call setup plus the target method in
// Rmethod, marshals the arguments for an interpreted call and jumps to
// Rmethod->_i2i_entry. On entry, the interpreted frame has not yet been
// set up. Compiled frames are fixed-size and the args are likely not in the
// right place. Hence all the args will likely be copied into the
// interpreter's frame, forcing that frame to grow. The compiled frame's
// outgoing stack args will be dead after the copy.
//
// Native wrappers, like adapters, marshal arguments. Unlike adapters they
// also perform an official frame push & pop. They have a call to the native
// routine in their middle and end in a return (instead of ending in a jump).
// The native wrappers are stored in real nmethods instead of the BufferBlobs
// used by the adapters. The code generation happens here because it's very
// similar to what the adapters have to do.

class AdapterHandlerEntry : public CHeapObj<mtCode> {
  friend class AdapterHandlerLibrary;

 private:
  AdapterFingerPrint* _fingerprint;
  address _i2c_entry;
  address _c2i_entry;
  address _c2i_inline_entry;
  address _c2i_inline_ro_entry;
  address _c2i_unverified_entry;
  address _c2i_unverified_inline_entry;
  address _c2i_no_clinit_check_entry;

  // Support for scalarized inline type calling convention
  const GrowableArray<SigEntry>* _sig_cc;

#ifdef ASSERT
  // Captures code and signature used to generate this adapter when
  // verifying adapter equivalence.
  unsigned char* _saved_code;
  int            _saved_code_length;
#endif

  AdapterHandlerEntry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry,
                      address c2i_inline_entry, address c2i_inline_ro_entry,
                      address c2i_unverified_entry, address c2i_unverified_inline_entry,
                      address c2i_no_clinit_check_entry) :
    _fingerprint(fingerprint),
    _i2c_entry(i2c_entry),
    _c2i_entry(c2i_entry),
    _c2i_inline_entry(c2i_inline_entry),
    _c2i_inline_ro_entry(c2i_inline_ro_entry),
    _c2i_unverified_entry(c2i_unverified_entry),
    _c2i_unverified_inline_entry(c2i_unverified_inline_entry),
    _c2i_no_clinit_check_entry(c2i_no_clinit_check_entry),
    _sig_cc(nullptr)
#ifdef ASSERT
    , _saved_code_length(0)
#endif
  { }

  ~AdapterHandlerEntry();

 public:
  address get_i2c_entry()                   const { return _i2c_entry; }
  address get_c2i_entry()                   const { return _c2i_entry; }
  address get_c2i_inline_entry()            const { return _c2i_inline_entry; }
  address get_c2i_inline_ro_entry()         const { return _c2i_inline_ro_entry; }
  address get_c2i_unverified_entry()        const { return _c2i_unverified_entry; }
  address get_c2i_unverified_inline_entry() const { return _c2i_unverified_inline_entry; }
  address get_c2i_no_clinit_check_entry()   const { return _c2i_no_clinit_check_entry; }

  address base_address();
  void relocate(address new_base);

  // Support for scalarized inline type calling convention
  void set_sig_cc(const GrowableArray<SigEntry>* sig) { _sig_cc = sig; }
  const GrowableArray<SigEntry>* get_sig_cc() const { return _sig_cc; }

  AdapterFingerPrint* fingerprint() const { return _fingerprint; }

#ifdef ASSERT
  // Used to verify that code generated for shared adapters is equivalent
  void save_code   (unsigned char* code, int length);
  bool compare_code(AdapterHandlerEntry* other);
#endif

  //virtual void print_on(outputStream* st) const;  DO NOT USE
  void print_adapter_on(outputStream* st) const;
};

class CompiledEntrySignature;

class AdapterHandlerLibrary: public AllStatic {
  friend class SharedRuntime;
 private:
  static BufferBlob* _buffer; // the temporary code buffer in CodeCache
  static AdapterHandlerEntry* _abstract_method_handler;
  static AdapterHandlerEntry* _no_arg_handler;
  static AdapterHandlerEntry* _int_arg_handler;
  static AdapterHandlerEntry* _obj_arg_handler;
  static AdapterHandlerEntry* _obj_int_arg_handler;
  static AdapterHandlerEntry* _obj_obj_arg_handler;

  static BufferBlob* buffer_blob();
  static void initialize();
  static AdapterHandlerEntry* create_adapter(AdapterBlob*& new_adapter,
                                             CompiledEntrySignature& ces,
                                             bool allocate_code_blob);
  static AdapterHandlerEntry* get_simple_adapter(const methodHandle& method);

 public:

  static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint,
                                        address i2c_entry, address c2i_entry, address c2i_inline_entry, address c2i_inline_ro_entry,
                                        address c2i_unverified_entry, address c2i_unverified_inline_entry, address c2i_no_clinit_check_entry = nullptr);
  static void create_native_wrapper(const methodHandle& method);
  static AdapterHandlerEntry* get_adapter(const methodHandle& method);

  static void print_handler(const CodeBlob* b) { print_handler_on(tty, b); }
  static void print_handler_on(outputStream* st, const CodeBlob* b);
  static bool contains(const CodeBlob* b);
#ifndef PRODUCT
  static void print_statistics();
#endif // PRODUCT

};

// Utility class for computing the calling convention of the 3 types
// of compiled method entries:
//     Method::_from_compiled_entry            - sig_cc
//     Method::_from_compiled_inline_ro_entry  - sig_cc_ro
//     Method::_from_compiled_inline_entry     - sig
class CompiledEntrySignature : public StackObj {
  Method* _method;
  int  _num_inline_args;
  bool _has_inline_recv;
  GrowableArray<SigEntry>* _sig;
  GrowableArray<SigEntry>* _sig_cc;
  GrowableArray<SigEntry>* _sig_cc_ro;
  VMRegPair* _regs;
  VMRegPair* _regs_cc;
  VMRegPair* _regs_cc_ro;

  int _args_on_stack;
  int _args_on_stack_cc;
  int _args_on_stack_cc_ro;

  bool _c1_needs_stack_repair;
  bool _c2_needs_stack_repair;

  GrowableArray<Method*>* _supers;

 public:
  Method* method() const { return _method; }

  // Used by Method::_from_compiled_inline_entry
  GrowableArray<SigEntry>* sig() const { return _sig; }

  // Used by Method::_from_compiled_entry
  GrowableArray<SigEntry>* sig_cc() const { return _sig_cc; }

  // Used by Method::_from_compiled_inline_ro_entry
  GrowableArray<SigEntry>* sig_cc_ro() const { return _sig_cc_ro; }

  VMRegPair* regs()       const { return _regs; }
  VMRegPair* regs_cc()    const { return _regs_cc; }
  VMRegPair* regs_cc_ro() const { return _regs_cc_ro; }

  int args_on_stack()       const { return _args_on_stack; }
  int args_on_stack_cc()    const { return _args_on_stack_cc; }
  int args_on_stack_cc_ro() const { return _args_on_stack_cc_ro; }

  int  num_inline_args() const { return _num_inline_args; }
  bool has_inline_recv() const { return _has_inline_recv; }

  bool has_scalarized_args()   const { return _sig != _sig_cc; }
  bool c1_needs_stack_repair() const { return _c1_needs_stack_repair; }
  bool c2_needs_stack_repair() const { return _c2_needs_stack_repair; }
  CodeOffsets::Entries c1_inline_ro_entry_type() const;

  GrowableArray<Method*>* get_supers();

  CompiledEntrySignature(Method* method = nullptr);
  void compute_calling_conventions(bool init = true);
};

#endif // SHARE_RUNTIME_SHAREDRUNTIME_HPP