/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_SHAREDRUNTIME_HPP
#define SHARE_RUNTIME_SHAREDRUNTIME_HPP

#include "asm/codeBuffer.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/signature.hpp"
#include "utilities/macros.hpp"

class AdapterHandlerEntry;
class AdapterFingerPrint;
class vframeStream;
class SigEntry;

// Runtime is the base class for various runtime interfaces
// (InterpreterRuntime, CompilerRuntime, etc.). It provides
// shared functionality such as exception forwarding (C++ to
// Java exceptions), locking/unlocking mechanisms, statistical
// information, etc.

class SharedRuntime: AllStatic {
  friend class VMStructs;

 private:
  static bool resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
                                          CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
                                          Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS);
  static methodHandle resolve_sub_helper(bool is_virtual, bool is_optimized, bool* caller_is_c1, TRAPS);

  // Shared stub locations

  static RuntimeStub*        _wrong_method_blob;
  static RuntimeStub*        _wrong_method_abstract_blob;
  static RuntimeStub*        _ic_miss_blob;
  static RuntimeStub*        _resolve_opt_virtual_call_blob;
  static RuntimeStub*        _resolve_virtual_call_blob;
  static RuntimeStub*        _resolve_static_call_blob;

  static DeoptimizationBlob* _deopt_blob;

  static SafepointBlob*      _polling_page_vectors_safepoint_handler_blob;
  static SafepointBlob*      _polling_page_safepoint_handler_blob;
  static SafepointBlob*      _polling_page_return_handler_blob;

#ifdef COMPILER2
  static UncommonTrapBlob*   _uncommon_trap_blob;
#endif // COMPILER2

  static nmethod*            _cont_doYield_stub;

#ifndef PRODUCT
  // Counters
  static int64_t _nof_megamorphic_calls;   // total # of megamorphic calls (through vtable)
#endif // !PRODUCT

 private:
  enum { POLL_AT_RETURN, POLL_AT_LOOP, POLL_AT_VECTOR_LOOP };
  static SafepointBlob* generate_handler_blob(address call_ptr, int poll_type);
  static RuntimeStub*   generate_resolve_blob(address destination, const char* name);

 public:
  static void generate_stubs(void);

  // max bytes for each dtrace string parameter
  enum { max_dtrace_string_size = 256 };

  // The following arithmetic routines are used on platforms that do
  // not have machine instructions to implement their functionality.
  // Do not remove these.
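
  // Editorial note, a hedged sketch rather than normative documentation: in
  // the sharedRuntime.cpp implementations the *second* argument of the
  // division helpers is the dividend, i.e. ldiv(y, x) computes x / y and
  // lrem(y, x) computes x % y. A compiler lowering
  //   jlong q = dividend / divisor;
  // to a runtime call therefore passes the divisor first:
  //   jlong q = SharedRuntime::ldiv(divisor, dividend);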

  // long arithmetic
  static jlong   lmul(jlong y, jlong x);
  static jlong   ldiv(jlong y, jlong x);
  static jlong   lrem(jlong y, jlong x);

  // float and double remainder
  static jfloat  frem(jfloat x, jfloat y);
  static jdouble drem(jdouble x, jdouble y);

#ifdef _WIN64
  // Workaround for fmod issue in the Windows x64 CRT
  static double fmod_winx64(double x, double y);
#endif

#ifdef __SOFTFP__
  static jfloat  fadd(jfloat x, jfloat y);
  static jfloat  fsub(jfloat x, jfloat y);
  static jfloat  fmul(jfloat x, jfloat y);
  static jfloat  fdiv(jfloat x, jfloat y);

  static jdouble dadd(jdouble x, jdouble y);
  static jdouble dsub(jdouble x, jdouble y);
  static jdouble dmul(jdouble x, jdouble y);
  static jdouble ddiv(jdouble x, jdouble y);
#endif // __SOFTFP__

  // float conversion (needs to set appropriate rounding mode)
  static jint    f2i (jfloat  x);
  static jlong   f2l (jfloat  x);
  static jint    d2i (jdouble x);
  static jlong   d2l (jdouble x);
  static jfloat  d2f (jdouble x);
  static jfloat  l2f (jlong   x);
  static jdouble l2d (jlong   x);
  static jfloat  hf2f(jshort  x);
  static jshort  f2hf(jfloat  x);
  static jfloat  i2f (jint    x);

#ifdef __SOFTFP__
  static jdouble i2d (jint x);
  static jdouble f2d (jfloat x);
#endif // __SOFTFP__
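
  // Editorial note on the f2i/f2l/d2i/d2l conversions above: the JLS requires
  // saturating semantics (NaN converts to 0, out-of-range values clamp to the
  // target type's MIN/MAX value), which is why a single machine instruction is
  // often insufficient; e.g. x86 cvttss2si produces 0x80000000 on overflow,
  // so a helper must post-process that result.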

  // double trigonometrics and transcendentals
  static jdouble dsin(jdouble x);
  static jdouble dcos(jdouble x);
  static jdouble dtan(jdouble x);
  static jdouble dlog(jdouble x);
  static jdouble dlog10(jdouble x);
  static jdouble dexp(jdouble x);
  static jdouble dpow(jdouble x, jdouble y);

#if defined(__SOFTFP__) || defined(E500V2)
  static double dabs(double f);
#endif

#if defined(__SOFTFP__) || defined(PPC)
  static double dsqrt(double f);
#endif

  // Montgomery multiplication
  static void montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                  jint len, jlong inv, jint *m_ints);
  static void montgomery_square(jint *a_ints, jint *n_ints,
                                jint len, jlong inv, jint *m_ints);

#ifdef __SOFTFP__
  // C++ compiler generates soft float instructions as well as passing
  // float and double in registers.
  static int  fcmpl(float x, float y);
  static int  fcmpg(float x, float y);
  static int  dcmpl(double x, double y);
  static int  dcmpg(double x, double y);

  static int  unordered_fcmplt(float x, float y);
  static int  unordered_dcmplt(double x, double y);
  static int  unordered_fcmple(float x, float y);
  static int  unordered_dcmple(double x, double y);
  static int  unordered_fcmpge(float x, float y);
  static int  unordered_dcmpge(double x, double y);
  static int  unordered_fcmpgt(float x, float y);
  static int  unordered_dcmpgt(double x, double y);

  static float  fneg(float f);
  static double dneg(double f);
#endif

  // exception handling across interpreter/compiler boundaries
  static address raw_exception_handler_for_return_address(JavaThread* current, address return_address);
  static address exception_handler_for_return_address(JavaThread* current, address return_address);

  // exception handling and implicit exceptions
  static address compute_compiled_exc_handler(CompiledMethod* nm, address ret_pc, Handle& exception,
                                              bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred);
  enum ImplicitExceptionKind {
    IMPLICIT_NULL,
    IMPLICIT_DIVIDE_BY_ZERO,
    STACK_OVERFLOW
  };
  static void    throw_AbstractMethodError(JavaThread* current);
  static void    throw_IncompatibleClassChangeError(JavaThread* current);
  static void    throw_ArithmeticException(JavaThread* current);
  static void    throw_NullPointerException(JavaThread* current);
  static void    throw_NullPointerException_at_call(JavaThread* current);
  static void    throw_StackOverflowError(JavaThread* current);
  static void    throw_delayed_StackOverflowError(JavaThread* current);
  static void    throw_StackOverflowError_common(JavaThread* current, bool delayed);
  static address continuation_for_implicit_exception(JavaThread* current,
                                                     address faulting_pc,
                                                     ImplicitExceptionKind exception_kind);
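
  // Illustrative sketch (editorial, not in the original header): the platform
  // signal handlers turn a fault at a compiled null check into a Java
  // NullPointerException by asking for a continuation pc, roughly:
  //
  //   address stub = SharedRuntime::continuation_for_implicit_exception(
  //                      thread, pc, SharedRuntime::IMPLICIT_NULL);
  //   if (stub != NULL) os::Posix::ucontext_set_pc(uc, stub);  // resume there
  //
  // The exact redirection call varies by platform.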

  // Post-slow-path-allocation, pre-initializing-stores step for
  // implementing e.g. ReduceInitialCardMarks
  static void on_slowpath_allocation_exit(JavaThread* current);

  static void enable_stack_reserved_zone(JavaThread* current);
  static frame look_for_reserved_stack_annotated_method(JavaThread* current, frame fr);

  // Shared stub locations
  static address get_poll_stub(address pc);

  static address get_ic_miss_stub() {
    assert(_ic_miss_blob != NULL, "oops");
    return _ic_miss_blob->entry_point();
  }

  static address get_handle_wrong_method_stub() {
    assert(_wrong_method_blob != NULL, "oops");
    return _wrong_method_blob->entry_point();
  }

  static address get_handle_wrong_method_abstract_stub() {
    assert(_wrong_method_abstract_blob != NULL, "oops");
    return _wrong_method_abstract_blob->entry_point();
  }

#ifdef COMPILER2
  static void generate_uncommon_trap_blob(void);
  static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; }
#endif // COMPILER2

  static address get_resolve_opt_virtual_call_stub() {
    assert(_resolve_opt_virtual_call_blob != NULL, "oops");
    return _resolve_opt_virtual_call_blob->entry_point();
  }
  static address get_resolve_virtual_call_stub() {
    assert(_resolve_virtual_call_blob != NULL, "oops");
    return _resolve_virtual_call_blob->entry_point();
  }
  static address get_resolve_static_call_stub() {
    assert(_resolve_static_call_blob != NULL, "oops");
    return _resolve_static_call_blob->entry_point();
  }

  static SafepointBlob* polling_page_return_handler_blob()            { return _polling_page_return_handler_blob; }
  static SafepointBlob* polling_page_safepoint_handler_blob()         { return _polling_page_safepoint_handler_blob; }
  static SafepointBlob* polling_page_vectors_safepoint_handler_blob() { return _polling_page_vectors_safepoint_handler_blob; }

  static nmethod* cont_doYield_stub() {
    assert(_cont_doYield_stub != nullptr, "oops");
    return _cont_doYield_stub;
  }

  // Counters
#ifndef PRODUCT
  static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; }
#endif // PRODUCT

  // Helper routine for full-speed JVMTI exception throwing support
  static void throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception);
  static void throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char* message = NULL);

  // RedefineClasses() tracing support for obsolete method entry
  static int rc_trace_method_entry(JavaThread* thread, Method* m);

  // To be used as the entry point for unresolved native methods.
  static address native_method_throw_unsatisfied_link_error_entry();

  static void register_finalizer(JavaThread* thread, oopDesc* obj);

  // dtrace notifications
  static int dtrace_object_alloc(oopDesc* o);
  static int dtrace_object_alloc(JavaThread* thread, oopDesc* o);
  static int dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size);
  static int dtrace_method_entry(JavaThread* thread, Method* m);
  static int dtrace_method_exit(JavaThread* thread, Method* m);

  // Utility method for retrieving the Java thread id; returns 0 if the
  // thread is not a well-formed Java thread.
  static jlong get_java_tid(JavaThread* thread);

  // used by native wrappers to re-enable yellow if overflow happened in native code
  static void reguard_yellow_pages();

  // Fill in the "X cannot be cast to a Y" message for ClassCastException
  //
  // @param thr the current thread
  // @param caster_klass the class of the object we are casting
  // @return the dynamically allocated exception message (must be freed
  // by the caller using a resource mark)
  //
  // BCP must refer to the current 'checkcast' opcode for the frame
  // on top of the stack.
  // The caller (or one of its callers) must use a ResourceMark
  // in order to correctly free the result.
  //
  static char* generate_class_cast_message(JavaThread* thr, Klass* caster_klass);

  // Fill in the "X cannot be cast to a Y" message for ClassCastException
  //
  // @param caster_klass the class of the object we are casting
  // @param target_klass the class we are attempting to cast to
  // @return the dynamically allocated exception message (must be freed
  // by the caller using a resource mark)
  //
  // This version does not require access to the frame, so it can be called
  // from interpreted code.
  // The caller (or one of its callers) must use a ResourceMark
  // in order to correctly free the result.
  //
  static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = NULL);
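
  // Illustrative usage sketch (editorial, not in the original header) for the
  // generate_class_cast_message() overloads above; the ResourceMark frees the
  // resource-allocated message when it goes out of scope:
  //
  //   { ResourceMark rm(THREAD);
  //     char* msg = SharedRuntime::generate_class_cast_message(caster_klass, target_klass);
  //     THROW_MSG(vmSymbols::java_lang_ClassCastException(), msg);
  //   }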

  // Resolves a call site - may patch in the destination of the call into the
  // compiled code.
  static methodHandle resolve_helper(bool is_virtual, bool is_optimized, bool* caller_is_c1, TRAPS);

 private:
  // deopt blob
  static void generate_deopt_blob(void);

  static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame,
                                             methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info,
                                             bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS);

 public:
  static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; }

  // Resets a call-site in compiled code so it will get resolved again.
  static methodHandle reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS);

  // In the code prolog, if the klass comparison fails, the inline cache
  // misses and the call site is patched to megamorphic.
  static methodHandle handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS);

  // Find the method that called us.
  static methodHandle find_callee_method(TRAPS);

  static void monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* thread);

  static void monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current);

  static address entry_for_handle_wrong_method(methodHandle callee_method, bool is_static_call, bool is_optimized, bool caller_is_c1) {
    assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
    assert(callee_method->verified_inline_code_entry() != NULL, "Jump to zero!");
    assert(callee_method->verified_inline_ro_code_entry() != NULL, "Jump to zero!");
    if (caller_is_c1) {
      return callee_method->verified_inline_code_entry();
    } else if (is_static_call || is_optimized) {
      return callee_method->verified_code_entry();
    } else {
      return callee_method->verified_inline_ro_code_entry();
    }
  }

 private:
  static Handle find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);
  static Handle find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);

  static Method* extract_attached_method(vframeStream& vfst);

#if defined(X86) && defined(COMPILER1)
  // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
  static void inline_check_hashcode_from_object_header(MacroAssembler* masm, const methodHandle& method, Register obj_reg, Register result);
#endif // X86 && COMPILER1

 public:

  // Read the array of BasicTypes from a Java signature, and compute where
  // compiled Java code would like to put the results. Values in reg_lo and
  // reg_hi refer to 4-byte quantities. Values less than SharedInfo::stack0 are
  // registers, those above refer to 4-byte stack slots. All stack slots are
  // based off of the window top. SharedInfo::stack0 refers to the first usable
  // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
  // 4-bytes higher.
  // The return value is the maximum number of VMReg stack slots the convention will use.
  static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
  static int java_calling_convention(const GrowableArray<SigEntry>* sig, VMRegPair* regs) {
    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig->length());
    int total_args_passed = SigEntry::fill_sig_bt(sig, sig_bt);
    return java_calling_convention(sig_bt, regs, total_args_passed);
  }
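
  // Illustrative sketch (editorial, not in the original header): computing the
  // compiled-code argument locations for an (int, long) signature. Note that a
  // long occupies two sig_bt entries, the second being a T_VOID placeholder:
  //
  //   BasicType sig_bt[] = { T_INT, T_LONG, T_VOID };
  //   VMRegPair regs[3];
  //   int stack_slots = SharedRuntime::java_calling_convention(sig_bt, regs, 3);
  //   // regs[i].first()/second() now name registers or 4-byte stack slots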
  static int java_return_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
  static const uint java_return_convention_max_int;
  static const uint java_return_convention_max_float;

  static void check_member_name_argument_is_last_argument(const methodHandle& method,
                                                          const BasicType* sig_bt,
                                                          const VMRegPair* regs) NOT_DEBUG_RETURN;

  // Ditto except for calling C
  //
  // C argument in register AND stack slot.
  // Some architectures require that an argument must be passed in a register
  // AND in a stack slot. These architectures provide a second VMRegPair array
  // to be filled by the c_calling_convention method. On other architectures,
  // NULL is being passed as the second VMRegPair array, so arguments are either
  // passed in a register OR in a stack slot.
  static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2,
                                  int total_args_passed);

  static int vector_calling_convention(VMRegPair *regs,
                                       uint num_bits,
                                       uint total_args_passed);

  // Generate I2C and C2I adapters. These adapters are simple argument-marshalling
  // blobs. Unlike the adapters in Tiger and earlier releases, the code in these
  // blobs does not create a new frame, so they are virtually invisible
  // to the stack-walking code. In general these blobs extend the caller's stack
  // as needed for the conversion of argument locations.

  // When calling a c2i blob the code will always call the interpreter, even if
  // by the time we reach the blob there is compiled code available. This allows
  // the blob to pass the incoming stack pointer (the sender sp) in a known
  // location for the interpreter to record. This is used by the frame code
  // to correct the sender code to match up with the stack pointer when the
  // thread left the compiled code. In addition it allows the interpreter
  // to remove the space the c2i adapter allocated to do its argument conversion.

  // Although a c2i blob will always run interpreted, if we see that compiled
  // code is present the compiled call site will be patched/re-resolved so that
  // later calls will run compiled.

  // Additionally, a c2i blob needs to have an unverified entry because it can
  // be reached in situations where the call site is an inline cache site and
  // may go megamorphic.

  // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
  // that the interpreter, before it does any call dispatch, will record the current
  // stack pointer in the interpreter frame. On return it will restore the stack
  // pointer as needed. This means the i2c adapter code doesn't need any special
  // handshaking path with compiled code to keep the stack walking correct.

  static AdapterHandlerEntry* generate_i2c2i_adapters(MacroAssembler *masm,
                                                      int comp_args_on_stack,
                                                      const GrowableArray<SigEntry>* sig,
                                                      const VMRegPair* regs,
                                                      const GrowableArray<SigEntry>* sig_cc,
                                                      const VMRegPair* regs_cc,
                                                      const GrowableArray<SigEntry>* sig_cc_ro,
                                                      const VMRegPair* regs_cc_ro,
                                                      AdapterFingerPrint* fingerprint,
                                                      AdapterBlob*& new_adapter,
                                                      bool allocate_code_blob);

  static void gen_i2c_adapter(MacroAssembler *_masm,
                              int comp_args_on_stack,
                              const GrowableArray<SigEntry>* sig,
                              const VMRegPair *regs);

  // OSR support

  // OSR_migration_begin will extract the jvm state from an interpreter
  // frame (locals, monitors) and store the data in a piece of C heap
  // storage. This then allows the interpreter frame to be removed from the
  // stack and the OSR nmethod to be called. That method is called with a
  // pointer to the C heap storage. This pointer is the return value from
  // OSR_migration_begin.

  static intptr_t* OSR_migration_begin(JavaThread *thread);

  // OSR_migration_end is a trivial routine. It is called after the compiled
  // method has extracted the jvm state from the C heap that OSR_migration_begin
  // created. Its entire job is simply to free this storage.
  static void OSR_migration_end(intptr_t* buf);
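
  // Illustrative flow (editorial, not in the original header) of the OSR
  // hand-off described above, assuming the caller is the interpreter's
  // on-stack-replacement path:
  //
  //   intptr_t* buf = SharedRuntime::OSR_migration_begin(thread);
  //   // ... pop the interpreter frame and enter the OSR nmethod, passing buf ...
  //   // the OSR nmethod unpacks locals/monitors from buf, then:
  //   SharedRuntime::OSR_migration_end(buf);   // frees the C heap storage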

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size);
  static VMReg name_for_receiver();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved. On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  static uint in_preserve_stack_slots();

  // Is the vector's size (in bytes) bigger than a size saved by default?
  // For example, on x86 16 bytes XMM registers are saved by default.
  static bool is_wide_vector(int size);

  // Save and restore a native result
  static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
  static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);

  // Generate a native wrapper for a given method. The method takes arguments
  // in the Java compiled code convention, marshals them to the native
  // convention (handlizes oops, etc.), transitions to native, makes the call,
  // returns to java state (possibly blocking), unhandlizes any result and
  // returns.
  //
  // The wrapper may contain special-case code if the given method
  // is a compiled method handle adapter, such as _invokeBasic, _linkToVirtual, etc.
  static nmethod* generate_native_wrapper(MacroAssembler* masm,
                                          const methodHandle& method,
                                          int compile_id,
                                          BasicType* sig_bt,
                                          VMRegPair* regs,
                                          BasicType ret_type);

  // A compiled caller has just called the interpreter, but compiled code
  // exists. Patch the caller so it no longer calls into the interpreter.
  static void fixup_callers_callsite(Method* moop, address ret_pc);
  static bool should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb);

  // Slow-path Locking and Unlocking
  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
  static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);

  // Resolving of calls
  static address resolve_static_call_C     (JavaThread* current);
  static address resolve_virtual_call_C    (JavaThread* current);
  static address resolve_opt_virtual_call_C(JavaThread* current);

  static void load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res);
  static void store_inline_type_fields_to_buf(JavaThread* current, intptr_t res);
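
  // Illustrative flow (editorial, not in the original header) for the
  // resolve_*_call_C entry points above: a compiled, not-yet-bound call site
  // first jumps to the matching resolve blob, which calls into the C++ helper;
  // the helper resolves the callee, patches the call site, and returns the
  // code entry at which execution continues:
  //
  //   compiled caller --> _resolve_static_call_blob --> resolve_static_call_C
  //                   --> (site patched) --> direct call on later invocations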

  // arraycopy, the non-leaf version. (See StubRoutines for all the leaf calls.)
  static void slow_arraycopy_C(oopDesc* src, jint src_pos,
                               oopDesc* dest, jint dest_pos,
                               jint length, JavaThread* thread);

  // handle ic miss with caller being compiled code
  // wrong method handling (inline cache misses)
  static address handle_wrong_method(JavaThread* current);
  static address handle_wrong_method_abstract(JavaThread* current);
  static address handle_wrong_method_ic_miss(JavaThread* current);
  static void allocate_inline_types(JavaThread* current, Method* callee, bool allocate_receiver);
  static oop allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS);

  static address handle_unsafe_access(JavaThread* thread, address next_pc);

  static BufferedInlineTypeBlob* generate_buffered_inline_type_adapter(const InlineKlass* vk);

#ifndef PRODUCT

  // Collect and print inline cache miss statistics
 private:
  enum { maxICmiss_count = 100 };
  static int     _ICmiss_index;                  // length of IC miss histogram
  static int     _ICmiss_count[maxICmiss_count]; // miss counts
  static address _ICmiss_at[maxICmiss_count];    // miss addresses
  static void trace_ic_miss(address at);

 public:
  static int _ic_miss_ctr;                 // total # of IC misses
  static int _wrong_method_ctr;
  static int _resolve_static_ctr;
  static int _resolve_virtual_ctr;
  static int _resolve_opt_virtual_ctr;
  static int _implicit_null_throws;
  static int _implicit_div0_throws;

  static int _jbyte_array_copy_ctr;        // Slow-path byte array copy
  static int _jshort_array_copy_ctr;       // Slow-path short array copy
  static int _jint_array_copy_ctr;         // Slow-path int array copy
  static int _jlong_array_copy_ctr;        // Slow-path long array copy
  static int _oop_array_copy_ctr;          // Slow-path oop array copy
  static int _checkcast_array_copy_ctr;    // Slow-path oop array copy, with cast
  static int _unsafe_array_copy_ctr;       // Slow-path includes alignment checks
  static int _generic_array_copy_ctr;      // Slow-path includes type decoding
  static int _slow_array_copy_ctr;         // Slow-path failed out to a method call

  static int _new_instance_ctr;            // 'new' object requires GC
  static int _new_array_ctr;               // 'new' array requires GC
  static int _multi2_ctr, _multi3_ctr, _multi4_ctr, _multi5_ctr;
  static int _find_handler_ctr;            // find exception handler
  static int _rethrow_ctr;                 // rethrow exception
  static int _mon_enter_stub_ctr;          // monitor enter stub
  static int _mon_exit_stub_ctr;           // monitor exit stub
  static int _mon_enter_ctr;               // monitor enter slow
  static int _mon_exit_ctr;                // monitor exit slow
  static int _partial_subtype_ctr;         // StubRoutines::partial_subtype_check

  // Statistics code
  // stats for "normal" compiled calls (non-interface)
  static int64_t _nof_normal_calls;             // total # of calls
  static int64_t _nof_inlined_calls;            // total # of inlined normal calls
  static int64_t _nof_static_calls;             // total # of calls to static methods or super methods (invokespecial)
  static int64_t _nof_inlined_static_calls;     // total # of inlined static calls
  // stats for compiled interface calls
  static int64_t _nof_interface_calls;          // total # of compiled calls
  static int64_t _nof_inlined_interface_calls;  // total # of inlined interface calls
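
  // Editorial note (not in the original header): the *_addr() accessors below
  // exist so the JIT can embed a counter's raw address in generated code and
  // bump it inline. A hedged sketch of such a use, borrowing the x86-64
  // MacroAssembler idiom (the precise overload varies by platform and version):
  //
  //   __ incrementq(ExternalAddress(SharedRuntime::nof_normal_calls_addr()));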

 public: // for compiler
  static address nof_normal_calls_addr()            { return (address)&_nof_normal_calls; }
  static address nof_inlined_calls_addr()           { return (address)&_nof_inlined_calls; }
  static address nof_static_calls_addr()            { return (address)&_nof_static_calls; }
  static address nof_inlined_static_calls_addr()    { return (address)&_nof_inlined_static_calls; }
  static address nof_interface_calls_addr()         { return (address)&_nof_interface_calls; }
  static address nof_inlined_interface_calls_addr() { return (address)&_nof_inlined_interface_calls; }
  static void print_call_statistics(uint64_t comp_total);
  static void print_statistics();
  static void print_ic_miss_histogram();

#endif // PRODUCT
};


// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
//
// This library manages argument marshaling adapters and native wrappers.
// There are 2 flavors of adapters: I2C and C2I.
//
// The I2C flavor takes a stock interpreted call setup, marshals the
// arguments for a Java-compiled call, and jumps to Rmethod->code()->
// code_begin(). It is broken to call it without an nmethod assigned.
// The usual behavior is to lift any register arguments up out of the
// stack and possibly re-pack the extra arguments to be contiguous.
// I2C adapters will save what the interpreter's stack pointer will be
// after arguments are popped, then adjust the interpreter's frame
// size to force alignment and possibly to repack the arguments.
// After re-packing, it jumps to the compiled code start. There are
// no safepoints in this adapter code and a GC cannot happen while
// marshaling is in progress.
//
// The C2I flavor takes a stock compiled call setup plus the target method in
// Rmethod, marshals the arguments for an interpreted call and jumps to
// Rmethod->_i2i_entry. On entry, the interpreted frame has not yet been
// set up. Compiled frames are fixed-size and the args are likely not in the
// right place. Hence all the args will likely be copied into the
// interpreter's frame, forcing that frame to grow. The compiled frame's
// outgoing stack args will be dead after the copy.
//
// Native wrappers, like adapters, marshal arguments. Unlike adapters they
// also perform an official frame push & pop. They have a call to the native
// routine in their middle and end in a return (instead of ending in a jump).
// The native wrappers are stored in real nmethods instead of the BufferBlobs
// used by the adapters. The code generation happens here because it's very
// similar to what the adapters have to do.

class AdapterHandlerEntry : public CHeapObj<mtCode> {
  friend class AdapterHandlerLibrary;

 private:
  AdapterFingerPrint* _fingerprint;
  address _i2c_entry;
  address _c2i_entry;
  address _c2i_inline_entry;
  address _c2i_inline_ro_entry;
  address _c2i_unverified_entry;
  address _c2i_unverified_inline_entry;
  address _c2i_no_clinit_check_entry;

  // Support for scalarized inline type calling convention
  const GrowableArray<SigEntry>* _sig_cc;

#ifdef ASSERT
  // Captures code and signature used to generate this adapter when
  // verifying adapter equivalence.
  unsigned char* _saved_code;
  int            _saved_code_length;
#endif

  AdapterHandlerEntry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry,
                      address c2i_inline_entry, address c2i_inline_ro_entry,
                      address c2i_unverified_entry, address c2i_unverified_inline_entry,
                      address c2i_no_clinit_check_entry) :
    _fingerprint(fingerprint),
    _i2c_entry(i2c_entry),
    _c2i_entry(c2i_entry),
    _c2i_inline_entry(c2i_inline_entry),
    _c2i_inline_ro_entry(c2i_inline_ro_entry),
    _c2i_unverified_entry(c2i_unverified_entry),
    _c2i_unverified_inline_entry(c2i_unverified_inline_entry),
    _c2i_no_clinit_check_entry(c2i_no_clinit_check_entry),
    _sig_cc(NULL)
#ifdef ASSERT
    , _saved_code(NULL)
    , _saved_code_length(0)
#endif
  { }

  ~AdapterHandlerEntry();

 public:
  address get_i2c_entry()                   const { return _i2c_entry; }
  address get_c2i_entry()                   const { return _c2i_entry; }
  address get_c2i_inline_entry()            const { return _c2i_inline_entry; }
  address get_c2i_inline_ro_entry()         const { return _c2i_inline_ro_entry; }
  address get_c2i_unverified_entry()        const { return _c2i_unverified_entry; }
  address get_c2i_unverified_inline_entry() const { return _c2i_unverified_inline_entry; }
  address get_c2i_no_clinit_check_entry()   const { return _c2i_no_clinit_check_entry; }

  address base_address();
  void relocate(address new_base);

  // Support for scalarized inline type calling convention
  void set_sig_cc(const GrowableArray<SigEntry>* sig) { _sig_cc = sig; }
  const GrowableArray<SigEntry>* get_sig_cc() const { return _sig_cc; }

  AdapterFingerPrint* fingerprint() const { return _fingerprint; }

#ifdef ASSERT
  // Used to verify that code generated for shared adapters is equivalent
  void save_code   (unsigned char* code, int length);
  bool compare_code(AdapterHandlerEntry* other);
#endif

  //virtual void print_on(outputStream* st) const;  DO NOT USE
  void print_adapter_on(outputStream* st) const;
};

class CompiledEntrySignature;

class AdapterHandlerLibrary: public AllStatic {
  friend class SharedRuntime;
 private:
  static BufferBlob* _buffer; // the temporary code buffer in CodeCache
  static AdapterHandlerEntry* _abstract_method_handler;
  static AdapterHandlerEntry* _no_arg_handler;
  static AdapterHandlerEntry* _int_arg_handler;
  static AdapterHandlerEntry* _obj_arg_handler;
  static AdapterHandlerEntry* _obj_int_arg_handler;
  static AdapterHandlerEntry* _obj_obj_arg_handler;

  static BufferBlob* buffer_blob();
  static void initialize();
  static AdapterHandlerEntry* create_adapter(AdapterBlob*& new_adapter,
                                             CompiledEntrySignature& ces,
                                             bool allocate_code_blob);
  static AdapterHandlerEntry* get_simple_adapter(const methodHandle& method);

 public:

  static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint,
                                        address i2c_entry, address c2i_entry, address c2i_inline_entry, address c2i_inline_ro_entry,
                                        address c2i_unverified_entry, address c2i_unverified_inline_entry, address c2i_no_clinit_check_entry = NULL);
  static void create_native_wrapper(const methodHandle& method);
  static AdapterHandlerEntry* get_adapter(const methodHandle& method);

  static void print_handler(const CodeBlob* b) { print_handler_on(tty, b); }
  static void print_handler_on(outputStream* st, const CodeBlob* b);
  static bool contains(const CodeBlob* b);
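
  // Illustrative usage sketch (editorial, not in the original header):
  // adapters are looked up, and lazily generated, when a method is linked,
  // roughly as done in Method::link_method:
  //
  //   AdapterHandlerEntry* entry = AdapterHandlerLibrary::get_adapter(mh);
  //   mh->set_adapter_entry(entry);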
#ifndef PRODUCT
  static void print_statistics();
#endif // PRODUCT

};

// Utility class for computing the calling convention of the 3 types
// of compiled method entries:
//     Method::_from_compiled_entry            - sig_cc
//     Method::_from_compiled_inline_ro_entry  - sig_cc_ro
//     Method::_from_compiled_inline_entry     - sig
class CompiledEntrySignature : public StackObj {
  Method* _method;
  int  _num_inline_args;
  bool _has_inline_recv;
  GrowableArray<SigEntry>* _sig;
  GrowableArray<SigEntry>* _sig_cc;
  GrowableArray<SigEntry>* _sig_cc_ro;
  VMRegPair* _regs;
  VMRegPair* _regs_cc;
  VMRegPair* _regs_cc_ro;

  int _args_on_stack;
  int _args_on_stack_cc;
  int _args_on_stack_cc_ro;

  bool _c1_needs_stack_repair;
  bool _c2_needs_stack_repair;

 public:
  Method* method() const { return _method; }

  // Used by Method::_from_compiled_inline_entry
  GrowableArray<SigEntry>& sig() const { return *_sig; }

  // Used by Method::_from_compiled_entry
  GrowableArray<SigEntry>& sig_cc() const { return *_sig_cc; }

  // Used by Method::_from_compiled_inline_ro_entry
  GrowableArray<SigEntry>& sig_cc_ro() const { return *_sig_cc_ro; }

  VMRegPair* regs()       const { return _regs; }
  VMRegPair* regs_cc()    const { return _regs_cc; }
  VMRegPair* regs_cc_ro() const { return _regs_cc_ro; }

  int args_on_stack()       const { return _args_on_stack; }
  int args_on_stack_cc()    const { return _args_on_stack_cc; }
  int args_on_stack_cc_ro() const { return _args_on_stack_cc_ro; }

  int  num_inline_args() const { return _num_inline_args; }
  bool has_inline_recv() const { return _has_inline_recv; }

  bool has_scalarized_args()   const { return _sig != _sig_cc; }
  bool c1_needs_stack_repair() const { return _c1_needs_stack_repair; }
  bool c2_needs_stack_repair() const { return _c2_needs_stack_repair; }
  CodeOffsets::Entries c1_inline_ro_entry_type() const;

  CompiledEntrySignature(Method* method = NULL);
  void compute_calling_conventions(bool init = true);
};

#endif // SHARE_RUNTIME_SHAREDRUNTIME_HPP