/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_SHAREDRUNTIME_HPP
#define SHARE_RUNTIME_SHAREDRUNTIME_HPP

#include "asm/codeBuffer.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/signature.hpp"
#include "utilities/macros.hpp"

class AdapterHandlerEntry;
class AdapterFingerPrint;
class vframeStream;
class SigEntry;

// SharedRuntime provides functionality shared by the various runtime
// interfaces (InterpreterRuntime, CompilerRuntime, etc.), such as
// exception forwarding (C++ to Java exceptions), locking/unlocking
// mechanisms, and statistical information.

class SharedRuntime: AllStatic {
  friend class VMStructs;

 private:
  static bool resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
                                          CompiledMethod* caller_nm, bool is_virtual, bool is_optimized, bool& caller_is_c1,
                                          Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS);
  static methodHandle resolve_sub_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS);

  // Shared stub locations

  static RuntimeStub*        _wrong_method_blob;
  static RuntimeStub*        _wrong_method_abstract_blob;
  static RuntimeStub*        _ic_miss_blob;
  static RuntimeStub*        _resolve_opt_virtual_call_blob;
  static RuntimeStub*        _resolve_virtual_call_blob;
  static RuntimeStub*        _resolve_static_call_blob;

  static DeoptimizationBlob* _deopt_blob;

  static SafepointBlob*      _polling_page_vectors_safepoint_handler_blob;
  static SafepointBlob*      _polling_page_safepoint_handler_blob;
  static SafepointBlob*      _polling_page_return_handler_blob;

#ifdef COMPILER2
  static UncommonTrapBlob*   _uncommon_trap_blob;
#endif // COMPILER2

  static nmethod*            _cont_doYield_stub;

#ifndef PRODUCT
  // Counters
  static int64_t _nof_megamorphic_calls;         // total # of megamorphic calls (through vtable)
#endif // !PRODUCT

 private:
  enum { POLL_AT_RETURN,  POLL_AT_LOOP, POLL_AT_VECTOR_LOOP };
  static SafepointBlob* generate_handler_blob(address call_ptr, int poll_type);
  static RuntimeStub*   generate_resolve_blob(address destination, const char* name);
 public:
  static void generate_stubs(void);

  // max bytes for each dtrace string parameter
  enum { max_dtrace_string_size = 256 };

  // The following arithmetic routines are used on platforms that do
  // not have machine instructions to implement their functionality.
  // Do not remove these.

  // long arithmetic
  static jlong   lmul(jlong y, jlong x);
  static jlong   ldiv(jlong y, jlong x);
  static jlong   lrem(jlong y, jlong x);

  // float and double remainder
  static jfloat  frem(jfloat  x, jfloat  y);
  static jdouble drem(jdouble x, jdouble y);


#ifdef _WIN64
  // Workaround for fmod issue in the Windows x64 CRT
  static double fmod_winx64(double x, double y);
#endif

#ifdef __SOFTFP__
  static jfloat  fadd(jfloat x, jfloat y);
  static jfloat  fsub(jfloat x, jfloat y);
  static jfloat  fmul(jfloat x, jfloat y);
  static jfloat  fdiv(jfloat x, jfloat y);

  static jdouble dadd(jdouble x, jdouble y);
  static jdouble dsub(jdouble x, jdouble y);
  static jdouble dmul(jdouble x, jdouble y);
  static jdouble ddiv(jdouble x, jdouble y);
#endif // __SOFTFP__

  // float conversion (needs to set appropriate rounding mode)
  static jint    f2i (jfloat  x);
  static jlong   f2l (jfloat  x);
  static jint    d2i (jdouble x);
  static jlong   d2l (jdouble x);
  static jfloat  d2f (jdouble x);
  static jfloat  l2f (jlong   x);
  static jdouble l2d (jlong   x);
  static jfloat  i2f (jint    x);
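  // (For the float/double-to-integral conversions above, Java semantics apply:
  //  NaN converts to 0 and out-of-range values saturate to the target type's
  //  minimum or maximum value.)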

#ifdef __SOFTFP__
  static jdouble i2d (jint    x);
  static jdouble f2d (jfloat  x);
#endif // __SOFTFP__

  // double trigonometrics and transcendentals
  static jdouble dsin(jdouble x);
  static jdouble dcos(jdouble x);
  static jdouble dtan(jdouble x);
  static jdouble dlog(jdouble x);
  static jdouble dlog10(jdouble x);
  static jdouble dexp(jdouble x);
  static jdouble dpow(jdouble x, jdouble y);

#if defined(__SOFTFP__) || defined(E500V2)
  static double dabs(double f);
#endif

#if defined(__SOFTFP__) || defined(PPC)
  static double dsqrt(double f);
#endif

  // Montgomery multiplication
  static void montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                  jint len, jlong inv, jint *m_ints);
  static void montgomery_square(jint *a_ints, jint *n_ints,
                                jint len, jlong inv, jint *m_ints);
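  // (These back the BigInteger Montgomery-multiplication intrinsics. Conceptually
  //  the result written to m_ints is a * b * R^-1 mod n, respectively a^2 * R^-1
  //  mod n, where R = 2^(32*len) and 'inv' is a precomputed negated modular
  //  inverse of n; see the definitions in sharedRuntime.cpp for the exact
  //  word-size details.)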

#ifdef __SOFTFP__
  // The C++ compiler generates soft-float instructions as well as passing
  // float and double in registers.
  static int  fcmpl(float x, float y);
  static int  fcmpg(float x, float y);
  static int  dcmpl(double x, double y);
  static int  dcmpg(double x, double y);

  static int unordered_fcmplt(float x, float y);
  static int unordered_dcmplt(double x, double y);
  static int unordered_fcmple(float x, float y);
  static int unordered_dcmple(double x, double y);
  static int unordered_fcmpge(float x, float y);
  static int unordered_dcmpge(double x, double y);
  static int unordered_fcmpgt(float x, float y);
  static int unordered_dcmpgt(double x, double y);

  static float  fneg(float f);
  static double dneg(double f);
#endif

  // exception handling across interpreter/compiler boundaries
  static address raw_exception_handler_for_return_address(JavaThread* current, address return_address);
  static address exception_handler_for_return_address(JavaThread* current, address return_address);

  // exception handling and implicit exceptions
  static address compute_compiled_exc_handler(CompiledMethod* nm, address ret_pc, Handle& exception,
                                              bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred);
  enum ImplicitExceptionKind {
    IMPLICIT_NULL,
    IMPLICIT_DIVIDE_BY_ZERO,
    STACK_OVERFLOW
  };
  static void    throw_AbstractMethodError(JavaThread* current);
  static void    throw_IncompatibleClassChangeError(JavaThread* current);
  static void    throw_ArithmeticException(JavaThread* current);
  static void    throw_NullPointerException(JavaThread* current);
  static void    throw_NullPointerException_at_call(JavaThread* current);
  static void    throw_StackOverflowError(JavaThread* current);
  static void    throw_delayed_StackOverflowError(JavaThread* current);
  static void    throw_StackOverflowError_common(JavaThread* current, bool delayed);
  static address continuation_for_implicit_exception(JavaThread* current,
                                                     address faulting_pc,
                                                     ImplicitExceptionKind exception_kind);
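  // (For example, a compiled-code null check that traps is reported here as
  //  IMPLICIT_NULL together with the faulting pc; the returned address is where
  //  execution continues, typically an exception or deopt handler entry in the
  //  faulting nmethod.)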

  // Post-slow-path-allocation, pre-initializing-stores step for
  // implementing e.g. ReduceInitialCardMarks
  static void on_slowpath_allocation_exit(JavaThread* current);

  static void enable_stack_reserved_zone(JavaThread* current);
  static frame look_for_reserved_stack_annotated_method(JavaThread* current, frame fr);

  // Shared stub locations
  static address get_poll_stub(address pc);

  static address get_ic_miss_stub() {
    assert(_ic_miss_blob != nullptr, "oops");
    return _ic_miss_blob->entry_point();
  }

  static address get_handle_wrong_method_stub() {
    assert(_wrong_method_blob != nullptr, "oops");
    return _wrong_method_blob->entry_point();
  }

  static address get_handle_wrong_method_abstract_stub() {
    assert(_wrong_method_abstract_blob != nullptr, "oops");
    return _wrong_method_abstract_blob->entry_point();
  }

#ifdef COMPILER2
  static void generate_uncommon_trap_blob(void);
  static UncommonTrapBlob* uncommon_trap_blob()                  { return _uncommon_trap_blob; }
#endif // COMPILER2

  static address get_resolve_opt_virtual_call_stub() {
    assert(_resolve_opt_virtual_call_blob != nullptr, "oops");
    return _resolve_opt_virtual_call_blob->entry_point();
  }
  static address get_resolve_virtual_call_stub() {
    assert(_resolve_virtual_call_blob != nullptr, "oops");
    return _resolve_virtual_call_blob->entry_point();
  }
  static address get_resolve_static_call_stub() {
    assert(_resolve_static_call_blob != nullptr, "oops");
    return _resolve_static_call_blob->entry_point();
  }

  static SafepointBlob* polling_page_return_handler_blob()     { return _polling_page_return_handler_blob; }
  static SafepointBlob* polling_page_safepoint_handler_blob()  { return _polling_page_safepoint_handler_blob; }
  static SafepointBlob* polling_page_vectors_safepoint_handler_blob()  { return _polling_page_vectors_safepoint_handler_blob; }

  static nmethod* cont_doYield_stub() {
    assert(_cont_doYield_stub != nullptr, "oops");
    return _cont_doYield_stub;
  }

  // Counters
#ifndef PRODUCT
  static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; }
#endif // PRODUCT

  // Helper routine for full-speed JVMTI exception throwing support
  static void throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception);
  static void throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message = nullptr);

#if INCLUDE_JVMTI
  // Functions for JVMTI notifications
  static void notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current);
  static void notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current);
  static void notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current);
  static void notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current);
#endif

  // RedefineClasses() tracing support for obsolete method entry
  static int rc_trace_method_entry(JavaThread* thread, Method* m);

  // To be used as the entry point for unresolved native methods.
  static address native_method_throw_unsatisfied_link_error_entry();

  static void register_finalizer(JavaThread* thread, oopDesc* obj);

  // dtrace notifications
  static int dtrace_object_alloc(oopDesc* o);
  static int dtrace_object_alloc(JavaThread* thread, oopDesc* o);
  static int dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size);
  static int dtrace_method_entry(JavaThread* thread, Method* m);
  static int dtrace_method_exit(JavaThread* thread, Method* m);

  // Utility method for retrieving the Java thread id; returns 0 if the
  // thread is not a well-formed Java thread.
  static jlong get_java_tid(JavaThread* thread);


  // Used by native wrappers to re-enable the yellow pages if a stack overflow
  // happened while in native code.
  static void reguard_yellow_pages();

  // Fill in the "X cannot be cast to a Y" message for ClassCastException
  //
  // @param thr the current thread
  // @param caster_klass the class of the object we are casting
  // @return the dynamically allocated exception message (must be freed
  // by the caller using a resource mark)
  //
  // BCP must refer to the current 'checkcast' opcode for the frame
  // on top of the stack.
  // The caller (or one of its callers) must use a ResourceMark
  // in order to correctly free the result.
  //
  static char* generate_class_cast_message(JavaThread* thr, Klass* caster_klass);

  // Fill in the "X cannot be cast to a Y" message for ClassCastException
  //
  // @param caster_klass the class of the object we are casting
  // @param target_klass the klass we are trying to cast to
  // @return the dynamically allocated exception message (must be freed
  // by the caller using a resource mark)
  //
  // This version does not require access to the frame, so it can be called
  // from interpreted code.
  // The caller (or one of its callers) must use a ResourceMark
  // in order to correctly free the result.
  //
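  // A minimal usage sketch (hypothetical caller code, not an existing call site;
  // note the ResourceMark that scopes the returned message):
  //
  //   ResourceMark rm(THREAD);
  //   char* msg = SharedRuntime::generate_class_cast_message(caster_klass, target_klass);
  //   THROW_MSG(vmSymbols::java_lang_ClassCastException(), msg);
  //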
  static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = nullptr);

  // Resolves a call site: may patch the call destination into the compiled code.
  static methodHandle resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS);

 private:
  // deopt blob
  static void generate_deopt_blob(void);

  static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame,
                                             methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info,
                                             bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS);

 public:
  static DeoptimizationBlob* deopt_blob(void)      { return _deopt_blob; }

  // Resets a call-site in compiled code so it will get resolved again.
  static methodHandle reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS);

  // In the code prolog, if the klass comparison fails, the inline cache
  // misses and the call site is patched to go megamorphic.
  static methodHandle handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS);

  // Find the callee method for the call site in the caller's frame.
  static methodHandle find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS);

  static void monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* thread);

  static void monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current);

  static address entry_for_handle_wrong_method(methodHandle callee_method, bool is_static_call, bool is_optimized, bool caller_is_c1) {
    assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
    assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
    assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
    if (caller_is_c1) {
      return callee_method->verified_inline_code_entry();
    } else if (is_static_call || is_optimized) {
      return callee_method->verified_code_entry();
    } else {
      return callee_method->verified_inline_ro_code_entry();
    }
  }

 private:
  static Handle find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);
  static Handle find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);

  static Method* extract_attached_method(vframeStream& vfst);

#if defined(X86) && defined(COMPILER1)
  // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
  static void inline_check_hashcode_from_object_header(MacroAssembler* masm, const methodHandle& method, Register obj_reg, Register result);
#endif // X86 && COMPILER1

 public:

  // Read the array of BasicTypes from a Java signature, and compute where
  // compiled Java code would like to put the results.  Values in reg_lo and
  // reg_hi refer to 4-byte quantities.  Values less than SharedInfo::stack0 are
  // registers, those above refer to 4-byte stack slots.  All stack slots are
  // based off of the window top.  SharedInfo::stack0 refers to the first usable
  // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
  // 4-bytes higher.
  // The return value is the maximum number of VMReg stack slots the convention will use.
  static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
  static int java_calling_convention(const GrowableArray<SigEntry>* sig, VMRegPair* regs) {
    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig->length());
    int total_args_passed = SigEntry::fill_sig_bt(sig, sig_bt);
    return java_calling_convention(sig_bt, regs, total_args_passed);
  }
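  // A minimal sketch of a caller (hypothetical local names; the real callers
  // live in the compilers and the adapter generator). T_VOID marks the upper
  // half of a long or double in sig_bt:
  //
  //   BasicType sig_bt[] = { T_OBJECT, T_INT, T_LONG, T_VOID };
  //   VMRegPair regs[4];
  //   int max_stack = SharedRuntime::java_calling_convention(sig_bt, regs, 4);
  //   // regs[i] now names the register or stack slot assigned to argument i;
  //   // max_stack is the number of 4-byte outgoing stack slots needed.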
  static int java_return_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
  static const uint java_return_convention_max_int;
  static const uint java_return_convention_max_float;

  static void check_member_name_argument_is_last_argument(const methodHandle& method,
                                                          const BasicType* sig_bt,
                                                          const VMRegPair* regs) NOT_DEBUG_RETURN;

  // Ditto except for calling C
  //
  // Arguments are passed either in a register or in a stack slot, as dictated by
  // the platform's native C calling convention. (Historically some architectures
  // required an argument to be passed in a register AND in a stack slot, which was
  // supported via a second VMRegPair array; this interface no longer takes such an
  // array.)
  static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed);

  static int vector_calling_convention(VMRegPair *regs,
                                       uint num_bits,
                                       uint total_args_passed);

  // Generate I2C and C2I adapters. These adapters are simple argument marshalling
  // blobs. Unlike adapters in the tiger and earlier releases, the code in these
  // blobs does not create a new frame and is therefore virtually invisible
  // to the stack walking code. In general these blobs extend the caller's stack
  // as needed for the conversion of argument locations.

  // When calling a c2i blob the code will always call the interpreter even if
  // by the time we reach the blob there is compiled code available. This allows
  // the blob to pass the incoming stack pointer (the sender sp) in a known
  // location for the interpreter to record. This is used by the frame code
  // to correct the sender frame to match up with the stack pointer when the
  // thread left the compiled code. In addition it allows the interpreter
  // to remove the space the c2i adapter allocated to do its argument conversion.

  // Although a c2i blob will always run interpreted even if compiled code is
  // present, if we see that compiled code is present the compiled call site
  // will be patched/re-resolved so that later calls will run compiled.

  // Additionally a c2i blob needs to have an unverified entry because it can be reached
  // in situations where the call site is an inline cache site and may go megamorphic.

  // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
  // that the interpreter, before it does any call dispatch, will record the current
  // stack pointer in the interpreter frame. On return it will restore the stack
  // pointer as needed. This means the i2c adapter code doesn't need any special
  // handshaking path with compiled code to keep the stack walking correct.
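
  // A conceptual sketch of the two directions (not literal code):
  //
  //   interpreted caller --i2c--> compiled entry     (arguments lifted out of the
  //                                                    interpreter's stack into
  //                                                    registers / outgoing slots)
  //   compiled caller    --c2i--> interpreter entry  (arguments copied into the
  //                                                    interpreter's layout, growing
  //                                                    the caller's stack as needed)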

  static AdapterHandlerEntry* generate_i2c2i_adapters(MacroAssembler *masm,
                                                      int comp_args_on_stack,
                                                      const GrowableArray<SigEntry>* sig,
                                                      const VMRegPair* regs,
                                                      const GrowableArray<SigEntry>* sig_cc,
                                                      const VMRegPair* regs_cc,
                                                      const GrowableArray<SigEntry>* sig_cc_ro,
                                                      const VMRegPair* regs_cc_ro,
                                                      AdapterFingerPrint* fingerprint,
                                                      AdapterBlob*& new_adapter,
                                                      bool allocate_code_blob);

  static void gen_i2c_adapter(MacroAssembler *_masm,
                              int comp_args_on_stack,
                              const GrowableArray<SigEntry>* sig,
                              const VMRegPair *regs);

  // OSR support

  // OSR_migration_begin will extract the jvm state from an interpreter
  // frame (locals, monitors) and store the data in a piece of C heap
  // storage. This then allows the interpreter frame to be removed from the
  // stack and the OSR nmethod to be called. That method is called with a
  // pointer to the C heap storage. This pointer is the return value from
  // OSR_migration_begin.

  static intptr_t* OSR_migration_begin(JavaThread *thread);

  // OSR_migration_end is a trivial routine. It is called after the compiled
  // method has extracted the jvm state from the C heap that OSR_migration_begin
  // created. Its entire job is to simply free this storage.
  static void OSR_migration_end(intptr_t* buf);
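  //
  // A minimal sketch of the hand-off (hypothetical glue code; the actual
  // transition is performed by generated code):
  //
  //   intptr_t* buf = SharedRuntime::OSR_migration_begin(thread); // capture locals/monitors
  //   // ... remove the interpreter frame and invoke the OSR nmethod with 'buf' ...
  //   SharedRuntime::OSR_migration_end(buf);                      // free the C heap storage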

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size);
  static VMReg name_for_receiver();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On SPARC this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  static uint in_preserve_stack_slots();

  // Is the vector's size (in bytes) bigger than the size saved by default?
  // For example, on x86 the 16-byte XMM registers are saved by default.
  static bool is_wide_vector(int size);

  // Save and restore a native result
  static void    save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
  static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);

  // Generate a native wrapper for a given method.  The method takes arguments
  // in the Java compiled code convention, marshals them to the native
  // convention (handlizes oops, etc), transitions to native, makes the call,
  // returns to Java state (possibly blocking), unhandlizes any result and
  // returns.
  //
  // The wrapper may contain special-case code if the given method
  // is a compiled method handle adapter, such as _invokeBasic, _linkToVirtual, etc.
  static nmethod* generate_native_wrapper(MacroAssembler* masm,
                                          const methodHandle& method,
                                          int compile_id,
                                          BasicType* sig_bt,
                                          VMRegPair* regs,
                                          BasicType ret_type);

  // A compiled caller has just called the interpreter, but compiled code
  // exists.  Patch the caller so it no longer calls into the interpreter.
  static void fixup_callers_callsite(Method* moop, address ret_pc);
  static bool should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb);

  // Slow-path Locking and Unlocking
  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
  static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);

  // Resolving of calls
  static address resolve_static_call_C     (JavaThread* current);
  static address resolve_virtual_call_C    (JavaThread* current);
  static address resolve_opt_virtual_call_C(JavaThread* current);

  static void load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res);
  static void store_inline_type_fields_to_buf(JavaThread* current, intptr_t res);

  // arraycopy, the non-leaf version.  (See StubRoutines for all the leaf calls.)
  static void slow_arraycopy_C(oopDesc* src,  jint src_pos,
                               oopDesc* dest, jint dest_pos,
                               jint length, JavaThread* thread);

  // Handle calls that arrived at the wrong method, including inline cache
  // misses from compiled callers.
  static address handle_wrong_method(JavaThread* current);
  static address handle_wrong_method_abstract(JavaThread* current);
  static address handle_wrong_method_ic_miss(JavaThread* current);
  static void allocate_inline_types(JavaThread* current, Method* callee, bool allocate_receiver);
  static oop allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS);

  static address handle_unsafe_access(JavaThread* thread, address next_pc);

  static BufferedInlineTypeBlob* generate_buffered_inline_type_adapter(const InlineKlass* vk);
#ifndef PRODUCT

  // Collect and print inline cache miss statistics
 private:
  enum { maxICmiss_count = 100 };
  static int     _ICmiss_index;                  // length of IC miss histogram
  static int     _ICmiss_count[maxICmiss_count]; // miss counts
  static address _ICmiss_at[maxICmiss_count];    // miss addresses
  static void trace_ic_miss(address at);

 public:
  static uint _ic_miss_ctr;                      // total # of IC misses
  static uint _wrong_method_ctr;
  static uint _resolve_static_ctr;
  static uint _resolve_virtual_ctr;
  static uint _resolve_opt_virtual_ctr;
  static uint _implicit_null_throws;
  static uint _implicit_div0_throws;

  static uint _jbyte_array_copy_ctr;       // Slow-path byte array copy
  static uint _jshort_array_copy_ctr;      // Slow-path short array copy
  static uint _jint_array_copy_ctr;        // Slow-path int array copy
  static uint _jlong_array_copy_ctr;       // Slow-path long array copy
  static uint _oop_array_copy_ctr;         // Slow-path oop array copy
  static uint _checkcast_array_copy_ctr;   // Slow-path oop array copy, with cast
  static uint _unsafe_array_copy_ctr;      // Slow-path includes alignment checks
  static uint _generic_array_copy_ctr;     // Slow-path includes type decoding
  static uint _slow_array_copy_ctr;        // Slow-path failed out to a method call

  static uint _new_instance_ctr;           // 'new' object requires GC
  static uint _new_array_ctr;              // 'new' array requires GC
  static uint _multi2_ctr, _multi3_ctr, _multi4_ctr, _multi5_ctr;
  static uint _find_handler_ctr;           // find exception handler
  static uint _rethrow_ctr;                // rethrow exception
  static uint _mon_enter_stub_ctr;         // monitor enter stub
  static uint _mon_exit_stub_ctr;          // monitor exit stub
  static uint _mon_enter_ctr;              // monitor enter slow
  static uint _mon_exit_ctr;               // monitor exit slow
  static uint _partial_subtype_ctr;        // StubRoutines::partial_subtype_check

  // Statistics code
  // stats for "normal" compiled calls (non-interface)
  static int64_t _nof_normal_calls;               // total # of calls
  static int64_t _nof_inlined_calls;              // total # of inlined normal calls
  static int64_t _nof_static_calls;               // total # of calls to static methods or super methods (invokespecial)
  static int64_t _nof_inlined_static_calls;       // total # of inlined static calls
  // stats for compiled interface calls
  static int64_t _nof_interface_calls;            // total # of compiled calls
  static int64_t _nof_inlined_interface_calls;    // total # of inlined interface calls

 public: // for compiler
  static address nof_normal_calls_addr()                { return (address)&_nof_normal_calls; }
  static address nof_inlined_calls_addr()               { return (address)&_nof_inlined_calls; }
  static address nof_static_calls_addr()                { return (address)&_nof_static_calls; }
  static address nof_inlined_static_calls_addr()        { return (address)&_nof_inlined_static_calls; }
  static address nof_interface_calls_addr()             { return (address)&_nof_interface_calls; }
  static address nof_inlined_interface_calls_addr()     { return (address)&_nof_inlined_interface_calls; }
  static void print_call_statistics(uint64_t comp_total);
  static void print_ic_miss_histogram();

#endif // PRODUCT

  static void print_statistics() PRODUCT_RETURN;
};


// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
//
// This library manages argument marshaling adapters and native wrappers.
// There are 2 flavors of adapters: I2C and C2I.
//
// The I2C flavor takes a stock interpreted call setup, marshals the
// arguments for a Java-compiled call, and jumps to Rmethod->code()->code_begin().
// It is broken to call it without an nmethod assigned.
// The usual behavior is to lift any register arguments up out of the
// stack and possibly re-pack the extra arguments to be contiguous.
// I2C adapters will save what the interpreter's stack pointer will be
// after arguments are popped, then adjust the interpreter's frame
// size to force alignment and possibly to repack the arguments.
// After re-packing, it jumps to the compiled code start.  There are
// no safepoints in this adapter code and a GC cannot happen while
// marshaling is in progress.
//
// The C2I flavor takes a stock compiled call setup plus the target method in
// Rmethod, marshals the arguments for an interpreted call and jumps to
// Rmethod->_i2i_entry.  On entry, the interpreted frame has not yet been
// set up.  Compiled frames are fixed-size and the args are likely not in the
// right place.  Hence all the args will likely be copied into the
// interpreter's frame, forcing that frame to grow.  The compiled frame's
// outgoing stack args will be dead after the copy.
//
// Native wrappers, like adapters, marshal arguments.  Unlike adapters they
// also perform an official frame push & pop.  They have a call to the native
// routine in their middles and end in a return (instead of ending in a jump).
// The native wrappers are stored in real nmethods instead of the BufferBlobs
// used by the adapters.  The code generation happens here because it's very
// similar to what the adapters have to do.

class AdapterHandlerEntry : public CHeapObj<mtCode> {
  friend class AdapterHandlerLibrary;

 private:
  AdapterFingerPrint* _fingerprint;
  address _i2c_entry;
  address _c2i_entry;
  address _c2i_inline_entry;
  address _c2i_inline_ro_entry;
  address _c2i_unverified_entry;
  address _c2i_unverified_inline_entry;
  address _c2i_no_clinit_check_entry;

  // Support for scalarized inline type calling convention
  const GrowableArray<SigEntry>* _sig_cc;

#ifdef ASSERT
  // Captures code and signature used to generate this adapter when
  // verifying adapter equivalence.
  unsigned char* _saved_code;
  int            _saved_code_length;
#endif

  AdapterHandlerEntry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry,
                      address c2i_inline_entry, address c2i_inline_ro_entry,
                      address c2i_unverified_entry, address c2i_unverified_inline_entry,
                      address c2i_no_clinit_check_entry) :
    _fingerprint(fingerprint),
    _i2c_entry(i2c_entry),
    _c2i_entry(c2i_entry),
    _c2i_inline_entry(c2i_inline_entry),
    _c2i_inline_ro_entry(c2i_inline_ro_entry),
    _c2i_unverified_entry(c2i_unverified_entry),
    _c2i_unverified_inline_entry(c2i_unverified_inline_entry),
    _c2i_no_clinit_check_entry(c2i_no_clinit_check_entry),
    _sig_cc(nullptr)
#ifdef ASSERT
    , _saved_code_length(0)
#endif
  { }

  ~AdapterHandlerEntry();

 public:
  address get_i2c_entry()                   const { return _i2c_entry; }
  address get_c2i_entry()                   const { return _c2i_entry; }
  address get_c2i_inline_entry()            const { return _c2i_inline_entry; }
  address get_c2i_inline_ro_entry()         const { return _c2i_inline_ro_entry; }
  address get_c2i_unverified_entry()        const { return _c2i_unverified_entry; }
  address get_c2i_unverified_inline_entry() const { return _c2i_unverified_inline_entry; }
  address get_c2i_no_clinit_check_entry()   const { return _c2i_no_clinit_check_entry; }

  address base_address();
  void relocate(address new_base);

  // Support for scalarized inline type calling convention
  void set_sig_cc(const GrowableArray<SigEntry>* sig)  { _sig_cc = sig; }
  const GrowableArray<SigEntry>* get_sig_cc()    const { return _sig_cc; }

  AdapterFingerPrint* fingerprint() const { return _fingerprint; }

#ifdef ASSERT
  // Used to verify that code generated for shared adapters is equivalent
  void save_code   (unsigned char* code, int length);
  bool compare_code(AdapterHandlerEntry* other);
#endif

  //virtual void print_on(outputStream* st) const;  DO NOT USE
  void print_adapter_on(outputStream* st) const;
};

class CompiledEntrySignature;

class AdapterHandlerLibrary: public AllStatic {
  friend class SharedRuntime;
 private:
  static BufferBlob* _buffer; // the temporary code buffer in CodeCache
  static AdapterHandlerEntry* _abstract_method_handler;
  static AdapterHandlerEntry* _no_arg_handler;
  static AdapterHandlerEntry* _int_arg_handler;
  static AdapterHandlerEntry* _obj_arg_handler;
  static AdapterHandlerEntry* _obj_int_arg_handler;
  static AdapterHandlerEntry* _obj_obj_arg_handler;

  static BufferBlob* buffer_blob();
  static void initialize();
  static AdapterHandlerEntry* create_adapter(AdapterBlob*& new_adapter,
                                             CompiledEntrySignature& ces,
                                             bool allocate_code_blob);
  static AdapterHandlerEntry* get_simple_adapter(const methodHandle& method);
 public:

  static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint,
                                        address i2c_entry, address c2i_entry, address c2i_inline_entry, address c2i_inline_ro_entry,
                                        address c2i_unverified_entry, address c2i_unverified_inline_entry, address c2i_no_clinit_check_entry = nullptr);
  static void create_native_wrapper(const methodHandle& method);
  static AdapterHandlerEntry* get_adapter(const methodHandle& method);

  static void print_handler(const CodeBlob* b) { print_handler_on(tty, b); }
  static void print_handler_on(outputStream* st, const CodeBlob* b);
  static bool contains(const CodeBlob* b);
#ifndef PRODUCT
  static void print_statistics();
#endif // PRODUCT

};

// Utility class for computing the calling convention of the 3 types
// of compiled method entries:
//     Method::_from_compiled_entry               - sig_cc
//     Method::_from_compiled_inline_ro_entry     - sig_cc_ro
//     Method::_from_compiled_inline_entry        - sig
class CompiledEntrySignature : public StackObj {
  Method* _method;
  int  _num_inline_args;
  bool _has_inline_recv;
  GrowableArray<SigEntry>* _sig;
  GrowableArray<SigEntry>* _sig_cc;
  GrowableArray<SigEntry>* _sig_cc_ro;
  VMRegPair* _regs;
  VMRegPair* _regs_cc;
  VMRegPair* _regs_cc_ro;

  int _args_on_stack;
  int _args_on_stack_cc;
  int _args_on_stack_cc_ro;

  bool _c1_needs_stack_repair;
  bool _c2_needs_stack_repair;

  GrowableArray<Method*>* _supers;

public:
  Method* method()                     const { return _method; }

  // Used by Method::_from_compiled_inline_entry
  GrowableArray<SigEntry>* sig()       const { return _sig; }

  // Used by Method::_from_compiled_entry
  GrowableArray<SigEntry>* sig_cc()    const { return _sig_cc; }

  // Used by Method::_from_compiled_inline_ro_entry
  GrowableArray<SigEntry>* sig_cc_ro() const { return _sig_cc_ro; }

  VMRegPair* regs()                    const { return _regs; }
  VMRegPair* regs_cc()                 const { return _regs_cc; }
  VMRegPair* regs_cc_ro()              const { return _regs_cc_ro; }

  int args_on_stack()                  const { return _args_on_stack; }
  int args_on_stack_cc()               const { return _args_on_stack_cc; }
  int args_on_stack_cc_ro()            const { return _args_on_stack_cc_ro; }

  int  num_inline_args()               const { return _num_inline_args; }
  bool has_inline_recv()               const { return _has_inline_recv; }

  bool has_scalarized_args()           const { return _sig != _sig_cc; }
  bool c1_needs_stack_repair()         const { return _c1_needs_stack_repair; }
  bool c2_needs_stack_repair()         const { return _c2_needs_stack_repair; }
  CodeOffsets::Entries c1_inline_ro_entry_type() const;

  GrowableArray<Method*>* get_supers();

  CompiledEntrySignature(Method* method = nullptr);
  void compute_calling_conventions(bool init = true);
};
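
// A minimal usage sketch of CompiledEntrySignature (hypothetical; the adapter
// code in AdapterHandlerLibrary drives this type in a similar way):
//
//   CompiledEntrySignature ces(m);        // 'm' is some Method* of interest
//   ces.compute_calling_conventions();
//   // ces.sig()/regs() correspond to Method::_from_compiled_inline_entry,
//   // ces.sig_cc()/regs_cc() to Method::_from_compiled_entry, and
//   // ces.sig_cc_ro()/regs_cc_ro() to Method::_from_compiled_inline_ro_entry.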

#endif // SHARE_RUNTIME_SHAREDRUNTIME_HPP