8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_RUNTIME_SHAREDRUNTIME_HPP
26 #define SHARE_RUNTIME_SHAREDRUNTIME_HPP
27
28 #include "classfile/compactHashtable.hpp"
29 #include "code/codeBlob.hpp"
30 #include "code/vmreg.hpp"
31 #include "interpreter/linkResolver.hpp"
32 #include "memory/allStatic.hpp"
33 #include "memory/metaspaceClosure.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "runtime/safepointVerifiers.hpp"
36 #include "runtime/stubInfo.hpp"
37 #include "utilities/macros.hpp"
38
39 class AdapterHandlerEntry;
40 class AdapterFingerPrint;
41 class vframeStream;
42
43 // Runtime is the base class for various runtime interfaces
44 // (InterpreterRuntime, CompilerRuntime, etc.). It provides
45 // shared functionality such as exception forwarding (C++ to
46 // Java exceptions), locking/unlocking mechanisms, statistical
47 // information, etc.
48
49 class SharedRuntime: AllStatic {
50 private:
51 // Declare shared stub fields
52 #define SHARED_STUB_FIELD_DECLARE(name, type) \
53 static type* BLOB_FIELD_NAME(name);
54 SHARED_STUBS_DO(SHARED_STUB_FIELD_DECLARE)
55 #undef SHARED_STUB_FIELD_DECLARE
56
57 #ifdef ASSERT
58 static bool is_resolve_id(StubId id) {
59 return (id == StubId::shared_wrong_method_id ||
60 id == StubId::shared_wrong_method_abstract_id ||
61 id == StubId::shared_ic_miss_id ||
351 // on top of the stack.
352 // The caller (or one of its callers) must use a ResourceMark
353 // in order to correctly free the result.
354 //
355 static char* generate_class_cast_message(JavaThread* thr, Klass* caster_klass);
356
357 // Fill in the "X cannot be cast to a Y" message for ClassCastException
358 //
359 // @param caster_klass the class of the object we are casting
360 // @param target_klass the klass we are attempting to cast to
361 // @return the dynamically allocated exception message (must be freed
362 // by the caller using a resource mark)
363 //
364 // This version does not require access to the frame, so it can be called
365 // from interpreted code.
366 // The caller (or one of its callers) must use a ResourceMark
367 // in order to correctly free the result.
368 //
369 static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = nullptr);
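  //
  // Illustrative usage sketch (not code from this file): a caller with an active
  // ResourceMark could build and throw the message roughly like this. THROW_MSG and
  // vmSymbols are assumed to come from exceptions.hpp / vmSymbols.hpp, which this
  // header does not include itself.
  //
  //   ResourceMark rm(THREAD);
  //   char* msg = SharedRuntime::generate_class_cast_message(obj->klass(), target_klass);
  //   THROW_MSG(vmSymbols::java_lang_ClassCastException(), msg);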
370
371 // Resolves a call site - may patch the destination of the call into the
372 // compiled code.
373 static methodHandle resolve_helper(bool is_virtual, bool is_optimized, TRAPS);
374
375 private:
376 // deopt blob
377 static void generate_deopt_blob(void);
378
379 public:
380 static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; }
381
382 // Resets a call-site in compiled code so it will get resolved again.
383 static methodHandle reresolve_call_site(TRAPS);
384
385 // In the code prolog, if the klass comparison fails, the inline cache
386 // misses and the call site is patched to go megamorphic.
387 static methodHandle handle_ic_miss_helper(TRAPS);
388
389 // Find the method that called us.
390 static methodHandle find_callee_method(TRAPS);
391
392 static void monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* thread);
393
394 static void monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current);
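  //
  // Sketch of the intended pairing (illustrative only; in practice these helpers are
  // reached from the compiled-code monitorenter/monitorexit slow paths rather than
  // being called directly):
  //
  //   SharedRuntime::monitor_enter_helper(obj, lock, current);
  //   // ... synchronized region ...
  //   SharedRuntime::monitor_exit_helper(obj, lock, current);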
395
396 private:
397 static Handle find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);
398 static Handle find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);
399
400 static Method* extract_attached_method(vframeStream& vfst);
401
402 #if defined(X86) && defined(COMPILER1)
403 // For Object.hashCode and System.identityHashCode, try to pull the hashCode from the object header if available.
404 static void inline_check_hashcode_from_object_header(MacroAssembler* masm, const methodHandle& method, Register obj_reg, Register result);
405 #endif // X86 && COMPILER1
406
407 public:
408
409 // Read the array of BasicTypes from a Java signature, and compute where
410 // compiled Java code would like to put the results. Values in reg_lo and
411 // reg_hi refer to 4-byte quantities. Values less than SharedInfo::stack0 are
412 // registers, those above refer to 4-byte stack slots. All stack slots are
413 // based off of the window top. SharedInfo::stack0 refers to the first usable
414 // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
415 // 4-bytes higher.
416 // return value is the maximum number of VMReg stack slots the convention will use.
417 static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
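  //
  // Illustrative sketch: locations for a (Object, int, long) signature. By convention
  // the high half of a long/double occupies an extra T_VOID entry in sig_bt:
  //
  //   BasicType sig_bt[] = { T_OBJECT, T_INT, T_LONG, T_VOID };
  //   VMRegPair regs[4];
  //   int stk_slots = SharedRuntime::java_calling_convention(sig_bt, regs, 4);
  //   // regs[i] now names a register or a 4-byte stack slot for each entry;
  //   // stk_slots is the maximum number of outgoing stack slots the call needs.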
418
419 static void check_member_name_argument_is_last_argument(const methodHandle& method,
420 const BasicType* sig_bt,
421 const VMRegPair* regs) NOT_DEBUG_RETURN;
422
423 // Ditto except for calling C
424 //
425 // C argument in register AND stack slot.
426 // Some architectures require that an argument must be passed in a register
427 // AND in a stack slot. These architectures provide a second VMRegPair array
428 // to be filled by the c_calling_convention method. On other architectures,
429 // null is passed as the second VMRegPair array, so arguments are either
430 // passed in a register OR in a stack slot.
431 static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed);
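  //
  // Illustrative sketch for the native (C) convention, e.g. for a (jint, jlong) call
  // (again with a T_VOID entry for the high half of the long):
  //
  //   BasicType sig_bt[] = { T_INT, T_LONG, T_VOID };
  //   VMRegPair regs[3];
  //   int out_slots = SharedRuntime::c_calling_convention(sig_bt, regs, 3);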
432
433 static int vector_calling_convention(VMRegPair *regs,
434 uint num_bits,
435 uint total_args_passed);
436
437 // Generate I2C and C2I adapters. These adapters are simple argument marshalling
444 // by the time we reach the blob there is compiled code available. This allows
445 // the blob to pass the incoming stack pointer (the sender sp) in a known
446 // location for the interpreter to record. This is used by the frame code
447 // to correct the sender code to match up with the stack pointer when the
448 // thread left the compiled code. In addition it allows the interpreter
449 // to remove the space the c2i adapter allocated to do its argument conversion.
450
451 // Although a c2i blob will always run interpreted even if compiled code is
452 // present, if we see that compiled code is present the compiled call site
453 // will be patched/re-resolved so that later calls will run compiled.
454
455 // Additionally a c2i blob needs to have an unverified entry because it can be reached
456 // in situations where the call site is an inline cache site and may go megamorphic.
457
458 // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
459 // that the interpreter before it does any call dispatch will record the current
460 // stack pointer in the interpreter frame. On return it will restore the stack
461 // pointer as needed. This means the i2c adapter code doesn't need any special
462 // handshaking path with compiled code to keep the stack walking correct.
463
464 static void generate_i2c2i_adapters(MacroAssembler *_masm,
465 int total_args_passed,
466 int max_arg,
467 const BasicType *sig_bt,
468 const VMRegPair *regs,
469 address entry_address[AdapterBlob::ENTRY_COUNT]);
470
471 static void gen_i2c_adapter(MacroAssembler *_masm,
472 int total_args_passed,
473 int comp_args_on_stack,
474 const BasicType *sig_bt,
475 const VMRegPair *regs);
476
477 // OSR support
478
479 // OSR_migration_begin will extract the jvm state from an interpreter
480 // frame (locals, monitors) and store the data in a piece of C heap
481 // storage. This then allows the interpreter frame to be removed from the
482 // stack and the OSR nmethod to be called. That method is called with a
483 // pointer to the C heap storage. This pointer is the return value from
484 // OSR_migration_begin.
485
486 static intptr_t* OSR_migration_begin(JavaThread *thread);
487
488 // OSR_migration_end is a trivial routine. It is called after the compiled
489 // method has extracted the jvm state from the C heap that OSR_migration_begin
490 // created. Its entire job is simply to free this storage.
491 static void OSR_migration_end(intptr_t* buf);
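  //
  // Sketch of the intended begin/end pairing (illustrative only):
  //
  //   intptr_t* buf = SharedRuntime::OSR_migration_begin(current);  // interpreter side
  //   // ... control transfers to the OSR nmethod, which unpacks the state from buf ...
  //   SharedRuntime::OSR_migration_end(buf);                        // frees the C heap copy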
492
493 // Convert a sig into a calling convention register layout
494 // and find interesting things about it.
526 // returns.
527 //
528 // The wrapper may contain special-case code if the given method
529 // is a compiled method handle adapter, such as _invokeBasic, _linkToVirtual, etc.
530 static nmethod* generate_native_wrapper(MacroAssembler* masm,
531 const methodHandle& method,
532 int compile_id,
533 BasicType* sig_bt,
534 VMRegPair* regs,
535 BasicType ret_type);
536
537 // A compiled caller has just called the interpreter, but compiled code
538 // exists. Patch the caller so it no longer calls into the interpreter.
539 static void fixup_callers_callsite(Method* moop, address ret_pc);
540
541 // Slow-path Locking and Unlocking
542 static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
543 static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
544
545 // Resolving of calls
546 static address get_resolved_entry (JavaThread* current, methodHandle callee_method);
547 static address resolve_static_call_C (JavaThread* current);
548 static address resolve_virtual_call_C (JavaThread* current);
549 static address resolve_opt_virtual_call_C(JavaThread* current);
550
551 // arraycopy, the non-leaf version. (See StubRoutines for all the leaf calls.)
552 static void slow_arraycopy_C(oopDesc* src, jint src_pos,
553 oopDesc* dest, jint dest_pos,
554 jint length, JavaThread* thread);
555
556 // handle ic miss with caller being compiled code
557 // wrong method handling (inline cache misses)
558 static address handle_wrong_method(JavaThread* current);
559 static address handle_wrong_method_abstract(JavaThread* current);
560 static address handle_wrong_method_ic_miss(JavaThread* current);
561
562 static address handle_unsafe_access(JavaThread* thread, address next_pc);
563
564 #ifndef PRODUCT
565
566 // Collect and print inline cache miss statistics
567 private:
568 enum { maxICmiss_count = 100 };
569 static int _ICmiss_index; // length of IC miss histogram
570 static int _ICmiss_count[maxICmiss_count]; // miss counts
571 static address _ICmiss_at[maxICmiss_count]; // miss addresses
572 static void trace_ic_miss(address at);
573
574 public:
575 static uint _ic_miss_ctr; // total # of IC misses
576 static uint _wrong_method_ctr;
577 static uint _resolve_static_ctr;
578 static uint _resolve_virtual_ctr;
579 static uint _resolve_opt_virtual_ctr;
580 static uint _implicit_null_throws;
581 static uint _implicit_div0_throws;
582
583 static uint _jbyte_array_copy_ctr; // Slow-path byte array copy
682 //
683 // The C2I flavor takes a stock compiled call setup plus the target method in
684 // Rmethod, marshals the arguments for an interpreted call and jumps to
685 // Rmethod->_i2i_entry. On entry, the interpreted frame has not yet been
686 // set up. Compiled frames are fixed-size and the args are likely not in the
687 // right place. Hence all the args will likely be copied into the
688 // interpreter's frame, forcing that frame to grow. The compiled frame's
689 // outgoing stack args will be dead after the copy.
690 //
691 // Native wrappers, like adapters, marshal arguments. Unlike adapters they
692 // also perform an official frame push & pop. They have a call to the native
693 // routine in their middles and end in a return (instead of ending in a jump).
694 // The native wrappers are stored in real nmethods instead of the BufferBlobs
695 // used by the adapters. The code generation happens here because it's very
696 // similar to what the adapters have to do.
697
698 class AdapterHandlerEntry : public MetaspaceObj {
699 friend class AdapterHandlerLibrary;
700
701 public:
702 static const int ENTRIES_COUNT = 4;
703
704 private:
705 AdapterFingerPrint* _fingerprint;
706 AdapterBlob* _adapter_blob;
707 uint _id;
708 bool _linked;
709
710 static const char *_entry_names[];
711
712 #ifdef ASSERT
713 // Captures code and signature used to generate this adapter when
714 // verifying adapter equivalence.
715 unsigned char* _saved_code;
716 int _saved_code_length;
717 #endif
718
719 AdapterHandlerEntry(int id, AdapterFingerPrint* fingerprint) :
720 _fingerprint(fingerprint),
721 _adapter_blob(nullptr),
722 _id(id),
723 _linked(false)
724 #ifdef ASSERT
725 , _saved_code(nullptr),
726 _saved_code_length(0)
727 #endif
728 { }
729
730 ~AdapterHandlerEntry();
731
732 // Allocate on CHeap instead of metaspace (see JDK-8331086).
733 // The dummy argument is used to avoid a C++ warning about using the
734 // deleted MetaspaceObj::operator delete().
735 void* operator new(size_t size, size_t dummy) throw() {
736 assert(size == BytesPerWord * heap_word_size(sizeof(AdapterHandlerEntry)), "should match");
737 void* p = AllocateHeap(size, mtCode);
738 memset(p, 0, size);
739 return p;
740 }
741
742 public:
743 static AdapterHandlerEntry* allocate(uint id, AdapterFingerPrint* fingerprint) {
754 }
755
756 address get_i2c_entry() const {
757 #ifndef ZERO
758 assert(_adapter_blob != nullptr, "must be");
759 return _adapter_blob->i2c_entry();
760 #else
761 return nullptr;
762 #endif // ZERO
763 }
764
765 address get_c2i_entry() const {
766 #ifndef ZERO
767 assert(_adapter_blob != nullptr, "must be");
768 return _adapter_blob->c2i_entry();
769 #else
770 return nullptr;
771 #endif // ZERO
772 }
773
774 address get_c2i_unverified_entry() const {
775 #ifndef ZERO
776 assert(_adapter_blob != nullptr, "must be");
777 return _adapter_blob->c2i_unverified_entry();
778 #else
779 return nullptr;
780 #endif // ZERO
781 }
782
783 address get_c2i_no_clinit_check_entry() const {
784 #ifndef ZERO
785 assert(_adapter_blob != nullptr, "must be");
786 return _adapter_blob->c2i_no_clinit_check_entry();
787 #else
788 return nullptr;
789 #endif // ZERO
790 }
791
792 AdapterBlob* adapter_blob() const { return _adapter_blob; }
793 bool is_linked() const { return _linked; }
794
795 uint id() const { return _id; }
796 AdapterFingerPrint* fingerprint() const { return _fingerprint; }
797
798 #ifdef ASSERT
799 // Used to verify that code generated for shared adapters is equivalent
800 void save_code (unsigned char* code, int length);
801 bool compare_code(AdapterHandlerEntry* other);
802 #endif
803
804 //virtual void print_on(outputStream* st) const; DO NOT USE
805 void print_adapter_on(outputStream* st) const;
806
807 void metaspace_pointers_do(MetaspaceClosure* it);
808 int size() const { return (int)heap_word_size(sizeof(AdapterHandlerEntry)); }
809 MetaspaceObj::Type type() const { return AdapterHandlerEntryType; }
810
811 void remove_unshareable_info() NOT_CDS_RETURN;
812 void link() NOT_CDS_RETURN;
813 };
814
815 #if INCLUDE_CDS
816 class ArchivedAdapterTable;
817 #endif // INCLUDE_CDS
818
819 class AdapterHandlerLibrary: public AllStatic {
820 friend class SharedRuntime;
821 private:
822 static volatile uint _id_counter; // counter for generating unique adapter ids, range = [1,UINT_MAX]
823 static BufferBlob* _buffer; // the temporary code buffer in CodeCache
824 static AdapterHandlerEntry* _no_arg_handler;
825 static AdapterHandlerEntry* _int_arg_handler;
826 static AdapterHandlerEntry* _obj_arg_handler;
827 static AdapterHandlerEntry* _obj_int_arg_handler;
828 static AdapterHandlerEntry* _obj_obj_arg_handler;
829 #if INCLUDE_CDS
830 static ArchivedAdapterTable _aot_adapter_handler_table;
831 #endif // INCLUDE_CDS
832
833 static BufferBlob* buffer_blob();
834 static void initialize();
835 static AdapterHandlerEntry* get_simple_adapter(const methodHandle& method);
836 static void lookup_aot_cache(AdapterHandlerEntry* handler);
837 static AdapterHandlerEntry* create_adapter(int total_args_passed,
838 BasicType* sig_bt,
839 bool is_transient = false);
840 static void lookup_simple_adapters() NOT_CDS_RETURN;
841 #ifndef PRODUCT
842 static void print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler);
843 #endif // PRODUCT
844 public:
845
846 static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint);
847 static void create_native_wrapper(const methodHandle& method);
848 static AdapterHandlerEntry* get_adapter(const methodHandle& method);
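  //
  // Illustrative sketch of the typical flow (assumes a live methodHandle mh):
  //
  //   AdapterHandlerEntry* handler = AdapterHandlerLibrary::get_adapter(mh);
  //   address i2c = handler->get_i2c_entry();  // interpreted-to-compiled entry
  //   address c2i = handler->get_c2i_entry();  // compiled-to-interpreted entry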
849 static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt);
850 static bool generate_adapter_code(AdapterHandlerEntry* handler,
851 int total_args_passed,
852 BasicType* sig_bt,
853 bool is_transient);
854
855 #ifdef ASSERT
856 static void verify_adapter_sharing(int total_args_passed, BasicType* sig_bt, AdapterHandlerEntry* cached);
857 #endif // ASSERT
858
859 static void print_handler(const CodeBlob* b) { print_handler_on(tty, b); }
860 static void print_handler_on(outputStream* st, const CodeBlob* b);
861 static const char* name(AdapterHandlerEntry* handler);
862 static uint32_t id(AdapterHandlerEntry* handler);
863 #ifndef PRODUCT
864 static void print_statistics();
865 #endif // PRODUCT
866
867 static void link_aot_adapter_handler(AdapterHandlerEntry* handler) NOT_CDS_RETURN;
868 static void dump_aot_adapter_table() NOT_CDS_RETURN;
869 static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_RETURN;
870 static void link_aot_adapters() NOT_CDS_RETURN;
871 static void address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT], int entry_offset[AdapterBlob::ENTRY_COUNT]);
872 };
873
874 #endif // SHARE_RUNTIME_SHAREDRUNTIME_HPP
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_RUNTIME_SHAREDRUNTIME_HPP
26 #define SHARE_RUNTIME_SHAREDRUNTIME_HPP
27
28 #include "asm/codeBuffer.hpp"
29 #include "classfile/compactHashtable.hpp"
30 #include "code/codeBlob.hpp"
31 #include "code/vmreg.hpp"
32 #include "interpreter/linkResolver.hpp"
33 #include "memory/allStatic.hpp"
34 #include "memory/metaspaceClosure.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "runtime/safepointVerifiers.hpp"
37 #include "runtime/signature.hpp"
38 #include "runtime/stubInfo.hpp"
39 #include "utilities/macros.hpp"
40
41 class AdapterHandlerEntry;
42 class AdapterFingerPrint;
43 class vframeStream;
44 class SigEntry;
45
46 // Runtime is the base class for various runtime interfaces
47 // (InterpreterRuntime, CompilerRuntime, etc.). It provides
48 // shared functionality such as exception forwarding (C++ to
49 // Java exceptions), locking/unlocking mechanisms, statistical
50 // information, etc.
51
52 class SharedRuntime: AllStatic {
53 private:
54 // Declare shared stub fields
55 #define SHARED_STUB_FIELD_DECLARE(name, type) \
56 static type* BLOB_FIELD_NAME(name);
57 SHARED_STUBS_DO(SHARED_STUB_FIELD_DECLARE)
58 #undef SHARED_STUB_FIELD_DECLARE
59
60 #ifdef ASSERT
61 static bool is_resolve_id(StubId id) {
62 return (id == StubId::shared_wrong_method_id ||
63 id == StubId::shared_wrong_method_abstract_id ||
64 id == StubId::shared_ic_miss_id ||
354 // on top of the stack.
355 // The caller (or one of its callers) must use a ResourceMark
356 // in order to correctly free the result.
357 //
358 static char* generate_class_cast_message(JavaThread* thr, Klass* caster_klass);
359
360 // Fill in the "X cannot be cast to a Y" message for ClassCastException
361 //
362 // @param caster_klass the class of the object we are casting
363 // @param target_klass the klass we are attempting to cast to
364 // @return the dynamically allocated exception message (must be freed
365 // by the caller using a resource mark)
366 //
367 // This version does not require access to the frame, so it can be called
368 // from interpreted code.
369 // The caller (or one of its callers) must use a ResourceMark
370 // in order to correctly free the result.
371 //
372 static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = nullptr);
373
374 static char* generate_identity_exception_message(JavaThread* thr, Klass* klass);
375
376 // Resolves a call site - may patch the destination of the call into the
377 // compiled code.
378 static methodHandle resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS);
379
380 private:
381 // deopt blob
382 static void generate_deopt_blob(void);
383
384 public:
385 static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; }
386
387 // Resets a call-site in compiled code so it will get resolved again.
388 static methodHandle reresolve_call_site(bool& is_optimized, bool& caller_does_not_scalarize, TRAPS);
389
390 // In the code prolog, if the klass comparison fails, the inline cache
391 // misses and the call site is patched to go megamorphic.
392 static methodHandle handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS);
393
394 // Find the method that called us.
395 static methodHandle find_callee_method(bool& caller_does_not_scalarize, TRAPS);
396
397 static void monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* thread);
398
399 static void monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current);
400
401 private:
402 static Handle find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);
403 static Handle find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS);
404
405 static Method* extract_attached_method(vframeStream& vfst);
406
407 #if defined(X86) && defined(COMPILER1)
408 // For Object.hashCode and System.identityHashCode, try to pull the hashCode from the object header if available.
409 static void inline_check_hashcode_from_object_header(MacroAssembler* masm, const methodHandle& method, Register obj_reg, Register result);
410 #endif // X86 && COMPILER1
411
412 public:
413
414 // Read the array of BasicTypes from a Java signature, and compute where
415 // compiled Java code would like to put the results. Values in reg_lo and
416 // reg_hi refer to 4-byte quantities. Values less than SharedInfo::stack0 are
417 // registers, those above refer to 4-byte stack slots. All stack slots are
418 // based off of the window top. SharedInfo::stack0 refers to the first usable
419 // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
420 // 4-bytes higher.
421 // return value is the maximum number of VMReg stack slots the convention will use.
422 static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
423 static int java_calling_convention(const GrowableArray<SigEntry>* sig, VMRegPair* regs) {
424 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig->length());
425 int total_args_passed = SigEntry::fill_sig_bt(sig, sig_bt);
426 return java_calling_convention(sig_bt, regs, total_args_passed);
427 }
428 static int java_return_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
429 static const uint java_return_convention_max_int;
430 static const uint java_return_convention_max_float;
431
432 static void check_member_name_argument_is_last_argument(const methodHandle& method,
433 const BasicType* sig_bt,
434 const VMRegPair* regs) NOT_DEBUG_RETURN;
435
436 // Ditto except for calling C
437 //
438 // C argument in register AND stack slot.
439 // Some architectures require that an argument must be passed in a register
440 // AND in a stack slot. These architectures provide a second VMRegPair array
441 // to be filled by the c_calling_convention method. On other architectures,
442 // null is passed as the second VMRegPair array, so arguments are either
443 // passed in a register OR in a stack slot.
444 static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed);
445
446 static int vector_calling_convention(VMRegPair *regs,
447 uint num_bits,
448 uint total_args_passed);
449
450 // Generate I2C and C2I adapters. These adapters are simple argument marshalling
457 // by the time we reach the blob there is compiled code available. This allows
458 // the blob to pass the incoming stack pointer (the sender sp) in a known
459 // location for the interpreter to record. This is used by the frame code
460 // to correct the sender code to match up with the stack pointer when the
461 // thread left the compiled code. In addition it allows the interpreter
462 // to remove the space the c2i adapter allocated to do its argument conversion.
463
464 // Although a c2i blob will always run interpreted even if compiled code is
465 // present, if we see that compiled code is present the compiled call site
466 // will be patched/re-resolved so that later calls will run compiled.
467
468 // Additionally a c2i blob needs to have an unverified entry because it can be reached
469 // in situations where the call site is an inline cache site and may go megamorphic.
470
471 // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
472 // that the interpreter before it does any call dispatch will record the current
473 // stack pointer in the interpreter frame. On return it will restore the stack
474 // pointer as needed. This means the i2c adapter code doesn't need any special
475 // handshaking path with compiled code to keep the stack walking correct.
476
477 static void generate_i2c2i_adapters(MacroAssembler* _masm,
478 int total_args_passed,
479 const GrowableArray<SigEntry>* sig,
480 const VMRegPair* regs,
481 const GrowableArray<SigEntry>* sig_cc,
482 const VMRegPair* regs_cc,
483 const GrowableArray<SigEntry>* sig_cc_ro,
484 const VMRegPair* regs_cc_ro,
485 address entry_address[AdapterBlob::ENTRY_COUNT],
486 AdapterBlob*& new_adapter,
487 bool allocate_code_blob);
488
489 static void gen_i2c_adapter(MacroAssembler *_masm,
490 int comp_args_on_stack,
491 const GrowableArray<SigEntry>* sig,
492 const VMRegPair *regs);
493
494 // OSR support
495
496 // OSR_migration_begin will extract the jvm state from an interpreter
497 // frame (locals, monitors) and store the data in a piece of C heap
498 // storage. This then allows the interpreter frame to be removed from the
499 // stack and the OSR nmethod to be called. That method is called with a
500 // pointer to the C heap storage. This pointer is the return value from
501 // OSR_migration_begin.
502
503 static intptr_t* OSR_migration_begin(JavaThread *thread);
504
505 // OSR_migration_end is a trivial routine. It is called after the compiled
506 // method has extracted the jvm state from the C heap that OSR_migration_begin
507 // created. Its entire job is simply to free this storage.
508 static void OSR_migration_end(intptr_t* buf);
509
510 // Convert a sig into a calling convention register layout
511 // and find interesting things about it.
543 // returns.
544 //
545 // The wrapper may contain special-case code if the given method
546 // is a compiled method handle adapter, such as _invokeBasic, _linkToVirtual, etc.
547 static nmethod* generate_native_wrapper(MacroAssembler* masm,
548 const methodHandle& method,
549 int compile_id,
550 BasicType* sig_bt,
551 VMRegPair* regs,
552 BasicType ret_type);
553
554 // A compiled caller has just called the interpreter, but compiled code
555 // exists. Patch the caller so it no longer calls into the interpreter.
556 static void fixup_callers_callsite(Method* moop, address ret_pc);
557
558 // Slow-path Locking and Unlocking
559 static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
560 static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
561
562 // Resolving of calls
563 static address get_resolved_entry (JavaThread* current, methodHandle callee_method,
564 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize);
565 static address resolve_static_call_C (JavaThread* current);
566 static address resolve_virtual_call_C (JavaThread* current);
567 static address resolve_opt_virtual_call_C(JavaThread* current);
568
569 static void load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res);
570 static void store_inline_type_fields_to_buf(JavaThread* current, intptr_t res);
571
572 // arraycopy, the non-leaf version. (See StubRoutines for all the leaf calls.)
573 static void slow_arraycopy_C(oopDesc* src, jint src_pos,
574 oopDesc* dest, jint dest_pos,
575 jint length, JavaThread* thread);
576
577 // handle ic miss with caller being compiled code
578 // wrong method handling (inline cache misses)
579 static address handle_wrong_method(JavaThread* current);
580 static address handle_wrong_method_abstract(JavaThread* current);
581 static address handle_wrong_method_ic_miss(JavaThread* current);
582 static void allocate_inline_types(JavaThread* current, Method* callee, bool allocate_receiver);
583 static oop allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS);
584
585 static address handle_unsafe_access(JavaThread* thread, address next_pc);
586
587 static BufferedInlineTypeBlob* generate_buffered_inline_type_adapter(const InlineKlass* vk);
588 #ifndef PRODUCT
589
590 // Collect and print inline cache miss statistics
591 private:
592 enum { maxICmiss_count = 100 };
593 static int _ICmiss_index; // length of IC miss histogram
594 static int _ICmiss_count[maxICmiss_count]; // miss counts
595 static address _ICmiss_at[maxICmiss_count]; // miss addresses
596 static void trace_ic_miss(address at);
597
598 public:
599 static uint _ic_miss_ctr; // total # of IC misses
600 static uint _wrong_method_ctr;
601 static uint _resolve_static_ctr;
602 static uint _resolve_virtual_ctr;
603 static uint _resolve_opt_virtual_ctr;
604 static uint _implicit_null_throws;
605 static uint _implicit_div0_throws;
606
607 static uint _jbyte_array_copy_ctr; // Slow-path byte array copy
706 //
707 // The C2I flavor takes a stock compiled call setup plus the target method in
708 // Rmethod, marshals the arguments for an interpreted call and jumps to
709 // Rmethod->_i2i_entry. On entry, the interpreted frame has not yet been
710 // set up. Compiled frames are fixed-size and the args are likely not in the
711 // right place. Hence all the args will likely be copied into the
712 // interpreter's frame, forcing that frame to grow. The compiled frame's
713 // outgoing stack args will be dead after the copy.
714 //
715 // Native wrappers, like adapters, marshal arguments. Unlike adapters they
716 // also perform an official frame push & pop. They have a call to the native
717 // routine in their middles and end in a return (instead of ending in a jump).
718 // The native wrappers are stored in real nmethods instead of the BufferBlobs
719 // used by the adapters. The code generation happens here because it's very
720 // similar to what the adapters have to do.
721
722 class AdapterHandlerEntry : public MetaspaceObj {
723 friend class AdapterHandlerLibrary;
724
725 public:
726 static const int ENTRIES_COUNT = 7;
727
728 private:
729 AdapterFingerPrint* _fingerprint;
730 AdapterBlob* _adapter_blob;
731 uint _id;
732 bool _linked;
733
734 static const char *_entry_names[];
735
736 // Support for scalarized inline type calling convention
737 const GrowableArray<SigEntry>* _sig_cc;
738
739 #ifdef ASSERT
740 // Captures code and signature used to generate this adapter when
741 // verifying adapter equivalence.
742 unsigned char* _saved_code;
743 int _saved_code_length;
744 #endif
745
746 AdapterHandlerEntry(int id, AdapterFingerPrint* fingerprint) :
747 _fingerprint(fingerprint),
748 _adapter_blob(nullptr),
749 _id(id),
750 _linked(false),
751 _sig_cc(nullptr)
752 #ifdef ASSERT
753 , _saved_code(nullptr),
754 _saved_code_length(0)
755 #endif
756 { }
757
758 ~AdapterHandlerEntry();
759
760 // Allocate on CHeap instead of metaspace (see JDK-8331086).
761 // The dummy argument is used to avoid a C++ warning about using the
762 // deleted MetaspaceObj::operator delete().
763 void* operator new(size_t size, size_t dummy) throw() {
764 assert(size == BytesPerWord * heap_word_size(sizeof(AdapterHandlerEntry)), "should match");
765 void* p = AllocateHeap(size, mtCode);
766 memset(p, 0, size);
767 return p;
768 }
769
770 public:
771 static AdapterHandlerEntry* allocate(uint id, AdapterFingerPrint* fingerprint) {
782 }
783
784 address get_i2c_entry() const {
785 #ifndef ZERO
786 assert(_adapter_blob != nullptr, "must be");
787 return _adapter_blob->i2c_entry();
788 #else
789 return nullptr;
790 #endif // ZERO
791 }
792
793 address get_c2i_entry() const {
794 #ifndef ZERO
795 assert(_adapter_blob != nullptr, "must be");
796 return _adapter_blob->c2i_entry();
797 #else
798 return nullptr;
799 #endif // ZERO
800 }
801
802 address get_c2i_inline_entry() const {
803 #ifndef ZERO
804 assert(_adapter_blob != nullptr, "must be");
805 return _adapter_blob->c2i_inline_entry();
806 #else
807 return nullptr;
808 #endif // ZERO
809 }
810
811 address get_c2i_inline_ro_entry() const {
812 #ifndef ZERO
813 assert(_adapter_blob != nullptr, "must be");
814 return _adapter_blob->c2i_inline_ro_entry();
815 #else
816 return nullptr;
817 #endif // ZERO
818 }
819
820 address get_c2i_unverified_entry() const {
821 #ifndef ZERO
822 assert(_adapter_blob != nullptr, "must be");
823 return _adapter_blob->c2i_unverified_entry();
824 #else
825 return nullptr;
826 #endif // ZERO
827 }
828
829 address get_c2i_unverified_inline_entry() const {
830 #ifndef ZERO
831 assert(_adapter_blob != nullptr, "must be");
832 return _adapter_blob->c2i_unverified_inline_entry();
833 #else
834 return nullptr;
835 #endif // ZERO
836 }
837
838 address get_c2i_no_clinit_check_entry() const {
839 #ifndef ZERO
840 assert(_adapter_blob != nullptr, "must be");
841 return _adapter_blob->c2i_no_clinit_check_entry();
842 #else
843 return nullptr;
844 #endif // ZERO
845 }
846
847 AdapterBlob* adapter_blob() const { return _adapter_blob; }
848 bool is_linked() const { return _linked; }
849
850 // Support for scalarized inline type calling convention
851 void set_sig_cc(const GrowableArray<SigEntry>* sig) { _sig_cc = sig; }
852 const GrowableArray<SigEntry>* get_sig_cc() const { return _sig_cc; }
853
854 uint id() const { return _id; }
855 AdapterFingerPrint* fingerprint() const { return _fingerprint; }
856
857 #ifdef ASSERT
858 // Used to verify that code generated for shared adapters is equivalent
859 void save_code (unsigned char* code, int length);
860 bool compare_code(AdapterHandlerEntry* other);
861 #endif
862
863 //virtual void print_on(outputStream* st) const; DO NOT USE
864 void print_adapter_on(outputStream* st) const;
865
866 void metaspace_pointers_do(MetaspaceClosure* it);
867 int size() const { return (int)heap_word_size(sizeof(AdapterHandlerEntry)); }
868 MetaspaceObj::Type type() const { return AdapterHandlerEntryType; }
869
870 void remove_unshareable_info() NOT_CDS_RETURN;
871 void link() NOT_CDS_RETURN;
872 };
873
874 #if INCLUDE_CDS
875 class ArchivedAdapterTable;
876 #endif // INCLUDE_CDS
877
878 class CompiledEntrySignature;
879
880 class AdapterHandlerLibrary: public AllStatic {
881 friend class SharedRuntime;
882 private:
883 static volatile uint _id_counter; // counter for generating unique adapter ids, range = [1,UINT_MAX]
884 static BufferBlob* _buffer; // the temporary code buffer in CodeCache
885 static AdapterHandlerEntry* _no_arg_handler;
886 static AdapterHandlerEntry* _int_arg_handler;
887 static AdapterHandlerEntry* _obj_arg_handler;
888 static AdapterHandlerEntry* _obj_int_arg_handler;
889 static AdapterHandlerEntry* _obj_obj_arg_handler;
890 #if INCLUDE_CDS
891 static ArchivedAdapterTable _aot_adapter_handler_table;
892 #endif // INCLUDE_CDS
893
894 static BufferBlob* buffer_blob();
895 static void initialize();
896 static AdapterHandlerEntry* get_simple_adapter(const methodHandle& method);
897 static void lookup_aot_cache(AdapterHandlerEntry* handler);
898 static AdapterHandlerEntry* create_adapter(CompiledEntrySignature& ces,
899 bool allocate_code_blob,
900 bool is_transient = false);
901 static void lookup_simple_adapters() NOT_CDS_RETURN;
902 #ifndef PRODUCT
903 static void print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler);
904 #endif // PRODUCT
905 public:
906
907 static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint);
908 static void create_native_wrapper(const methodHandle& method);
909 static AdapterHandlerEntry* get_adapter(const methodHandle& method);
910 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false);
911 static bool generate_adapter_code(AdapterHandlerEntry* handler,
912 CompiledEntrySignature& ces,
913 bool allocate_code_blob,
914 bool is_transient);
915
916 #ifdef ASSERT
917 static void verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry);
918 #endif // ASSERT
919
920 static void print_handler(const CodeBlob* b) { print_handler_on(tty, b); }
921 static void print_handler_on(outputStream* st, const CodeBlob* b);
922 static const char* name(AdapterHandlerEntry* handler);
923 static uint32_t id(AdapterHandlerEntry* handler);
924 #ifndef PRODUCT
925 static void print_statistics();
926 #endif // PRODUCT
927
928 static void link_aot_adapter_handler(AdapterHandlerEntry* handler) NOT_CDS_RETURN;
929 static void dump_aot_adapter_table() NOT_CDS_RETURN;
930 static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_RETURN;
931 static void link_aot_adapters() NOT_CDS_RETURN;
932 static void address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT], int entry_offset[AdapterBlob::ENTRY_COUNT]);
933 };
934
935 // Utility class for computing the calling convention of the 3 types
936 // of compiled method entries:
937 // Method::_from_compiled_entry - sig_cc
938 // Method::_from_compiled_inline_ro_entry - sig_cc_ro
939 // Method::_from_compiled_inline_entry - sig
940 class CompiledEntrySignature : public StackObj {
941 Method* _method;
942 int _num_inline_args;
943 bool _has_inline_recv;
944 GrowableArray<SigEntry>* _sig;
945 GrowableArray<SigEntry>* _sig_cc;
946 GrowableArray<SigEntry>* _sig_cc_ro;
947 VMRegPair* _regs;
948 VMRegPair* _regs_cc;
949 VMRegPair* _regs_cc_ro;
950
951 int _args_on_stack;
952 int _args_on_stack_cc;
953 int _args_on_stack_cc_ro;
954
955 bool _c1_needs_stack_repair;
956 bool _c2_needs_stack_repair;
957
958 GrowableArray<Method*>* _supers;
959
960 public:
961 Method* method() const { return _method; }
962
963 // Used by Method::_from_compiled_inline_entry
964 GrowableArray<SigEntry>* sig() const { return _sig; }
965
966 // Used by Method::_from_compiled_entry
967 GrowableArray<SigEntry>* sig_cc() const { return _sig_cc; }
968
969 // Used by Method::_from_compiled_inline_ro_entry
970 GrowableArray<SigEntry>* sig_cc_ro() const { return _sig_cc_ro; }
971
972 VMRegPair* regs() const { return _regs; }
973 VMRegPair* regs_cc() const { return _regs_cc; }
974 VMRegPair* regs_cc_ro() const { return _regs_cc_ro; }
975
976 int args_on_stack() const { return _args_on_stack; }
977 int args_on_stack_cc() const { return _args_on_stack_cc; }
978 int args_on_stack_cc_ro() const { return _args_on_stack_cc_ro; }
979
980 int num_inline_args() const { return _num_inline_args; }
981 bool has_inline_recv() const { return _has_inline_recv; }
982
983 bool has_scalarized_args() const { return _sig != _sig_cc; }
984 bool c1_needs_stack_repair() const { return _c1_needs_stack_repair; }
985 bool c2_needs_stack_repair() const { return _c2_needs_stack_repair; }
986 CodeOffsets::Entries c1_inline_ro_entry_type() const;
987
988 GrowableArray<Method*>* get_supers();
989
990 CompiledEntrySignature(Method* method = nullptr);
991 void compute_calling_conventions(bool init = true);
992 void initialize_from_fingerprint(AdapterFingerPrint* fingerprint);
993 };
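//
// Illustrative sketch (assumes 'm' is a Method* whose signature may contain
// scalarizable inline type arguments):
//
//   CompiledEntrySignature ces(m);
//   ces.compute_calling_conventions();
//   if (ces.has_scalarized_args()) {
//     // sig_cc()/regs_cc() describe Method::_from_compiled_entry with the inline
//     // type arguments passed as scalarized fields.
//   }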
994
995 #endif // SHARE_RUNTIME_SHAREDRUNTIME_HPP