11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #ifndef _WINDOWS
27 #include "alloca.h"
28 #endif
29 #include "asm/macroAssembler.hpp"
30 #include "asm/macroAssembler.inline.hpp"
31 #include "code/compiledIC.hpp"
32 #include "code/debugInfoRec.hpp"
33 #include "code/nativeInst.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/shared/collectedHeap.hpp"
37 #include "gc/shared/gcLocker.hpp"
38 #include "gc/shared/barrierSet.hpp"
39 #include "gc/shared/barrierSetAssembler.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "logging/log.hpp"
42 #include "memory/resourceArea.hpp"
43 #include "memory/universe.hpp"
44 #include "oops/klass.inline.hpp"
45 #include "oops/method.inline.hpp"
46 #include "prims/methodHandles.hpp"
47 #include "runtime/continuation.hpp"
48 #include "runtime/continuationEntry.inline.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/jniHandles.hpp"
617 break;
618 case T_DOUBLE:
619 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
620 if (fp_args < Argument::n_float_register_parameters_j) {
621 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
622 } else {
623 stk_args = align_up(stk_args, 2);
624 regs[i].set2(VMRegImpl::stack2reg(stk_args));
625 stk_args += 2;
626 }
627 break;
628 default:
629 ShouldNotReachHere();
630 break;
631 }
632 }
633
634 return stk_args;
635 }
636
637 // Patch the caller's callsite with entry to compiled code if it exists.
638 static void patch_callers_callsite(MacroAssembler *masm) {
639 Label L;
640 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
641 __ jcc(Assembler::equal, L);
642
643 // Save the current stack pointer
644 __ mov(r13, rsp);
645 // Schedule the branch target address early.
646 // Call into the VM to patch the caller, then jump to compiled callee
647 // rax isn't live so capture return address while we easily can
648 __ movptr(rax, Address(rsp, 0));
649
650 // align stack so push_CPU_state doesn't fault
651 __ andptr(rsp, -(StackAlignmentInBytes));
652 __ push_CPU_state();
653 __ vzeroupper();
654 // VM needs caller's callsite
655 // VM needs target method
656 // This needs to be a long call since we will relocate this adapter to
657 // the codeBuffer and it may not reach
659 // Allocate argument register save area
660 if (frame::arg_reg_save_area_bytes != 0) {
661 __ subptr(rsp, frame::arg_reg_save_area_bytes);
662 }
663 __ mov(c_rarg0, rbx);
664 __ mov(c_rarg1, rax);
665 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
666
667 // De-allocate argument register save area
668 if (frame::arg_reg_save_area_bytes != 0) {
669 __ addptr(rsp, frame::arg_reg_save_area_bytes);
670 }
671
672 __ vzeroupper();
673 __ pop_CPU_state();
674 // restore sp
675 __ mov(rsp, r13);
676 __ bind(L);
677 }
678
679
680 static void gen_c2i_adapter(MacroAssembler *masm,
681 int total_args_passed,
682 int comp_args_on_stack,
683 const BasicType *sig_bt,
684 const VMRegPair *regs,
685 Label& skip_fixup) {
686 // Before we get into the guts of the C2I adapter, see if we should be here
687 // at all. We've come from compiled code and are attempting to jump to the
688 // interpreter, which means the caller made a static call to get here
689 // (vcalls always get a compiled target if there is one). Check for a
690 // compiled target. If there is one, we need to patch the caller's call.
691 patch_callers_callsite(masm);
692
693 __ bind(skip_fixup);
694
695 // Since all args are passed on the stack, total_args_passed *
696 // Interpreter::stackElementSize is the space we need.
697
698 assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
699
700 int extraspace = (total_args_passed * Interpreter::stackElementSize);
701
702 // stack is aligned, keep it that way
703 // This is not currently needed or enforced by the interpreter, but
704 // we might as well conform to the ABI.
705 extraspace = align_up(extraspace, 2*wordSize);
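// Worked example (illustrative): with 5 interpreter args and a stackElementSize
// of 8 bytes, extraspace = 5 * 8 = 40 bytes, which align_up rounds to 48; the
// return-address adjustment below then adds one more word.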
706
707 // set senderSP value
708 __ lea(r13, Address(rsp, wordSize));
709
710 #ifdef ASSERT
711 __ check_stack_alignment(r13, "sender stack not aligned");
712 #endif
713 if (extraspace > 0) {
714 // Pop the return address
715 __ pop(rax);
716
717 __ subptr(rsp, extraspace);
718
719 // Push the return address
720 __ push(rax);
721
722 // Account for the return address location since we store it first rather
723 // than hold it in a register across all the shuffling
724 extraspace += wordSize;
725 }
726
727 #ifdef ASSERT
728 __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
729 #endif
730
731 // Now write the args into the outgoing interpreter space
732 for (int i = 0; i < total_args_passed; i++) {
733 if (sig_bt[i] == T_VOID) {
734 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
735 continue;
736 }
737
738 // offset to start parameters
739 int st_off = (total_args_passed - i) * Interpreter::stackElementSize;
740 int next_off = st_off - Interpreter::stackElementSize;
741
742 // Say 4 args:
743 // i st_off
744 // 0 32 T_LONG
745 // 1 24 T_VOID
746 // 2 16 T_OBJECT
747 // 3 8 T_BOOL
748 // - 0 return address
749 //
750 // However, to make things extra confusing: because we can fit a long/double in
751 // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
752 // leaves one slot empty and only stores to a single slot. In this case the
753 // slot that is occupied is the T_VOID slot. See, I said it was confusing.
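// Illustration (not emitted code): for the T_LONG above, st_off == 32 and
// next_off == 24; the 64-bit value is written once, to the next_off slot (the
// one tagged T_VOID), while the st_off slot is left unused (debug builds fill
// it with junk below).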
754
755 VMReg r_1 = regs[i].first();
756 VMReg r_2 = regs[i].second();
757 if (!r_1->is_valid()) {
758 assert(!r_2->is_valid(), "");
759 continue;
760 }
761 if (r_1->is_stack()) {
762 // memory to memory, use rax
763 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
764 if (!r_2->is_valid()) {
765 // sign extend??
766 __ movl(rax, Address(rsp, ld_off));
767 __ movptr(Address(rsp, st_off), rax);
768
769 } else {
770
771 __ movq(rax, Address(rsp, ld_off));
772
773 // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
774 // T_DOUBLE and T_LONG use two slots in the interpreter
775 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
776 // ld_off == LSW, ld_off+wordSize == MSW
777 // st_off == MSW, next_off == LSW
778 __ movq(Address(rsp, next_off), rax);
779 #ifdef ASSERT
780 // Overwrite the unused slot with known junk
781 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
782 __ movptr(Address(rsp, st_off), rax);
783 #endif /* ASSERT */
784 } else {
785 __ movq(Address(rsp, st_off), rax);
786 }
787 }
788 } else if (r_1->is_Register()) {
789 Register r = r_1->as_Register();
790 if (!r_2->is_valid()) {
791 // must be only an int (or less) so move only 32 bits to slot
792 // why not sign extend??
793 __ movl(Address(rsp, st_off), r);
794 } else {
795 // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
796 // T_DOUBLE and T_LONG use two slots in the interpreter
797 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
798 // long/double in gpr
799 #ifdef ASSERT
800 // Overwrite the unused slot with known junk
801 __ mov64(rax, CONST64(0xdeadffffdeadaaab));
802 __ movptr(Address(rsp, st_off), rax);
803 #endif /* ASSERT */
804 __ movq(Address(rsp, next_off), r);
805 } else {
806 __ movptr(Address(rsp, st_off), r);
807 }
808 }
809 } else {
810 assert(r_1->is_XMMRegister(), "");
811 if (!r_2->is_valid()) {
812 // only a float, use just part of the slot
813 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
814 } else {
815 #ifdef ASSERT
816 // Overwrite the unused slot with known junk
817 __ mov64(rax, CONST64(0xdeadffffdeadaaac));
818 __ movptr(Address(rsp, st_off), rax);
819 #endif /* ASSERT */
820 __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
821 }
822 }
823 }
824
825 // Schedule the branch target address early.
826 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
827 __ jmp(rcx);
828 }
829
830 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
831 address code_start, address code_end,
832 Label& L_ok) {
833 Label L_fail;
834 __ lea(temp_reg, AddressLiteral(code_start, relocInfo::none));
835 __ cmpptr(pc_reg, temp_reg);
836 __ jcc(Assembler::belowEqual, L_fail);
837 __ lea(temp_reg, AddressLiteral(code_end, relocInfo::none));
838 __ cmpptr(pc_reg, temp_reg);
839 __ jcc(Assembler::below, L_ok);
840 __ bind(L_fail);
841 }
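// Note: range_check() above branches to L_ok when code_start < pc_reg < code_end;
// otherwise it falls through at L_fail, so callers place their failure handling
// (e.g. a stop) immediately after the call site.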
842
843 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
844 int total_args_passed,
845 int comp_args_on_stack,
846 const BasicType *sig_bt,
847 const VMRegPair *regs) {
848
849 // Note: r13 contains the senderSP on entry. We must preserve it since
850 // we may do an i2c -> c2i transition if we lose a race where compiled
851 // code goes non-entrant while we get args ready.
852 // In addition we use r13 to locate all the interpreter args because
853 // we must align the stack to 16 bytes on an i2c entry; otherwise we
854 // lose the alignment we expect in all compiled code, and register
855 // save code can segv when fxsave instructions find an improperly
856 // aligned stack pointer.
857
858 // Adapters can be frameless because they do not require the caller
859 // to perform additional cleanup work, such as correcting the stack pointer.
860 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
861 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
862 // even if a callee has modified the stack pointer.
863 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
864 // routinely repairs its caller's stack pointer (from sender_sp, which is set
865 // up via the senderSP register).
866 // In other words, if *either* the caller or callee is interpreted, we can
867 // get the stack pointer repaired after a call.
917 // Convert 4-byte c2 stack slots to words.
918 int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
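// Worked example (illustrative): comp_args_on_stack == 5 gives 5 * 4 = 20
// bytes, aligned up to 24, then >> LogBytesPerWord yields 3 words reserved below.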
919
920 if (comp_args_on_stack) {
921 __ subptr(rsp, comp_words_on_stack * wordSize);
922 }
923
924 // Ensure compiled code always sees stack at proper alignment
925 __ andptr(rsp, -16);
926
927 // Push the return address, misaligning the stack so that the youngest frame
928 // sees it exactly as it would immediately after a call instruction
929 __ push(rax);
930
931 // Put saved SP in another register
932 const Register saved_sp = rax;
933 __ movptr(saved_sp, r11);
934
935 // Will jump to the compiled code just as if compiled code was doing it.
936 // Pre-load the register-jump target early, to schedule it better.
937 __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
938
939 #if INCLUDE_JVMCI
940 if (EnableJVMCI) {
941 // check if this call should be routed towards a specific entry point
942 __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
943 Label no_alternative_target;
944 __ jcc(Assembler::equal, no_alternative_target);
945 __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
946 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
947 __ bind(no_alternative_target);
948 }
949 #endif // INCLUDE_JVMCI
950
951 // Now generate the shuffle code. Pick up all register args and move the
952 // rest through the floating point stack top.
953 for (int i = 0; i < total_args_passed; i++) {
954 if (sig_bt[i] == T_VOID) {
955 // Longs and doubles are passed in native word order, but misaligned
956 // in the 32-bit build.
957 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
958 continue;
959 }
960
961 // Pick up 0, 1 or 2 words from SP+offset.
962
963 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
964 "scrambled load targets?");
965 // Load in argument order going down.
966 int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
967 // Point to interpreter value (vs. tag)
968 int next_off = ld_off - Interpreter::stackElementSize;
969 //
970 //
971 //
972 VMReg r_1 = regs[i].first();
973 VMReg r_2 = regs[i].second();
974 if (!r_1->is_valid()) {
975 assert(!r_2->is_valid(), "");
976 continue;
977 }
978 if (r_1->is_stack()) {
979 // Convert stack slot to an SP offset (+ wordSize to account for return address)
980 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
981
982 // We can use r13 as a temp here because compiled code doesn't need r13 as an input
983 // and if we end up going thru a c2i because of a miss, a reasonable value of r13
984 // will be generated.
985 if (!r_2->is_valid()) {
986 // sign extend???
987 __ movl(r13, Address(saved_sp, ld_off));
988 __ movptr(Address(rsp, st_off), r13);
989 } else {
990 //
991 // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
992 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
993 // so we must adjust where to pick up the data to match the interpreter.
994 //
995 // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
996 // are accessed at negative offsets, so the LSW is at the LOW address
997
998 // ld_off is MSW so get LSW
999 const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
1000 next_off : ld_off;
1001 __ movq(r13, Address(saved_sp, offset));
1002 // st_off is LSW (i.e. reg.first())
1003 __ movq(Address(rsp, st_off), r13);
1004 }
1005 } else if (r_1->is_Register()) { // Register argument
1006 Register r = r_1->as_Register();
1007 assert(r != rax, "must be different");
1008 if (r_2->is_valid()) {
1009 //
1010 // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
1011 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
1012 // so we must adjust where to pick up the data to match the interpreter.
1013
1014 const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
1015 next_off : ld_off;
1016
1017 // this can be a misaligned move
1018 __ movq(r, Address(saved_sp, offset));
1019 } else {
1020 // sign extend and use a full word?
1021 __ movl(r, Address(saved_sp, ld_off));
1022 }
1023 } else {
1024 if (!r_2->is_valid()) {
1025 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
1026 } else {
1027 __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
1028 }
1029 }
1030 }
1031
1032 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1033
1034 // 6243940 We might end up in handle_wrong_method if
1035 // the callee is deoptimized as we race thru here. If that
1036 // happens we don't want to take a safepoint because the
1037 // caller frame will look interpreted and arguments are now
1038 // "compiled" so it is much better to make this transition
1039 // invisible to the stack walking code. Unfortunately if
1040 // we try and find the callee by normal means a safepoint
1041 // is possible. So we stash the desired callee in the thread
1042 // and the VM will find it there should this case occur.
1043
1044 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1045
1046 // put Method* where a c2i would expect it, should we end up there
1047 // only needed because c2 resolve stubs return Method* as a result in
1048 // rax
1049 __ mov(rax, rbx);
1050 __ jmp(r11);
1051 }
1052
1053 // ---------------------------------------------------------------
1054 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1055 int total_args_passed,
1056 int comp_args_on_stack,
1057 const BasicType *sig_bt,
1058 const VMRegPair *regs,
1059 AdapterFingerPrint* fingerprint) {
1060 address i2c_entry = __ pc();
1061
1062 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
1063
1064 // -------------------------------------------------------------------------
1065 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
1066 // to the interpreter. The args start out packed in the compiled layout. They
1067 // need to be unpacked into the interpreter layout. This will almost always
1068 // require some stack space. We grow the current (compiled) stack, then repack
1069 // the args. We finally end in a jump to the generic interpreter entry point.
1070 // On exit from the interpreter, the interpreter will restore our SP (lest the
1071 // compiled code, which relies solely on SP and not RBP, get sick).
1072
1073 address c2i_unverified_entry = __ pc();
1074 Label skip_fixup;
1075
1076 Register data = rax;
1077 Register receiver = j_rarg0;
1078 Register temp = rbx;
1079
1080 {
1081 __ ic_check(1 /* end_alignment */);
1082 __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
1083 // Method might have been compiled since the call site was patched to
1084 // interpreted; if that is the case, treat it as a miss so we can get
1085 // the call site corrected.
1086 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1087 __ jcc(Assembler::equal, skip_fixup);
1088 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1089 }
1090
1091 address c2i_entry = __ pc();
1092
1093 // Class initialization barrier for static methods
1094 address c2i_no_clinit_check_entry = nullptr;
1095 if (VM_Version::supports_fast_class_init_checks()) {
1096 Label L_skip_barrier;
1097 Register method = rbx;
1098
1099 { // Bypass the barrier for non-static methods
1100 Register flags = rscratch1;
1101 __ movl(flags, Address(method, Method::access_flags_offset()));
1102 __ testl(flags, JVM_ACC_STATIC);
1103 __ jcc(Assembler::zero, L_skip_barrier); // non-static
1104 }
1105
1106 Register klass = rscratch1;
1107 __ load_method_holder(klass, method);
1108 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
1109
1110 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
1111
1112 __ bind(L_skip_barrier);
1113 c2i_no_clinit_check_entry = __ pc();
1114 }
1115
1116 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1117 bs->c2i_entry_barrier(masm);
1118
1119 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1120
1121 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1122 }
1123
1124 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1125 VMRegPair *regs,
1126 int total_args_passed) {
1127
1128 // We return the amount of VMRegImpl stack slots we need to reserve for all
1129 // the arguments NOT counting out_preserve_stack_slots.
1130
1131 // NOTE: These arrays will have to change when c1 is ported
1132 #ifdef _WIN64
1133 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1134 c_rarg0, c_rarg1, c_rarg2, c_rarg3
1135 };
1136 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1137 c_farg0, c_farg1, c_farg2, c_farg3
1138 };
1139 #else
1140 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1141 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
2228 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2229
2230 // Get the handle (the 2nd argument)
2231 __ mov(oop_handle_reg, c_rarg1);
2232
2233 // Get address of the box
2234
2235 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2236
2237 // Load the oop from the handle
2238 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2239
2240 if (LockingMode == LM_MONITOR) {
2241 __ jmp(slow_path_lock);
2242 } else if (LockingMode == LM_LEGACY) {
2243 // Load immediate 1 into swap_reg %rax
2244 __ movl(swap_reg, 1);
2245
2246 // Load (object->mark() | 1) into swap_reg %rax
2247 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2248
2249 // Save (object->mark() | 1) into BasicLock's displaced header
2250 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2251
2252 // src -> dest iff dest == rax else rax <- dest
2253 __ lock();
2254 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2255 __ jcc(Assembler::equal, count_mon);
2256
2257 // Hmm should this move to the slow path code area???
2258
2259 // Test if the oopMark is an obvious stack pointer, i.e.,
2260 // 1) (mark & 3) == 0, and
2261 // 2) rsp <= mark < mark + os::pagesize()
2262 // These 3 tests can be done by evaluating the following
2263 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2264 // assuming both stack pointer and pagesize have their
2265 // least significant 2 bits clear.
2266 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
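// Illustration (assuming a 4K page size): 3 - 4096 is ...fffff003 in two's
// complement, so the expression above is zero exactly when (mark - rsp) has its
// low two bits clear and is less than the page size, i.e. mark points into the
// stack just above rsp.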
2267
3569 julong *scratch = (julong *)alloca(total_allocation);
3570
3571 // Local scratch arrays
3572 julong
3573 *a = scratch + 0 * longwords,
3574 *n = scratch + 1 * longwords,
3575 *m = scratch + 2 * longwords;
3576
3577 reverse_words((julong *)a_ints, a, longwords);
3578 reverse_words((julong *)n_ints, n, longwords);
3579
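// Note: montgomery_square below is a dedicated squaring routine that only pays
// off for large operands; under the threshold we simply multiply the number by
// itself, which yields the same Montgomery product.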
3580 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3581 ::montgomery_square(a, n, m, (julong)inv, longwords);
3582 } else {
3583 ::montgomery_multiply(a, a, n, m, (julong)inv, longwords);
3584 }
3585
3586 reverse_words(m, (julong *)m_ints, longwords);
3587 }
3588
3589 #if INCLUDE_JFR
3590
3591 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
3592 // It returns a jobject handle to the event writer.
3593 // The handle is dereferenced and the return value is the event writer oop.
3594 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3595 enum layout {
3596 rbp_off,
3597 rbpH_off,
3598 return_off,
3599 return_off2,
3600 framesize // inclusive of return address
3601 };
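// A reading of the layout above (in 4-byte VMRegImpl slots): two slots for the
// saved rbp and two for the return address give framesize == 4 slots, i.e. two
// words, matching the >> (LogBytesPerWord - LogBytesPerInt) conversion applied
// when the RuntimeStub is created below.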
3602
3603 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
3604 CodeBuffer code(name, 1024, 64);
3605 MacroAssembler* masm = new MacroAssembler(&code);
3606 address start = __ pc();
3607
3608 __ enter();
3661 __ reset_last_Java_frame(true);
3662
3663 __ leave();
3664 __ ret(0);
3665
3666 OopMapSet* oop_maps = new OopMapSet();
3667 OopMap* map = new OopMap(framesize, 1);
3668 oop_maps->add_gc_map(frame_complete, map);
3669
3670 RuntimeStub* stub =
3671 RuntimeStub::new_runtime_stub(name,
3672 &code,
3673 frame_complete,
3674 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3675 oop_maps,
3676 false);
3677 return stub;
3678 }
3679
3680 #endif // INCLUDE_JFR
3681
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #ifndef _WINDOWS
27 #include "alloca.h"
28 #endif
29 #include "asm/macroAssembler.hpp"
30 #include "asm/macroAssembler.inline.hpp"
31 #include "classfile/symbolTable.hpp"
32 #include "code/compiledIC.hpp"
33 #include "code/debugInfoRec.hpp"
34 #include "code/nativeInst.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gcLocker.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/barrierSetAssembler.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "logging/log.hpp"
43 #include "memory/resourceArea.hpp"
44 #include "memory/universe.hpp"
45 #include "oops/klass.inline.hpp"
46 #include "oops/method.inline.hpp"
47 #include "prims/methodHandles.hpp"
48 #include "runtime/continuation.hpp"
49 #include "runtime/continuationEntry.inline.hpp"
50 #include "runtime/globals.hpp"
51 #include "runtime/jniHandles.hpp"
618 break;
619 case T_DOUBLE:
620 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
621 if (fp_args < Argument::n_float_register_parameters_j) {
622 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
623 } else {
624 stk_args = align_up(stk_args, 2);
625 regs[i].set2(VMRegImpl::stack2reg(stk_args));
626 stk_args += 2;
627 }
628 break;
629 default:
630 ShouldNotReachHere();
631 break;
632 }
633 }
634
635 return stk_args;
636 }
637
638 // Same as java_calling_convention() but for multiple return
639 // values. There's no way to store them on the stack so if we don't
640 // have enough registers, multiple values can't be returned.
641 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
642 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
643 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
644 VMRegPair *regs,
645 int total_args_passed) {
646 // Create the mapping between argument positions and
647 // registers.
648 static const Register INT_ArgReg[java_return_convention_max_int] = {
649 rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
650 };
651 static const XMMRegister FP_ArgReg[java_return_convention_max_float] = {
652 j_farg0, j_farg1, j_farg2, j_farg3,
653 j_farg4, j_farg5, j_farg6, j_farg7
654 };
655
656
657 uint int_args = 0;
658 uint fp_args = 0;
659
660 for (int i = 0; i < total_args_passed; i++) {
661 switch (sig_bt[i]) {
662 case T_BOOLEAN:
663 case T_CHAR:
664 case T_BYTE:
665 case T_SHORT:
666 case T_INT:
667 if (int_args < Argument::n_int_register_parameters_j+1) {
668 regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
669 int_args++;
670 } else {
671 return -1;
672 }
673 break;
674 case T_VOID:
675 // halves of T_LONG or T_DOUBLE
676 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
677 regs[i].set_bad();
678 break;
679 case T_LONG:
680 assert(sig_bt[i + 1] == T_VOID, "expecting half");
681 // fall through
682 case T_OBJECT:
683 case T_ARRAY:
684 case T_ADDRESS:
685 case T_METADATA:
686 if (int_args < Argument::n_int_register_parameters_j+1) {
687 regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
688 int_args++;
689 } else {
690 return -1;
691 }
692 break;
693 case T_FLOAT:
694 if (fp_args < Argument::n_float_register_parameters_j) {
695 regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
696 fp_args++;
697 } else {
698 return -1;
699 }
700 break;
701 case T_DOUBLE:
702 assert(sig_bt[i + 1] == T_VOID, "expecting half");
703 if (fp_args < Argument::n_float_register_parameters_j) {
704 regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
705 fp_args++;
706 } else {
707 return -1;
708 }
709 break;
710 default:
711 ShouldNotReachHere();
712 break;
713 }
714 }
715
716 return int_args + fp_args;
717 }
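// Example (illustrative): a scalarized return of (int, long), i.e.
// { T_INT, T_LONG, T_VOID }, is assigned INT_ArgReg[0] (rax) for the int and
// INT_ArgReg[1] (j_rarg5) for the long, and the function returns 2.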
718
719 // Patch the caller's callsite with entry to compiled code if it exists.
720 static void patch_callers_callsite(MacroAssembler *masm) {
721 Label L;
722 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
723 __ jcc(Assembler::equal, L);
724
725 // Save the current stack pointer
726 __ mov(r13, rsp);
727 // Schedule the branch target address early.
728 // Call into the VM to patch the caller, then jump to compiled callee
729 // rax isn't live so capture return address while we easily can
730 __ movptr(rax, Address(rsp, 0));
731
732 // align stack so push_CPU_state doesn't fault
733 __ andptr(rsp, -(StackAlignmentInBytes));
734 __ push_CPU_state();
735 __ vzeroupper();
736 // VM needs caller's callsite
737 // VM needs target method
738 // This needs to be a long call since we will relocate this adapter to
739 // the codeBuffer and it may not reach
741 // Allocate argument register save area
742 if (frame::arg_reg_save_area_bytes != 0) {
743 __ subptr(rsp, frame::arg_reg_save_area_bytes);
744 }
745 __ mov(c_rarg0, rbx);
746 __ mov(c_rarg1, rax);
747 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
748
749 // De-allocate argument register save area
750 if (frame::arg_reg_save_area_bytes != 0) {
751 __ addptr(rsp, frame::arg_reg_save_area_bytes);
752 }
753
754 __ vzeroupper();
755 __ pop_CPU_state();
756 // restore sp
757 __ mov(rsp, r13);
758 __ bind(L);
759 }
760
761 // For each inline type argument, sig includes the list of fields of
762 // the inline type. This utility function computes the number of
763 // arguments for the call if inline types are passed by reference (the
764 // calling convention the interpreter expects).
765 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
766 int total_args_passed = 0;
767 if (InlineTypePassFieldsAsArgs) {
768 for (int i = 0; i < sig_extended->length(); i++) {
769 BasicType bt = sig_extended->at(i)._bt;
770 if (bt == T_METADATA) {
771 // In sig_extended, an inline type argument starts with:
772 // T_METADATA, followed by the types of the fields of the
773 // inline type and T_VOID to mark the end of the value
774 // type. Inline types are flattened so, for instance, in the
775 // case of an inline type with an int field and an inline type
776 // field that itself has 2 fields, an int and a long:
777 // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
778 // slot for the T_LONG) T_VOID (inner inline type) T_VOID
779 // (outer inline type)
780 total_args_passed++;
781 int vt = 1;
782 do {
783 i++;
784 BasicType bt = sig_extended->at(i)._bt;
785 BasicType prev_bt = sig_extended->at(i-1)._bt;
786 if (bt == T_METADATA) {
787 vt++;
788 } else if (bt == T_VOID &&
789 prev_bt != T_LONG &&
790 prev_bt != T_DOUBLE) {
791 vt--;
792 }
793 } while (vt != 0);
794 } else {
795 total_args_passed++;
796 }
797 }
798 } else {
799 total_args_passed = sig_extended->length();
800 }
801 return total_args_passed;
802 }
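// Worked example (illustrative): for the nested case described above,
// { T_METADATA, T_INT, T_METADATA, T_INT, T_LONG, T_VOID, T_VOID, T_VOID },
// vt rises to 2 at the inner T_METADATA and drops back to 0 on the last two
// T_VOIDs, so the whole sequence counts as one interpreter argument.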
803
804
805 static void gen_c2i_adapter_helper(MacroAssembler* masm,
806 BasicType bt,
807 BasicType prev_bt,
808 size_t size_in_bytes,
809 const VMRegPair& reg_pair,
810 const Address& to,
811 int extraspace,
812 bool is_oop) {
813 if (bt == T_VOID) {
814 assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
815 return;
816 }
817
818 // Say 4 args:
819 // i st_off
820 // 0 32 T_LONG
821 // 1 24 T_VOID
822 // 2 16 T_OBJECT
823 // 3 8 T_BOOL
824 // - 0 return address
825 //
826 // However, to make things extra confusing: because we can fit a long/double in
827 // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
828 // leaves one slot empty and only stores to a single slot. In this case the
829 // slot that is occupied is the T_VOID slot. See, I said it was confusing.
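// Illustration (not emitted code): for the T_LONG above, the caller passes the
// next_off slot (offset 24, the one tagged T_VOID) as 'to', while the st_off
// slot (offset 32) is junk-filled by the caller in debug builds.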
830
831 bool wide = (size_in_bytes == wordSize);
832 VMReg r_1 = reg_pair.first();
833 VMReg r_2 = reg_pair.second();
834 assert(r_2->is_valid() == wide, "invalid size");
835 if (!r_1->is_valid()) {
836 assert(!r_2->is_valid(), "must be invalid");
837 return;
838 }
839
840 if (!r_1->is_XMMRegister()) {
841 Register val = rax;
842 if (r_1->is_stack()) {
843 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
844 __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
845 } else {
846 val = r_1->as_Register();
847 }
848 assert_different_registers(to.base(), val, rscratch1);
849 if (is_oop) {
850 __ push(r13);
851 __ push(rbx);
852 __ store_heap_oop(to, val, rscratch1, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
853 __ pop(rbx);
854 __ pop(r13);
855 } else {
856 __ store_sized_value(to, val, size_in_bytes);
857 }
858 } else {
859 if (wide) {
860 __ movdbl(to, r_1->as_XMMRegister());
861 } else {
862 __ movflt(to, r_1->as_XMMRegister());
863 }
864 }
865 }
866
867 static void gen_c2i_adapter(MacroAssembler *masm,
868 const GrowableArray<SigEntry>* sig_extended,
869 const VMRegPair *regs,
870 bool requires_clinit_barrier,
871 address& c2i_no_clinit_check_entry,
872 Label& skip_fixup,
873 address start,
874 OopMapSet* oop_maps,
875 int& frame_complete,
876 int& frame_size_in_words,
877 bool alloc_inline_receiver) {
878 if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
879 Label L_skip_barrier;
880 Register method = rbx;
881
882 { // Bypass the barrier for non-static methods
883 Register flags = rscratch1;
884 __ movl(flags, Address(method, Method::access_flags_offset()));
885 __ testl(flags, JVM_ACC_STATIC);
886 __ jcc(Assembler::zero, L_skip_barrier); // non-static
887 }
888
889 Register klass = rscratch1;
890 __ load_method_holder(klass, method);
891 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
892
893 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
894
895 __ bind(L_skip_barrier);
896 c2i_no_clinit_check_entry = __ pc();
897 }
898
899 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
900 bs->c2i_entry_barrier(masm);
901
902 // Before we get into the guts of the C2I adapter, see if we should be here
903 // at all. We've come from compiled code and are attempting to jump to the
904 // interpreter, which means the caller made a static call to get here
905 // (vcalls always get a compiled target if there is one). Check for a
906 // compiled target. If there is one, we need to patch the caller's call.
907 patch_callers_callsite(masm);
908
909 __ bind(skip_fixup);
910
911 if (InlineTypePassFieldsAsArgs) {
912 // Is there an inline type argument?
913 bool has_inline_argument = false;
914 for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
915 has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
916 }
917 if (has_inline_argument) {
918 // There is at least one inline type argument: we're coming from
919 // compiled code so we have no buffers to back the inline types.
920 // Allocate the buffers here with a runtime call.
921 OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ false);
922
923 frame_complete = __ offset();
924
925 __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
926
927 __ mov(c_rarg0, r15_thread);
928 __ mov(c_rarg1, rbx);
929 __ mov64(c_rarg2, (int64_t)alloc_inline_receiver);
930 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
931
932 oop_maps->add_gc_map((int)(__ pc() - start), map);
933 __ reset_last_Java_frame(false);
934
935 RegisterSaver::restore_live_registers(masm);
936
937 Label no_exception;
938 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
939 __ jcc(Assembler::equal, no_exception);
940
941 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
942 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
943 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
944
945 __ bind(no_exception);
946
947 // We get an array of objects from the runtime call
948 __ get_vm_result(rscratch2, r15_thread); // Use rscratch2 (r11) as temporary because rscratch1 (r10) is trashed by movptr()
949 __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live?
950 }
951 }
952
953 // Since all args are passed on the stack, total_args_passed *
954 // Interpreter::stackElementSize is the space we need.
955 int total_args_passed = compute_total_args_passed_int(sig_extended);
956 assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
957
958 int extraspace = (total_args_passed * Interpreter::stackElementSize);
959
960 // stack is aligned, keep it that way
961 // This is not currently needed or enforced by the interpreter, but
962 // we might as well conform to the ABI.
963 extraspace = align_up(extraspace, 2*wordSize);
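// Worked example (illustrative): 5 interpreter args give extraspace
// = 5 * 8 = 40 bytes, rounded up to 48 by the 2*wordSize alignment.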
964
965 // set senderSP value
966 __ lea(r13, Address(rsp, wordSize));
967
968 #ifdef ASSERT
969 __ check_stack_alignment(r13, "sender stack not aligned");
970 #endif
971 if (extraspace > 0) {
972 // Pop the return address
973 __ pop(rax);
974
975 __ subptr(rsp, extraspace);
976
977 // Push the return address
978 __ push(rax);
979
980 // Account for the return address location since we store it first rather
981 // than hold it in a register across all the shuffling
982 extraspace += wordSize;
983 }
984
985 #ifdef ASSERT
986 __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
987 #endif
988
989 // Now write the args into the outgoing interpreter space
990
991 // next_arg_comp is the next argument from the compiler point of
992 // view (inline type fields are passed in registers/on the stack). In
993 // sig_extended, an inline type argument starts with: T_METADATA,
994 // followed by the types of the fields of the inline type and T_VOID
995 // to mark the end of the inline type. ignored counts the number of
996 // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
997 // used to get the buffer for that argument from the pool of buffers
998 // we allocated above and want to pass to the
999 // interpreter. next_arg_int is the next argument from the
1000 // interpreter point of view (inline types are passed by reference).
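// Worked example (illustrative): for (int, inline type with two int fields),
// sig_extended is { T_INT, T_METADATA, T_INT, T_INT, T_VOID }; the loop
// visits all five entries, 'ignored' ends at 2 (the T_METADATA plus the
// closing T_VOID), next_vt_arg at 1, and next_arg_int at 2.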
1001 for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
1002 next_arg_comp < sig_extended->length(); next_arg_comp++) {
1003 assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
1004 assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
1005 BasicType bt = sig_extended->at(next_arg_comp)._bt;
1006 int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
1007 if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
1008 int next_off = st_off - Interpreter::stackElementSize;
1009 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
1010 const VMRegPair reg_pair = regs[next_arg_comp-ignored];
1011 size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
1012 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
1013 size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
1014 next_arg_int++;
1015 #ifdef ASSERT
1016 if (bt == T_LONG || bt == T_DOUBLE) {
1017 // Overwrite the unused slot with known junk
1018 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
1019 __ movptr(Address(rsp, st_off), rax);
1020 }
1021 #endif /* ASSERT */
1022 } else {
1023 ignored++;
1024 // get the buffer from the just allocated pool of buffers
1025 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
1026 __ load_heap_oop(r14, Address(rscratch2, index));
1027 next_vt_arg++; next_arg_int++;
1028 int vt = 1;
1029 // write fields we get from compiled code in registers/stack
1030 // slots to the buffer: we know we are done with that inline type
1031 // argument when we hit the T_VOID that acts as an end of inline
1032 // type delimiter for this inline type. Inline types are flattened
1033 // so we might encounter embedded inline types. Each entry in
1034 // sig_extended contains a field offset in the buffer.
1035 Label L_null;
1036 do {
1037 next_arg_comp++;
1038 BasicType bt = sig_extended->at(next_arg_comp)._bt;
1039 BasicType prev_bt = sig_extended->at(next_arg_comp-1)._bt;
1040 if (bt == T_METADATA) {
1041 vt++;
1042 ignored++;
1043 } else if (bt == T_VOID &&
1044 prev_bt != T_LONG &&
1045 prev_bt != T_DOUBLE) {
1046 vt--;
1047 ignored++;
1048 } else {
1049 int off = sig_extended->at(next_arg_comp)._offset;
1050 if (off == -1) {
1051 // Nullable inline type argument, emit null check
1052 VMReg reg = regs[next_arg_comp-ignored].first();
1053 Label L_notNull;
1054 if (reg->is_stack()) {
1055 int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
1056 __ testb(Address(rsp, ld_off), 1);
1057 } else {
1058 __ testb(reg->as_Register(), 1);
1059 }
1060 __ jcc(Assembler::notZero, L_notNull);
1061 __ movptr(Address(rsp, st_off), 0);
1062 __ jmp(L_null);
1063 __ bind(L_notNull);
1064 continue;
1065 }
1066 assert(off > 0, "offset in object should be positive");
1067 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
1068 bool is_oop = is_reference_type(bt);
1069 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
1070 size_in_bytes, regs[next_arg_comp-ignored], Address(r14, off), extraspace, is_oop);
1071 }
1072 } while (vt != 0);
1073 // pass the buffer to the interpreter
1074 __ movptr(Address(rsp, st_off), r14);
1075 __ bind(L_null);
1076 }
1077 }
1078
1079 // Schedule the branch target address early.
1080 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
1081 __ jmp(rcx);
1082 }
1083
1084 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
1085 address code_start, address code_end,
1086 Label& L_ok) {
1087 Label L_fail;
1088 __ lea(temp_reg, AddressLiteral(code_start, relocInfo::none));
1089 __ cmpptr(pc_reg, temp_reg);
1090 __ jcc(Assembler::belowEqual, L_fail);
1091 __ lea(temp_reg, AddressLiteral(code_end, relocInfo::none));
1092 __ cmpptr(pc_reg, temp_reg);
1093 __ jcc(Assembler::below, L_ok);
1094 __ bind(L_fail);
1095 }
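// Note: range_check() above branches to L_ok when code_start < pc_reg < code_end;
// otherwise it falls through at L_fail, so callers place their failure handling
// (e.g. a stop) immediately after the call site.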
1096
1097 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
1098 int comp_args_on_stack,
1099 const GrowableArray<SigEntry>* sig,
1100 const VMRegPair *regs) {
1101
1102 // Note: r13 contains the senderSP on entry. We must preserve it since
1103 // we may do an i2c -> c2i transition if we lose a race where compiled
1104 // code goes non-entrant while we get args ready.
1105 // In addition we use r13 to locate all the interpreter args because
1106 // we must align the stack to 16 bytes on an i2c entry; otherwise we
1107 // lose the alignment we expect in all compiled code, and register
1108 // save code can segv when fxsave instructions find an improperly
1109 // aligned stack pointer.
1110
1111 // Adapters can be frameless because they do not require the caller
1112 // to perform additional cleanup work, such as correcting the stack pointer.
1113 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1114 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1115 // even if a callee has modified the stack pointer.
1116 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1117 // routinely repairs its caller's stack pointer (from sender_sp, which is set
1118 // up via the senderSP register).
1119 // In other words, if *either* the caller or callee is interpreted, we can
1120 // get the stack pointer repaired after a call.
1170 // Convert 4-byte c2 stack slots to words.
1171 int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
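// Worked example (illustrative): comp_args_on_stack == 5 gives 5 * 4 = 20
// bytes, aligned up to 24, then >> LogBytesPerWord yields 3 words reserved below.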
1172
1173 if (comp_args_on_stack) {
1174 __ subptr(rsp, comp_words_on_stack * wordSize);
1175 }
1176
1177 // Ensure compiled code always sees stack at proper alignment
1178 __ andptr(rsp, -16);
1179
1180 // Push the return address, misaligning the stack so that the youngest frame
1181 // sees it exactly as it would immediately after a call instruction
1182 __ push(rax);
1183
1184 // Put saved SP in another register
1185 const Register saved_sp = rax;
1186 __ movptr(saved_sp, r11);
1187
1188 // Will jump to the compiled code just as if compiled code was doing it.
1189 // Pre-load the register-jump target early, to schedule it better.
1190 __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_inline_offset())));
1191
1192 #if INCLUDE_JVMCI
1193 if (EnableJVMCI) {
1194 // check if this call should be routed towards a specific entry point
1195 __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1196 Label no_alternative_target;
1197 __ jcc(Assembler::equal, no_alternative_target);
1198 __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1199 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1200 __ bind(no_alternative_target);
1201 }
1202 #endif // INCLUDE_JVMCI
1203
1204 int total_args_passed = sig->length();
1205
1206 // Now generate the shuffle code. Pick up all register args and move the
1207 // rest through the floating point stack top.
1208 for (int i = 0; i < total_args_passed; i++) {
1209 BasicType bt = sig->at(i)._bt;
1210 if (bt == T_VOID) {
1211 // Longs and doubles are passed in native word order, but misaligned
1212 // in the 32-bit build.
1213 BasicType prev_bt = (i > 0) ? sig->at(i-1)._bt : T_ILLEGAL;
1214 assert(i > 0 && (prev_bt == T_LONG || prev_bt == T_DOUBLE), "missing half");
1215 continue;
1216 }
1217
1218 // Pick up 0, 1 or 2 words from SP+offset.
1219
1220 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
1221 "scrambled load targets?");
1222 // Load in argument order going down.
1223 int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
1224 // Point to interpreter value (vs. tag)
1225 int next_off = ld_off - Interpreter::stackElementSize;
1226 //
1227 //
1228 //
1229 VMReg r_1 = regs[i].first();
1230 VMReg r_2 = regs[i].second();
1231 if (!r_1->is_valid()) {
1232 assert(!r_2->is_valid(), "");
1233 continue;
1234 }
1235 if (r_1->is_stack()) {
1236 // Convert stack slot to an SP offset (+ wordSize to account for return address)
1237 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
1238
1239 // We can use r13 as a temp here because compiled code doesn't need r13 as an input
1240 // and if we end up going thru a c2i because of a miss, a reasonable value of r13
1241 // will be generated.
1242 if (!r_2->is_valid()) {
1243 // sign extend???
1244 __ movl(r13, Address(saved_sp, ld_off));
1245 __ movptr(Address(rsp, st_off), r13);
1246 } else {
1247 //
1248 // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
1249 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
1250 // so we must adjust where to pick up the data to match the interpreter.
1251 //
1252 // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
1253 // are accessed at negative offsets, so the LSW is at the LOW address
1254
1255 // ld_off is MSW so get LSW
1256 const int offset = (bt == T_LONG || bt == T_DOUBLE) ?
1257 next_off : ld_off;
1258 __ movq(r13, Address(saved_sp, offset));
1259 // st_off is LSW (i.e. reg.first())
1260 __ movq(Address(rsp, st_off), r13);
1261 }
1262 } else if (r_1->is_Register()) { // Register argument
1263 Register r = r_1->as_Register();
1264 assert(r != rax, "must be different");
1265 if (r_2->is_valid()) {
1266 //
1267 // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
1268 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
1269 // so we must adjust where to pick up the data to match the interpreter.
1270
1271 const int offset = (bt == T_LONG || bt == T_DOUBLE) ?
1272 next_off : ld_off;
1273
1274 // this can be a misaligned move
1275 __ movq(r, Address(saved_sp, offset));
1276 } else {
1277 // sign extend and use a full word?
1278 __ movl(r, Address(saved_sp, ld_off));
1279 }
1280 } else {
1281 if (!r_2->is_valid()) {
1282 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
1283 } else {
1284 __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
1285 }
1286 }
1287 }
1288
1289 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1290
1291 // 6243940 We might end up in handle_wrong_method if
1292 // the callee is deoptimized as we race thru here. If that
1293 // happens we don't want to take a safepoint because the
1294 // caller frame will look interpreted and arguments are now
1295 // "compiled" so it is much better to make this transition
1296 // invisible to the stack walking code. Unfortunately if
1297 // we try and find the callee by normal means a safepoint
1298 // is possible. So we stash the desired callee in the thread
1299 // and the VM will find it there should this case occur.
1300
1301 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1302
1303 // put Method* where a c2i would expect it, should we end up there
1304 // only needed because c2 resolve stubs return Method* as a result in
1305 // rax
1306 __ mov(rax, rbx);
1307 __ jmp(r11);
1308 }
1309
1310 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
1311 Register data = rax;
1312 __ ic_check(1 /* end_alignment */);
1313 __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
1314
1315 // Method might have been compiled since the call site was patched to
1316 // interpreted; if that is the case, treat it as a miss so we can get
1317 // the call site corrected.
1318 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1319 __ jcc(Assembler::equal, skip_fixup);
1320 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1321 }
1322
1323 // ---------------------------------------------------------------
1324 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1325 int comp_args_on_stack,
1326 const GrowableArray<SigEntry>* sig,
1327 const VMRegPair* regs,
1328 const GrowableArray<SigEntry>* sig_cc,
1329 const VMRegPair* regs_cc,
1330 const GrowableArray<SigEntry>* sig_cc_ro,
1331 const VMRegPair* regs_cc_ro,
1332 AdapterFingerPrint* fingerprint,
1333 AdapterBlob*& new_adapter,
1334 bool allocate_code_blob) {
1335 address i2c_entry = __ pc();
1336 gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1337
1338 // -------------------------------------------------------------------------
1339 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
1340 // to the interpreter. The args start out packed in the compiled layout. They
1341 // need to be unpacked into the interpreter layout. This will almost always
1342 // require some stack space. We grow the current (compiled) stack, then repack
1343 // the args. We finally end in a jump to the generic interpreter entry point.
1344 // On exit from the interpreter, the interpreter will restore our SP (lest the
1345 // compiled code, which relies solely on SP and not RBP, get sick).
1346
1347 address c2i_unverified_entry = __ pc();
1348 address c2i_unverified_inline_entry = __ pc();
1349 Label skip_fixup;
1350
1351 gen_inline_cache_check(masm, skip_fixup);
1352
1353 OopMapSet* oop_maps = new OopMapSet();
1354 int frame_complete = CodeOffsets::frame_never_safe;
1355 int frame_size_in_words = 0;
1356
1357 // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1358 address c2i_no_clinit_check_entry = nullptr;
1359 address c2i_inline_ro_entry = __ pc();
1360 if (regs_cc != regs_cc_ro) {
1361 // No class init barrier needed because method is guaranteed to be non-static
1362 gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1363 skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1364 skip_fixup.reset();
1365 }
1366
1367 // Scalarized c2i adapter
1368 address c2i_entry = __ pc();
1369 address c2i_inline_entry = __ pc();
1370 gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1371 skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1372
1373 // Non-scalarized c2i adapter
1374 if (regs != regs_cc) {
1375 c2i_unverified_inline_entry = __ pc();
1376 Label inline_entry_skip_fixup;
1377 gen_inline_cache_check(masm, inline_entry_skip_fixup);
1378
1379 c2i_inline_entry = __ pc();
1380 gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1381 inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1382 }
1383
1384 // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
1385 // the GC knows about the locations of the oop arguments passed to the c2i adapter.
1386 if (allocate_code_blob) {
1387 bool caller_must_gc_arguments = (regs != regs_cc);
1388 new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1389 }
1390
1391 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1392 }
1393
1394 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1395 VMRegPair *regs,
1396 int total_args_passed) {
1397
1398 // We return the amount of VMRegImpl stack slots we need to reserve for all
1399 // the arguments NOT counting out_preserve_stack_slots.
1400
1401 // NOTE: These arrays will have to change when c1 is ported
1402 #ifdef _WIN64
1403 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1404 c_rarg0, c_rarg1, c_rarg2, c_rarg3
1405 };
1406 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1407 c_farg0, c_farg1, c_farg2, c_farg3
1408 };
1409 #else
1410 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1411 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
2498 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2499
2500 // Get the handle (the 2nd argument)
2501 __ mov(oop_handle_reg, c_rarg1);
2502
2503 // Get address of the box
2504
2505 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2506
2507 // Load the oop from the handle
2508 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2509
2510 if (LockingMode == LM_MONITOR) {
2511 __ jmp(slow_path_lock);
2512 } else if (LockingMode == LM_LEGACY) {
2513 // Load immediate 1 into swap_reg %rax
2514 __ movl(swap_reg, 1);
2515
2516 // Load (object->mark() | 1) into swap_reg %rax
2517 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2518 if (EnableValhalla) {
2519 // Mask inline_type bit such that we go to the slow path if object is an inline type
2520 __ andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
2521 }
2522
2523 // Save (object->mark() | 1) into BasicLock's displaced header
2524 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2525
2526 // src -> dest iff dest == rax else rax <- dest
2527 __ lock();
2528 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2529 __ jcc(Assembler::equal, count_mon);
2530
2531 // Hmm should this move to the slow path code area???
2532
2533 // Test if the oopMark is an obvious stack pointer, i.e.,
2534 // 1) (mark & 3) == 0, and
2535 // 2) rsp <= mark < mark + os::pagesize()
2536 // These 3 tests can be done by evaluating the following
2537 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2538 // assuming both stack pointer and pagesize have their
2539 // least significant 2 bits clear.
2540 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
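// Illustration (assuming a 4K page size): 3 - 4096 is ...fffff003 in two's
// complement, so the expression above is zero exactly when (mark - rsp) has its
// low two bits clear and is less than the page size, i.e. mark points into the
// stack just above rsp.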
2541
3843 julong *scratch = (julong *)alloca(total_allocation);
3844
3845 // Local scratch arrays
3846 julong
3847 *a = scratch + 0 * longwords,
3848 *n = scratch + 1 * longwords,
3849 *m = scratch + 2 * longwords;
3850
3851 reverse_words((julong *)a_ints, a, longwords);
3852 reverse_words((julong *)n_ints, n, longwords);
3853
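// Note: montgomery_square below is a dedicated squaring routine that only pays
// off for large operands; under the threshold we simply multiply the number by
// itself, which yields the same Montgomery product.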
3854 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3855 ::montgomery_square(a, n, m, (julong)inv, longwords);
3856 } else {
3857 ::montgomery_multiply(a, a, n, m, (julong)inv, longwords);
3858 }
3859
3860 reverse_words(m, (julong *)m_ints, longwords);
3861 }
3862
3863 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3864 BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3865 CodeBuffer buffer(buf);
3866 short buffer_locs[20];
3867 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3868 sizeof(buffer_locs)/sizeof(relocInfo));
3869
3870 MacroAssembler* masm = new MacroAssembler(&buffer);
3871
3872 const Array<SigEntry>* sig_vk = vk->extended_sig();
3873 const Array<VMRegPair>* regs = vk->return_regs();
3874
3875 int pack_fields_jobject_off = __ offset();
3876 // Resolve pre-allocated buffer from JNI handle.
3877 // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3878 __ movptr(rax, Address(r13, 0));
3879 __ resolve_jobject(rax /* value */,
3880 r15_thread /* thread */,
3881 r12 /* tmp */);
3882 __ movptr(Address(r13, 0), rax);
3883
3884 int pack_fields_off = __ offset();
3885
3886 int j = 1;
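// Note: j starts at 1 because regs->at(0) describes the buffered inline object
// itself (returned in rax), not one of its fields.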
3887 for (int i = 0; i < sig_vk->length(); i++) {
3888 BasicType bt = sig_vk->at(i)._bt;
3889 if (bt == T_METADATA) {
3890 continue;
3891 }
3892 if (bt == T_VOID) {
3893 if (sig_vk->at(i-1)._bt == T_LONG ||
3894 sig_vk->at(i-1)._bt == T_DOUBLE) {
3895 j++;
3896 }
3897 continue;
3898 }
3899 int off = sig_vk->at(i)._offset;
3900 assert(off > 0, "offset in object should be positive");
3901 VMRegPair pair = regs->at(j);
3902 VMReg r_1 = pair.first();
3903 VMReg r_2 = pair.second();
3904 Address to(rax, off);
3905 if (bt == T_FLOAT) {
3906 __ movflt(to, r_1->as_XMMRegister());
3907 } else if (bt == T_DOUBLE) {
3908 __ movdbl(to, r_1->as_XMMRegister());
3909 } else {
3910 Register val = r_1->as_Register();
3911 assert_different_registers(to.base(), val, r14, r13, rbx, rscratch1);
3912 if (is_reference_type(bt)) {
3913 __ store_heap_oop(to, val, r14, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3914 } else {
3915 __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3916 }
3917 }
3918 j++;
3919 }
3920 assert(j == regs->length(), "missed a field?");
3921
3922 __ ret(0);
3923
3924 int unpack_fields_off = __ offset();
3925
3926 Label skip;
3927 __ testptr(rax, rax);
3928 __ jcc(Assembler::zero, skip);
3929
3930 j = 1;
3931 for (int i = 0; i < sig_vk->length(); i++) {
3932 BasicType bt = sig_vk->at(i)._bt;
3933 if (bt == T_METADATA) {
3934 continue;
3935 }
3936 if (bt == T_VOID) {
3937 if (sig_vk->at(i-1)._bt == T_LONG ||
3938 sig_vk->at(i-1)._bt == T_DOUBLE) {
3939 j++;
3940 }
3941 continue;
3942 }
3943 int off = sig_vk->at(i)._offset;
3944 assert(off > 0, "offset in object should be positive");
3945 VMRegPair pair = regs->at(j);
3946 VMReg r_1 = pair.first();
3947 VMReg r_2 = pair.second();
3948 Address from(rax, off);
3949 if (bt == T_FLOAT) {
3950 __ movflt(r_1->as_XMMRegister(), from);
3951 } else if (bt == T_DOUBLE) {
3952 __ movdbl(r_1->as_XMMRegister(), from);
3953 } else if (bt == T_OBJECT || bt == T_ARRAY) {
3954 assert_different_registers(rax, r_1->as_Register());
3955 __ load_heap_oop(r_1->as_Register(), from);
3956 } else {
3957 assert(is_java_primitive(bt), "unexpected basic type");
3958 assert_different_registers(rax, r_1->as_Register());
3959 size_t size_in_bytes = type2aelembytes(bt);
3960 __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3961 }
3962 j++;
3963 }
3964 assert(j == regs->length(), "missed a field?");
3965
3966 __ bind(skip);
3967 __ ret(0);
3968
3969 __ flush();
3970
3971 return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3972 }
3973
3974 #if INCLUDE_JFR
3975
3976 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
3977 // It returns a jobject handle to the event writer.
3978 // The handle is dereferenced and the return value is the event writer oop.
3979 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3980 enum layout {
3981 rbp_off,
3982 rbpH_off,
3983 return_off,
3984 return_off2,
3985 framesize // inclusive of return address
3986 };
3987
3988 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
3989 CodeBuffer code(name, 1024, 64);
3990 MacroAssembler* masm = new MacroAssembler(&code);
3991 address start = __ pc();
3992
3993 __ enter();
4046 __ reset_last_Java_frame(true);
4047
4048 __ leave();
4049 __ ret(0);
4050
4051 OopMapSet* oop_maps = new OopMapSet();
4052 OopMap* map = new OopMap(framesize, 1);
4053 oop_maps->add_gc_map(frame_complete, map);
4054
4055 RuntimeStub* stub =
4056 RuntimeStub::new_runtime_stub(name,
4057 &code,
4058 frame_complete,
4059 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
4060 oop_maps,
4061 false);
4062 return stub;
4063 }
4064
4065 #endif // INCLUDE_JFR