11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #ifndef _WINDOWS
27 #include "alloca.h"
28 #endif
29 #include "asm/macroAssembler.hpp"
30 #include "asm/macroAssembler.inline.hpp"
31 #include "code/compiledIC.hpp"
32 #include "code/debugInfoRec.hpp"
33 #include "code/icBuffer.hpp"
34 #include "code/nativeInst.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gcLocker.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/barrierSetAssembler.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "logging/log.hpp"
43 #include "memory/resourceArea.hpp"
44 #include "memory/universe.hpp"
45 #include "oops/compiledICHolder.hpp"
46 #include "oops/klass.inline.hpp"
47 #include "oops/method.inline.hpp"
48 #include "prims/methodHandles.hpp"
49 #include "runtime/continuation.hpp"
50 #include "runtime/continuationEntry.inline.hpp"
541 }
542 break;
543 case T_DOUBLE:
544 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
545 if (fp_args < Argument::n_float_register_parameters_j) {
546 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
547 } else {
548 regs[i].set2(VMRegImpl::stack2reg(stk_args));
549 stk_args += 2;
550 }
551 break;
552 default:
553 ShouldNotReachHere();
554 break;
555 }
556 }
557
558 return align_up(stk_args, 2);
559 }
560
561 // Patch the caller's callsite with the entry to compiled code, if it exists.
562 static void patch_callers_callsite(MacroAssembler *masm) {
563 Label L;
564 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
565 __ jcc(Assembler::equal, L);
566
567 // Save the current stack pointer
568 __ mov(r13, rsp);
569 // Schedule the branch target address early.
570 // Call into the VM to patch the caller, then jump to compiled callee
571 // rax isn't live so capture return address while we easily can
572 __ movptr(rax, Address(rsp, 0));
573
574 // align stack so push_CPU_state doesn't fault
575 __ andptr(rsp, -(StackAlignmentInBytes));
576 __ push_CPU_state();
577 __ vzeroupper();
578 // VM needs caller's callsite
579 // VM needs target method
580 // This needs to be a long call since we will relocate this adapter to
581 // the codeBuffer and it may not reach
582 //
583 // Allocate argument register save area
584 if (frame::arg_reg_save_area_bytes != 0) {
585 __ subptr(rsp, frame::arg_reg_save_area_bytes);
586 }
587 __ mov(c_rarg0, rbx);
588 __ mov(c_rarg1, rax);
589 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
590
591 // De-allocate argument register save area
592 if (frame::arg_reg_save_area_bytes != 0) {
593 __ addptr(rsp, frame::arg_reg_save_area_bytes);
594 }
595
596 __ vzeroupper();
597 __ pop_CPU_state();
598 // restore sp
599 __ mov(rsp, r13);
600 __ bind(L);
601 }
602
603
604 static void gen_c2i_adapter(MacroAssembler *masm,
605 int total_args_passed,
606 int comp_args_on_stack,
607 const BasicType *sig_bt,
608 const VMRegPair *regs,
609 Label& skip_fixup) {
610 // Before we get into the guts of the C2I adapter, see if we should be here
611 // at all. We've come from compiled code and are attempting to jump to the
612 // interpreter, which means the caller made a static call to get here
613 // (vcalls always get a compiled target if there is one). Check for a
614 // compiled target. If there is one, we need to patch the caller's call.
615 patch_callers_callsite(masm);
616
617 __ bind(skip_fixup);
618
619 // Since all args are passed on the stack, total_args_passed *
620 // Interpreter::stackElementSize is the space we need.
621
622 assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
623
624 int extraspace = (total_args_passed * Interpreter::stackElementSize);
625
626 // stack is aligned, keep it that way
627 // This is not currently needed or enforced by the interpreter, but
628 // we might as well conform to the ABI.
629 extraspace = align_up(extraspace, 2*wordSize);
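// e.g. 3 args: 3 * Interpreter::stackElementSize == 24 bytes, rounded up to the
// next 16-byte boundary == 32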
630
631 // set senderSP value
632 __ lea(r13, Address(rsp, wordSize));
633
634 #ifdef ASSERT
635 __ check_stack_alignment(r13, "sender stack not aligned");
636 #endif
637 if (extraspace > 0) {
638 // Pop the return address
639 __ pop(rax);
640
641 __ subptr(rsp, extraspace);
642
643 // Push the return address
644 __ push(rax);
645
646 // Account for the return address location since we store it first rather
647 // than holding it in a register across all the shuffling
648 extraspace += wordSize;
649 }
650
651 #ifdef ASSERT
652 __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
653 #endif
654
655 // Now write the args into the outgoing interpreter space
656 for (int i = 0; i < total_args_passed; i++) {
657 if (sig_bt[i] == T_VOID) {
658 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
659 continue;
660 }
661
662 // offset to start parameters
663 int st_off = (total_args_passed - i) * Interpreter::stackElementSize;
664 int next_off = st_off - Interpreter::stackElementSize;
665
666 // Say 4 args:
667 // i st_off
668 // 0 32 T_LONG
669 // 1 24 T_VOID
670 // 2 16 T_OBJECT
671 // 3 8 T_BOOL
672 // - 0 return address
673 //
674 // However, to make things extra confusing: because we can fit a long/double in
675 // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
676 // leaves one slot empty and only stores to a single slot. In this case the
677 // slot that is occupied is the T_VOID slot. See, I said it was confusing.
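// For instance, the T_LONG at i == 0 above has st_off == 32 and next_off == 24:
// the value is written at next_off (the T_VOID slot) and the st_off slot is
// left unused (junk-filled in debug builds, see below).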
678
679 VMReg r_1 = regs[i].first();
680 VMReg r_2 = regs[i].second();
681 if (!r_1->is_valid()) {
682 assert(!r_2->is_valid(), "");
683 continue;
684 }
685 if (r_1->is_stack()) {
686 // memory to memory use rax
687 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
688 if (!r_2->is_valid()) {
689 // sign extend??
690 __ movl(rax, Address(rsp, ld_off));
691 __ movptr(Address(rsp, st_off), rax);
692
693 } else {
694
695 __ movq(rax, Address(rsp, ld_off));
696
697 // Two VMRegs/OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
698 // T_DOUBLE and T_LONG use two slots in the interpreter
699 if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
700 // ld_off == LSW, ld_off+wordSize == MSW
701 // st_off == MSW, next_off == LSW
702 __ movq(Address(rsp, next_off), rax);
703 #ifdef ASSERT
704 // Overwrite the unused slot with known junk
705 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
706 __ movptr(Address(rsp, st_off), rax);
707 #endif /* ASSERT */
708 } else {
709 __ movq(Address(rsp, st_off), rax);
710 }
711 }
712 } else if (r_1->is_Register()) {
713 Register r = r_1->as_Register();
714 if (!r_2->is_valid()) {
715 // must be only an int (or smaller) so move only 32 bits to the slot
716 // why not sign extend??
717 __ movl(Address(rsp, st_off), r);
718 } else {
719 // Two VMRegs/OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
720 // T_DOUBLE and T_LONG use two slots in the interpreter
721 if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
722 // long/double in gpr
723 #ifdef ASSERT
724 // Overwrite the unused slot with known junk
725 __ mov64(rax, CONST64(0xdeadffffdeadaaab));
726 __ movptr(Address(rsp, st_off), rax);
727 #endif /* ASSERT */
728 __ movq(Address(rsp, next_off), r);
729 } else {
730 __ movptr(Address(rsp, st_off), r);
731 }
732 }
733 } else {
734 assert(r_1->is_XMMRegister(), "");
735 if (!r_2->is_valid()) {
736 // only a float; use just part of the slot
737 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
738 } else {
739 #ifdef ASSERT
740 // Overwrite the unused slot with known junk
741 __ mov64(rax, CONST64(0xdeadffffdeadaaac));
742 __ movptr(Address(rsp, st_off), rax);
743 #endif /* ASSERT */
744 __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
745 }
746 }
747 }
748
749 // Schedule the branch target address early.
750 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
751 __ jmp(rcx);
752 }
753
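// Jumps to L_ok iff code_start < pc_reg < code_end; otherwise falls through
// (both failure branches end up at L_fail).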
754 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
755 address code_start, address code_end,
756 Label& L_ok) {
757 Label L_fail;
758 __ lea(temp_reg, ExternalAddress(code_start));
759 __ cmpptr(pc_reg, temp_reg);
760 __ jcc(Assembler::belowEqual, L_fail);
761 __ lea(temp_reg, ExternalAddress(code_end));
762 __ cmpptr(pc_reg, temp_reg);
763 __ jcc(Assembler::below, L_ok);
764 __ bind(L_fail);
765 }
766
767 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
768 int total_args_passed,
769 int comp_args_on_stack,
770 const BasicType *sig_bt,
771 const VMRegPair *regs) {
772
773 // Note: r13 contains the senderSP on entry. We must preserve it since
774 // we may do an i2c -> c2i transition if we lose a race where compiled
775 // code goes non-entrant while we get args ready.
776 // In addition we use r13 to locate all the interpreter args as
777 // we must align the stack to 16 bytes on an i2c entry else we
778 // lose alignment we expect in all compiled code and register
779 // save code can segv when fxsave instructions find improperly
780 // aligned stack pointer.
781
782 // Adapters can be frameless because they do not require the caller
783 // to perform additional cleanup work, such as correcting the stack pointer.
784 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
785 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
786 // even if a callee has modified the stack pointer.
787 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
788 // routinely repairs its caller's stack pointer (from sender_sp, which is set
789 // up via the senderSP register).
790 // In other words, if *either* the caller or callee is interpreted, we can
841 // Convert 4-byte c2 stack slots to words.
842 int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
843
844 if (comp_args_on_stack) {
845 __ subptr(rsp, comp_words_on_stack * wordSize);
846 }
847
848 // Ensure compiled code always sees stack at proper alignment
849 __ andptr(rsp, -16);
850
851 // push the return address, misaligning the stack so that the youngest frame
852 // sees rsp exactly as it would immediately after a call instruction
853 __ push(rax);
854
855 // Put saved SP in another register
856 const Register saved_sp = rax;
857 __ movptr(saved_sp, r11);
858
859 // Will jump to the compiled code just as if compiled code was doing it.
860 // Pre-load the register-jump target early, to schedule it better.
861 __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
862
863 #if INCLUDE_JVMCI
864 if (EnableJVMCI) {
865 // check if this call should be routed towards a specific entry point
866 __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
867 Label no_alternative_target;
868 __ jcc(Assembler::equal, no_alternative_target);
869 __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
870 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
871 __ bind(no_alternative_target);
872 }
873 #endif // INCLUDE_JVMCI
874
875 // Now generate the shuffle code. Pick up all register args and move the
876 // rest through the floating point stack top.
877 for (int i = 0; i < total_args_passed; i++) {
878 if (sig_bt[i] == T_VOID) {
879 // Longs and doubles are passed in native word order, but misaligned
880 // in the 32-bit build.
881 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
882 continue;
883 }
884
885 // Pick up 0, 1 or 2 words from SP+offset.
886
887 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
888 "scrambled load targets?");
889 // Load in argument order going down.
890 int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
891 // Point to interpreter value (vs. tag)
892 int next_off = ld_off - Interpreter::stackElementSize;
893 //
894 //
895 //
896 VMReg r_1 = regs[i].first();
897 VMReg r_2 = regs[i].second();
898 if (!r_1->is_valid()) {
899 assert(!r_2->is_valid(), "");
900 continue;
901 }
902 if (r_1->is_stack()) {
903 // Convert stack slot to an SP offset (+ wordSize to account for return address)
904 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
905
906 // We can use r13 as a temp here because compiled code doesn't need r13 as an input
907 // and if we end up going thru a c2i because of a miss, a reasonable value of r13
908 // will be generated.
909 if (!r_2->is_valid()) {
910 // sign extend???
911 __ movl(r13, Address(saved_sp, ld_off));
912 __ movptr(Address(rsp, st_off), r13);
913 } else {
914 //
915 // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
916 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
917 // So we must adjust where to pick up the data to match the interpreter.
918 //
919 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
920 // are accessed as negative so LSW is at LOW address
921
922 // ld_off is MSW so get LSW
923 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
924 next_off : ld_off;
925 __ movq(r13, Address(saved_sp, offset));
926 // st_off is LSW (i.e. reg.first())
927 __ movq(Address(rsp, st_off), r13);
928 }
929 } else if (r_1->is_Register()) { // Register argument
930 Register r = r_1->as_Register();
931 assert(r != rax, "must be different");
932 if (r_2->is_valid()) {
933 //
934 // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
935 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
936 // So we must adjust where to pick up the data to match the interpreter.
937
938 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
939 next_off : ld_off;
940
941 // this can be a misaligned move
942 __ movq(r, Address(saved_sp, offset));
943 } else {
944 // sign extend and use a full word?
945 __ movl(r, Address(saved_sp, ld_off));
946 }
947 } else {
948 if (!r_2->is_valid()) {
949 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
950 } else {
951 __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
952 }
953 }
954 }
955
956 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
957
958 // 6243940 We might end up in handle_wrong_method if
959 // the callee is deoptimized as we race thru here. If that
960 // happens we don't want to take a safepoint because the
961 // caller frame will look interpreted and arguments are now
962 // "compiled" so it is much better to make this transition
963 // invisible to the stack walking code. Unfortunately if
964 // we try and find the callee by normal means a safepoint
965 // is possible. So we stash the desired callee in the thread
966 // and the VM will find it there should this case occur.
967
968 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
969
970 // put Method* where a c2i would expect it should we end up there
971 // only needed because c2 resolve stubs return Method* as a result in
972 // rax
973 __ mov(rax, rbx);
974 __ jmp(r11);
975 }
976
977 // ---------------------------------------------------------------
978 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
979 int total_args_passed,
980 int comp_args_on_stack,
981 const BasicType *sig_bt,
982 const VMRegPair *regs,
983 AdapterFingerPrint* fingerprint) {
984 address i2c_entry = __ pc();
985
986 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
987
988 // -------------------------------------------------------------------------
989 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
990 // to the interpreter. The args start out packed in the compiled layout. They
991 // need to be unpacked into the interpreter layout. This will almost always
992 // require some stack space. We grow the current (compiled) stack, then repack
993 // the args. We finally end in a jump to the generic interpreter entry point.
994 // On exit from the interpreter, the interpreter will restore our SP (lest the
995 // compiled code, which relies solely on SP and not RBP, get sick).
996
997 address c2i_unverified_entry = __ pc();
998 Label skip_fixup;
999 Label ok;
1000
1001 Register holder = rax;
1002 Register receiver = j_rarg0;
1003 Register temp = rbx;
1004
1005 {
1006 __ load_klass(temp, receiver, rscratch1);
1007 __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1008 __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
1009 __ jcc(Assembler::equal, ok);
1010 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1011
1012 __ bind(ok);
1013 // Method might have been compiled since the call site was patched to
1014 // interpreted; if that is the case treat it as a miss so we can get
1015 // the call site corrected.
1016 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1017 __ jcc(Assembler::equal, skip_fixup);
1018 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1019 }
1020
1021 address c2i_entry = __ pc();
1022
1023 // Class initialization barrier for static methods
1024 address c2i_no_clinit_check_entry = nullptr;
1025 if (VM_Version::supports_fast_class_init_checks()) {
1026 Label L_skip_barrier;
1027 Register method = rbx;
1028
1029 { // Bypass the barrier for non-static methods
1030 Register flags = rscratch1;
1031 __ movl(flags, Address(method, Method::access_flags_offset()));
1032 __ testl(flags, JVM_ACC_STATIC);
1033 __ jcc(Assembler::zero, L_skip_barrier); // non-static
1034 }
1035
1036 Register klass = rscratch1;
1037 __ load_method_holder(klass, method);
1038 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
1039
1040 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
1041
1042 __ bind(L_skip_barrier);
1043 c2i_no_clinit_check_entry = __ pc();
1044 }
1045
1046 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1047 bs->c2i_entry_barrier(masm);
1048
1049 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1050
1051 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1052 }
1053
1054 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1055 VMRegPair *regs,
1056 int total_args_passed) {
1057
1058 // We return the number of VMRegImpl stack slots we need to reserve for all
1059 // the arguments NOT counting out_preserve_stack_slots.
1060
1061 // NOTE: These arrays will have to change when c1 is ported
1062 #ifdef _WIN64
1063 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1064 c_rarg0, c_rarg1, c_rarg2, c_rarg3
1065 };
1066 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1067 c_farg0, c_farg1, c_farg2, c_farg3
1068 };
1069 #else
1070 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1071 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
2140 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2141
2142 // Get the handle (the 2nd argument)
2143 __ mov(oop_handle_reg, c_rarg1);
2144
2145 // Get address of the box
2146
2147 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2148
2149 // Load the oop from the handle
2150 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2151
2152 if (LockingMode == LM_MONITOR) {
2153 __ jmp(slow_path_lock);
2154 } else if (LockingMode == LM_LEGACY) {
2155 // Load immediate 1 into swap_reg %rax
2156 __ movl(swap_reg, 1);
2157
2158 // Load (object->mark() | 1) into swap_reg %rax
2159 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2160
2161 // Save (object->mark() | 1) into BasicLock's displaced header
2162 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2163
2164 // src -> dest iff dest == rax else rax <- dest
2165 __ lock();
2166 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2167 __ jcc(Assembler::equal, count_mon);
2168
2169 // Hmm should this move to the slow path code area???
2170
2171 // Test if the oopMark is an obvious stack pointer, i.e.,
2172 // 1) (mark & 3) == 0, and
2173 // 2) rsp <= mark < rsp + os::pagesize()
2174 // These 3 tests can be done by evaluating the following
2175 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2176 // assuming both stack pointer and pagesize have their
2177 // least significant 2 bits clear.
2178 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
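// Worked example with a 4K page: 3 - 4096 == ...F003 in two's complement, so
// the AND comes out zero exactly when (mark - rsp) has its low 2 bits clear
// and is below the page size, i.e. mark is an aligned address in
// [rsp, rsp + 4096).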
2179
3701 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
3702 #endif
3703 // Clear the exception oop so GC no longer processes it as a root.
3704 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
3705
3706 // rax: exception oop
3707 // r8: exception handler
3708 // rdx: exception pc
3709 // Jump to handler
3710
3711 __ jmp(r8);
3712
3713 // Make sure all code is generated
3714 masm->flush();
3715
3716 // Set exception blob
3717 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3718 }
3719 #endif // COMPILER2
3720
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #ifndef _WINDOWS
27 #include "alloca.h"
28 #endif
29 #include "asm/macroAssembler.hpp"
30 #include "asm/macroAssembler.inline.hpp"
31 #include "classfile/symbolTable.hpp"
32 #include "code/compiledIC.hpp"
33 #include "code/debugInfoRec.hpp"
34 #include "code/icBuffer.hpp"
35 #include "code/nativeInst.hpp"
36 #include "code/vtableStubs.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "gc/shared/barrierSet.hpp"
41 #include "gc/shared/barrierSetAssembler.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "logging/log.hpp"
44 #include "memory/resourceArea.hpp"
45 #include "memory/universe.hpp"
46 #include "oops/compiledICHolder.hpp"
47 #include "oops/klass.inline.hpp"
48 #include "oops/method.inline.hpp"
49 #include "prims/methodHandles.hpp"
50 #include "runtime/continuation.hpp"
51 #include "runtime/continuationEntry.inline.hpp"
542 }
543 break;
544 case T_DOUBLE:
545 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
546 if (fp_args < Argument::n_float_register_parameters_j) {
547 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
548 } else {
549 regs[i].set2(VMRegImpl::stack2reg(stk_args));
550 stk_args += 2;
551 }
552 break;
553 default:
554 ShouldNotReachHere();
555 break;
556 }
557 }
558
559 return align_up(stk_args, 2);
560 }
561
562 // Same as java_calling_convention() but for multiple return
563 // values. There's no way to store them on the stack so if we don't
564 // have enough registers, multiple values can't be returned.
565 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
566 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
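// Note: the max int count is n_int_register_parameters_j + 1 because rax is
// available for returns in addition to the six j_rarg registers (see
// INT_ArgReg below).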
567 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
568 VMRegPair *regs,
569 int total_args_passed) {
570 // Create the mapping between argument positions and
571 // registers.
572 static const Register INT_ArgReg[java_return_convention_max_int] = {
573 rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
574 };
575 static const XMMRegister FP_ArgReg[java_return_convention_max_float] = {
576 j_farg0, j_farg1, j_farg2, j_farg3,
577 j_farg4, j_farg5, j_farg6, j_farg7
578 };
579
580
581 uint int_args = 0;
582 uint fp_args = 0;
583
584 for (int i = 0; i < total_args_passed; i++) {
585 switch (sig_bt[i]) {
586 case T_BOOLEAN:
587 case T_CHAR:
588 case T_BYTE:
589 case T_SHORT:
590 case T_INT:
591 if (int_args < Argument::n_int_register_parameters_j+1) {
592 regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
593 int_args++;
594 } else {
595 return -1;
596 }
597 break;
598 case T_VOID:
599 // halves of T_LONG or T_DOUBLE
600 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
601 regs[i].set_bad();
602 break;
603 case T_LONG:
604 assert(sig_bt[i + 1] == T_VOID, "expecting half");
605 // fall through
606 case T_OBJECT:
607 case T_ARRAY:
608 case T_ADDRESS:
609 case T_METADATA:
610 if (int_args < Argument::n_int_register_parameters_j+1) {
611 regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
612 int_args++;
613 } else {
614 return -1;
615 }
616 break;
617 case T_FLOAT:
618 if (fp_args < Argument::n_float_register_parameters_j) {
619 regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
620 fp_args++;
621 } else {
622 return -1;
623 }
624 break;
625 case T_DOUBLE:
626 assert(sig_bt[i + 1] == T_VOID, "expecting half");
627 if (fp_args < Argument::n_float_register_parameters_j) {
628 regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
629 fp_args++;
630 } else {
631 return -1;
632 }
633 break;
634 default:
635 ShouldNotReachHere();
636 break;
637 }
638 }
639
640 return int_args + fp_args;
641 }
642
643 // Patch the caller's callsite with the entry to compiled code, if it exists.
644 static void patch_callers_callsite(MacroAssembler *masm) {
645 Label L;
646 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
647 __ jcc(Assembler::equal, L);
648
649 // Save the current stack pointer
650 __ mov(r13, rsp);
651 // Schedule the branch target address early.
652 // Call into the VM to patch the caller, then jump to compiled callee
653 // rax isn't live so capture return address while we easily can
654 __ movptr(rax, Address(rsp, 0));
655
656 // align stack so push_CPU_state doesn't fault
657 __ andptr(rsp, -(StackAlignmentInBytes));
658 __ push_CPU_state();
659 __ vzeroupper();
660 // VM needs caller's callsite
661 // VM needs target method
662 // This needs to be a long call since we will relocate this adapter to
663 // the codeBuffer and it may not reach
664 //
665 // Allocate argument register save area
666 if (frame::arg_reg_save_area_bytes != 0) {
667 __ subptr(rsp, frame::arg_reg_save_area_bytes);
668 }
669 __ mov(c_rarg0, rbx);
670 __ mov(c_rarg1, rax);
671 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
672
673 // De-allocate argument register save area
674 if (frame::arg_reg_save_area_bytes != 0) {
675 __ addptr(rsp, frame::arg_reg_save_area_bytes);
676 }
677
678 __ vzeroupper();
679 __ pop_CPU_state();
680 // restore sp
681 __ mov(rsp, r13);
682 __ bind(L);
683 }
684
685 // For each inline type argument, sig includes the list of fields of
686 // the inline type. This utility function computes the number of
687 // arguments for the call if inline types are passed by reference (the
688 // calling convention the interpreter expects).
689 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
690 int total_args_passed = 0;
691 if (InlineTypePassFieldsAsArgs) {
692 for (int i = 0; i < sig_extended->length(); i++) {
693 BasicType bt = sig_extended->at(i)._bt;
694 if (bt == T_METADATA) {
695 // In sig_extended, an inline type argument starts with:
696 // T_METADATA, followed by the types of the fields of the
697 // inline type and T_VOID to mark the end of the inline
698 // type. Inline types are flattened so, for instance, in the
699 // case of an inline type with an int field and an inline type
700 // field that itself has 2 fields, an int and a long:
701 // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
702 // slot for the T_LONG) T_VOID (inner inline type) T_VOID
703 // (outer inline type)
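// For that example, the do/while loop below consumes the whole flattened
// sequence but total_args_passed is incremented only once: the interpreter
// receives the inline type as a single reference argument.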
704 total_args_passed++;
705 int vt = 1;
706 do {
707 i++;
708 BasicType bt = sig_extended->at(i)._bt;
709 BasicType prev_bt = sig_extended->at(i-1)._bt;
710 if (bt == T_METADATA) {
711 vt++;
712 } else if (bt == T_VOID &&
713 prev_bt != T_LONG &&
714 prev_bt != T_DOUBLE) {
715 vt--;
716 }
717 } while (vt != 0);
718 } else {
719 total_args_passed++;
720 }
721 }
722 } else {
723 total_args_passed = sig_extended->length();
724 }
725 return total_args_passed;
726 }
727
728
729 static void gen_c2i_adapter_helper(MacroAssembler* masm,
730 BasicType bt,
731 BasicType prev_bt,
732 size_t size_in_bytes,
733 const VMRegPair& reg_pair,
734 const Address& to,
735 int extraspace,
736 bool is_oop) {
737 if (bt == T_VOID) {
738 assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
739 return;
740 }
741
742 // Say 4 args:
743 // i st_off
744 // 0 32 T_LONG
745 // 1 24 T_VOID
746 // 2 16 T_OBJECT
747 // 3 8 T_BOOL
748 // - 0 return address
749 //
750 // However, to make things extra confusing: because we can fit a long/double in
751 // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
752 // leaves one slot empty and only stores to a single slot. In this case the
753 // slot that is occupied is the T_VOID slot. See, I said it was confusing.
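// For instance, the T_LONG at i == 0 above has st_off == 32 and next_off == 24:
// the value is written at next_off (the T_VOID slot) and the st_off slot is
// left unused (junk-filled in debug builds by the caller's ASSERT block).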
754
755 bool wide = (size_in_bytes == wordSize);
756 VMReg r_1 = reg_pair.first();
757 VMReg r_2 = reg_pair.second();
758 assert(r_2->is_valid() == wide, "invalid size");
759 if (!r_1->is_valid()) {
760 assert(!r_2->is_valid(), "must be invalid");
761 return;
762 }
763
764 if (!r_1->is_XMMRegister()) {
765 Register val = rax;
766 if (r_1->is_stack()) {
767 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
768 __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
769 } else {
770 val = r_1->as_Register();
771 }
772 assert_different_registers(to.base(), val, rscratch1);
773 if (is_oop) {
774 __ push(r13);
775 __ push(rbx);
776 __ store_heap_oop(to, val, rscratch1, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
777 __ pop(rbx);
778 __ pop(r13);
779 } else {
780 __ store_sized_value(to, val, size_in_bytes);
781 }
782 } else {
783 if (wide) {
784 __ movdbl(to, r_1->as_XMMRegister());
785 } else {
786 __ movflt(to, r_1->as_XMMRegister());
787 }
788 }
789 }
790
791 static void gen_c2i_adapter(MacroAssembler *masm,
792 const GrowableArray<SigEntry>* sig_extended,
793 const VMRegPair *regs,
794 bool requires_clinit_barrier,
795 address& c2i_no_clinit_check_entry,
796 Label& skip_fixup,
797 address start,
798 OopMapSet* oop_maps,
799 int& frame_complete,
800 int& frame_size_in_words,
801 bool alloc_inline_receiver) {
802 if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
803 Label L_skip_barrier;
804 Register method = rbx;
805
806 { // Bypass the barrier for non-static methods
807 Register flags = rscratch1;
808 __ movl(flags, Address(method, Method::access_flags_offset()));
809 __ testl(flags, JVM_ACC_STATIC);
810 __ jcc(Assembler::zero, L_skip_barrier); // non-static
811 }
812
813 Register klass = rscratch1;
814 __ load_method_holder(klass, method);
815 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
816
817 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
818
819 __ bind(L_skip_barrier);
820 c2i_no_clinit_check_entry = __ pc();
821 }
822
823 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
824 bs->c2i_entry_barrier(masm);
825
826 // Before we get into the guts of the C2I adapter, see if we should be here
827 // at all. We've come from compiled code and are attempting to jump to the
828 // interpreter, which means the caller made a static call to get here
829 // (vcalls always get a compiled target if there is one). Check for a
830 // compiled target. If there is one, we need to patch the caller's call.
831 patch_callers_callsite(masm);
832
833 __ bind(skip_fixup);
834
835 if (InlineTypePassFieldsAsArgs) {
836 // Is there an inline type argument?
837 bool has_inline_argument = false;
838 for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
839 has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
840 }
841 if (has_inline_argument) {
842 // There is at least one inline type argument: we're coming from
843 // compiled code so we have no buffers to back the inline types.
844 // Allocate the buffers here with a runtime call.
845 OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ false);
846
847 frame_complete = __ offset();
848
849 __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
850
851 __ mov(c_rarg0, r15_thread);
852 __ mov(c_rarg1, rbx);
853 __ mov64(c_rarg2, (int64_t)alloc_inline_receiver);
854 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
855
856 oop_maps->add_gc_map((int)(__ pc() - start), map);
857 __ reset_last_Java_frame(false);
858
859 RegisterSaver::restore_live_registers(masm);
860
861 Label no_exception;
862 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
863 __ jcc(Assembler::equal, no_exception);
864
865 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
866 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
867 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
868
869 __ bind(no_exception);
870
871 // We get an array of objects from the runtime call
872 __ get_vm_result(rscratch2, r15_thread); // Use rscratch2 (r11) as temporary because rscratch1 (r10) is trashed by movptr()
873 __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live?
874 }
875 }
876
877 // Since all args are passed on the stack, total_args_passed *
878 // Interpreter::stackElementSize is the space we need.
879 int total_args_passed = compute_total_args_passed_int(sig_extended);
880 assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
881
882 int extraspace = (total_args_passed * Interpreter::stackElementSize);
883
884 // stack is aligned, keep it that way
885 // This is not currently needed or enforced by the interpreter, but
886 // we might as well conform to the ABI.
887 extraspace = align_up(extraspace, 2*wordSize);
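// e.g. 3 args: 3 * Interpreter::stackElementSize == 24 bytes, rounded up to the
// next 16-byte boundary == 32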
888
889 // set senderSP value
890 __ lea(r13, Address(rsp, wordSize));
891
892 #ifdef ASSERT
893 __ check_stack_alignment(r13, "sender stack not aligned");
894 #endif
895 if (extraspace > 0) {
896 // Pop the return address
897 __ pop(rax);
898
899 __ subptr(rsp, extraspace);
900
901 // Push the return address
902 __ push(rax);
903
904 // Account for the return address location since we store it first rather
905 // than holding it in a register across all the shuffling
906 extraspace += wordSize;
907 }
908
909 #ifdef ASSERT
910 __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
911 #endif
912
913 // Now write the args into the outgoing interpreter space
914
915 // next_arg_comp is the next argument from the compiler point of
916 // view (inline type fields are passed in registers/on the stack). In
917 // sig_extended, an inline type argument starts with: T_METADATA,
918 // followed by the types of the fields of the inline type and T_VOID
919 // to mark the end of the inline type. ignored counts the number of
920 // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
921 // used to get the buffer for that argument from the pool of buffers
922 // we allocated above and want to pass to the
923 // interpreter. next_arg_int is the next argument from the
924 // interpreter point of view (inline types are passed by reference).
925 for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
926 next_arg_comp < sig_extended->length(); next_arg_comp++) {
927 assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
928 assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
929 BasicType bt = sig_extended->at(next_arg_comp)._bt;
930 int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
931 if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
932 int next_off = st_off - Interpreter::stackElementSize;
933 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
934 const VMRegPair reg_pair = regs[next_arg_comp-ignored];
935 size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
936 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
937 size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
938 next_arg_int++;
939 #ifdef ASSERT
940 if (bt == T_LONG || bt == T_DOUBLE) {
941 // Overwrite the unused slot with known junk
942 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
943 __ movptr(Address(rsp, st_off), rax);
944 }
945 #endif /* ASSERT */
946 } else {
947 ignored++;
948 // get the buffer from the just allocated pool of buffers
949 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
950 __ load_heap_oop(r14, Address(rscratch2, index));
951 next_vt_arg++; next_arg_int++;
952 int vt = 1;
953 // write fields we get from compiled code in registers/stack
954 // slots to the buffer: we know we are done with that inline type
955 // argument when we hit the T_VOID that acts as an end of inline
956 // type delimiter for this inline type. Inline types are flattened
957 // so we might encounter embedded inline types. Each entry in
958 // sig_extended contains a field offset in the buffer.
959 Label L_null;
960 do {
961 next_arg_comp++;
962 BasicType bt = sig_extended->at(next_arg_comp)._bt;
963 BasicType prev_bt = sig_extended->at(next_arg_comp-1)._bt;
964 if (bt == T_METADATA) {
965 vt++;
966 ignored++;
967 } else if (bt == T_VOID &&
968 prev_bt != T_LONG &&
969 prev_bt != T_DOUBLE) {
970 vt--;
971 ignored++;
972 } else {
973 int off = sig_extended->at(next_arg_comp)._offset;
974 if (off == -1) {
975 // Nullable inline type argument, emit null check
976 VMReg reg = regs[next_arg_comp-ignored].first();
977 Label L_notNull;
978 if (reg->is_stack()) {
979 int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
980 __ testb(Address(rsp, ld_off), 1);
981 } else {
982 __ testb(reg->as_Register(), 1);
983 }
984 __ jcc(Assembler::notZero, L_notNull);
985 __ movptr(Address(rsp, st_off), 0);
986 __ jmp(L_null);
987 __ bind(L_notNull);
988 continue;
989 }
990 assert(off > 0, "offset in object should be positive");
991 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
992 bool is_oop = is_reference_type(bt);
993 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
994 size_in_bytes, regs[next_arg_comp-ignored], Address(r14, off), extraspace, is_oop);
995 }
996 } while (vt != 0);
997 // pass the buffer to the interpreter
998 __ movptr(Address(rsp, st_off), r14);
999 __ bind(L_null);
1000 }
1001 }
1002
1003 // Schedule the branch target address early.
1004 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
1005 __ jmp(rcx);
1006 }
1007
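// Jumps to L_ok iff code_start < pc_reg < code_end; otherwise falls through
// (both failure branches end up at L_fail).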
1008 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
1009 address code_start, address code_end,
1010 Label& L_ok) {
1011 Label L_fail;
1012 __ lea(temp_reg, ExternalAddress(code_start));
1013 __ cmpptr(pc_reg, temp_reg);
1014 __ jcc(Assembler::belowEqual, L_fail);
1015 __ lea(temp_reg, ExternalAddress(code_end));
1016 __ cmpptr(pc_reg, temp_reg);
1017 __ jcc(Assembler::below, L_ok);
1018 __ bind(L_fail);
1019 }
1020
1021 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
1022 int comp_args_on_stack,
1023 const GrowableArray<SigEntry>* sig,
1024 const VMRegPair *regs) {
1025
1026 // Note: r13 contains the senderSP on entry. We must preserve it since
1027 // we may do an i2c -> c2i transition if we lose a race where compiled
1028 // code goes non-entrant while we get args ready.
1029 // In addition we use r13 to locate all the interpreter args as
1030 // we must align the stack to 16 bytes on an i2c entry else we
1031 // lose alignment we expect in all compiled code and register
1032 // save code can segv when fxsave instructions find improperly
1033 // aligned stack pointer.
1034
1035 // Adapters can be frameless because they do not require the caller
1036 // to perform additional cleanup work, such as correcting the stack pointer.
1037 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1038 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1039 // even if a callee has modified the stack pointer.
1040 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1041 // routinely repairs its caller's stack pointer (from sender_sp, which is set
1042 // up via the senderSP register).
1043 // In other words, if *either* the caller or callee is interpreted, we can
1094 // Convert 4-byte c2 stack slots to words.
1095 int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1096
1097 if (comp_args_on_stack) {
1098 __ subptr(rsp, comp_words_on_stack * wordSize);
1099 }
1100
1101 // Ensure compiled code always sees stack at proper alignment
1102 __ andptr(rsp, -16);
1103
1104 // push the return address, misaligning the stack so that the youngest frame
1105 // sees rsp exactly as it would immediately after a call instruction
1106 __ push(rax);
1107
1108 // Put saved SP in another register
1109 const Register saved_sp = rax;
1110 __ movptr(saved_sp, r11);
1111
1112 // Will jump to the compiled code just as if compiled code was doing it.
1113 // Pre-load the register-jump target early, to schedule it better.
1114 __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_inline_offset())));
1115
1116 #if INCLUDE_JVMCI
1117 if (EnableJVMCI) {
1118 // check if this call should be routed towards a specific entry point
1119 __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1120 Label no_alternative_target;
1121 __ jcc(Assembler::equal, no_alternative_target);
1122 __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1123 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1124 __ bind(no_alternative_target);
1125 }
1126 #endif // INCLUDE_JVMCI
1127
1128 int total_args_passed = sig->length();
1129
1130 // Now generate the shuffle code. Pick up all register args and move the
1131 // rest through the floating point stack top.
1132 for (int i = 0; i < total_args_passed; i++) {
1133 BasicType bt = sig->at(i)._bt;
1134 if (bt == T_VOID) {
1135 // Longs and doubles are passed in native word order, but misaligned
1136 // in the 32-bit build.
1137 BasicType prev_bt = (i > 0) ? sig->at(i-1)._bt : T_ILLEGAL;
1138 assert(i > 0 && (prev_bt == T_LONG || prev_bt == T_DOUBLE), "missing half");
1139 continue;
1140 }
1141
1142 // Pick up 0, 1 or 2 words from SP+offset.
1143
1144 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
1145 "scrambled load targets?");
1146 // Load in argument order going down.
1147 int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
1148 // Point to interpreter value (vs. tag)
1149 int next_off = ld_off - Interpreter::stackElementSize;
1150 //
1151 //
1152 //
1153 VMReg r_1 = regs[i].first();
1154 VMReg r_2 = regs[i].second();
1155 if (!r_1->is_valid()) {
1156 assert(!r_2->is_valid(), "");
1157 continue;
1158 }
1159 if (r_1->is_stack()) {
1160 // Convert stack slot to an SP offset (+ wordSize to account for return address)
1161 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
1162
1163 // We can use r13 as a temp here because compiled code doesn't need r13 as an input
1164 // and if we end up going thru a c2i because of a miss, a reasonable value of r13
1165 // will be generated.
1166 if (!r_2->is_valid()) {
1167 // sign extend???
1168 __ movl(r13, Address(saved_sp, ld_off));
1169 __ movptr(Address(rsp, st_off), r13);
1170 } else {
1171 //
1172 // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
1173 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
1174 // So we must adjust where to pick up the data to match the interpreter.
1175 //
1176 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
1177 // are accessed as negative so LSW is at LOW address
1178
1179 // ld_off is MSW so get LSW
1180 const int offset = (bt==T_LONG||bt==T_DOUBLE)?
1181 next_off : ld_off;
1182 __ movq(r13, Address(saved_sp, offset));
1183 // st_off is LSW (i.e. reg.first())
1184 __ movq(Address(rsp, st_off), r13);
1185 }
1186 } else if (r_1->is_Register()) { // Register argument
1187 Register r = r_1->as_Register();
1188 assert(r != rax, "must be different");
1189 if (r_2->is_valid()) {
1190 //
1191 // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
1192 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
1193 // So we must adjust where to pick up the data to match the interpreter.
1194
1195 const int offset = (bt==T_LONG||bt==T_DOUBLE)?
1196 next_off : ld_off;
1197
1198 // this can be a misaligned move
1199 __ movq(r, Address(saved_sp, offset));
1200 } else {
1201 // sign extend and use a full word?
1202 __ movl(r, Address(saved_sp, ld_off));
1203 }
1204 } else {
1205 if (!r_2->is_valid()) {
1206 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
1207 } else {
1208 __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
1209 }
1210 }
1211 }
1212
1213 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1214
1215 // 6243940 We might end up in handle_wrong_method if
1216 // the callee is deoptimized as we race thru here. If that
1217 // happens we don't want to take a safepoint because the
1218 // caller frame will look interpreted and arguments are now
1219 // "compiled" so it is much better to make this transition
1220 // invisible to the stack walking code. Unfortunately if
1221 // we try and find the callee by normal means a safepoint
1222 // is possible. So we stash the desired callee in the thread
1223 // and the VM will find it there should this case occur.
1224
1225 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1226
1227 // put Method* where a c2i would expect it should we end up there
1228 // only needed because c2 resolve stubs return Method* as a result in
1229 // rax
1230 __ mov(rax, rbx);
1231 __ jmp(r11);
1232 }
1233
1234 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
1235 Label ok;
1236
1237 Register holder = rax;
1238 Register receiver = j_rarg0;
1239 Register temp = rbx;
1240
1241 __ load_klass(temp, receiver, rscratch1);
1242 __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1243 __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
1244 __ jcc(Assembler::equal, ok);
1245 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1246
1247 __ bind(ok);
1248 // Method might have been compiled since the call site was patched to
1249 // interpreted; if that is the case treat it as a miss so we can get
1250 // the call site corrected.
1251 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1252 __ jcc(Assembler::equal, skip_fixup);
1253 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1254 }
1255
1256 // ---------------------------------------------------------------
1257 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1258 int comp_args_on_stack,
1259 const GrowableArray<SigEntry>* sig,
1260 const VMRegPair* regs,
1261 const GrowableArray<SigEntry>* sig_cc,
1262 const VMRegPair* regs_cc,
1263 const GrowableArray<SigEntry>* sig_cc_ro,
1264 const VMRegPair* regs_cc_ro,
1265 AdapterFingerPrint* fingerprint,
1266 AdapterBlob*& new_adapter,
1267 bool allocate_code_blob) {
1268 address i2c_entry = __ pc();
1269 gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1270
1271 // -------------------------------------------------------------------------
1272 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
1273 // to the interpreter. The args start out packed in the compiled layout. They
1274 // need to be unpacked into the interpreter layout. This will almost always
1275 // require some stack space. We grow the current (compiled) stack, then repack
1276 // the args. We finally end in a jump to the generic interpreter entry point.
1277 // On exit from the interpreter, the interpreter will restore our SP (lest the
1278 // compiled code, which relies solely on SP and not RBP, get sick).
1279
1280 address c2i_unverified_entry = __ pc();
1281 address c2i_unverified_inline_entry = __ pc();
1282 Label skip_fixup;
1283
1284 gen_inline_cache_check(masm, skip_fixup);
1285
1286 OopMapSet* oop_maps = new OopMapSet();
1287 int frame_complete = CodeOffsets::frame_never_safe;
1288 int frame_size_in_words = 0;
1289
1290 // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1291 address c2i_no_clinit_check_entry = nullptr;
1292 address c2i_inline_ro_entry = __ pc();
1293 if (regs_cc != regs_cc_ro) {
1294 // No class init barrier needed because method is guaranteed to be non-static
1295 gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1296 skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1297 skip_fixup.reset();
1298 }
1299
1300 // Scalarized c2i adapter
1301 address c2i_entry = __ pc();
1302 address c2i_inline_entry = __ pc();
1303 gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1304 skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1305
1306 // Non-scalarized c2i adapter
1307 if (regs != regs_cc) {
1308 c2i_unverified_inline_entry = __ pc();
1309 Label inline_entry_skip_fixup;
1310 gen_inline_cache_check(masm, inline_entry_skip_fixup);
1311
1312 c2i_inline_entry = __ pc();
1313 gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1314 inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1315 }
1316
1317
1318 // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
1319 // the GC knows about the location of oop argument locations passed to the c2i adapter.
1320 if (allocate_code_blob) {
1321 bool caller_must_gc_arguments = (regs != regs_cc);
1322 new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1323 }
1324
1325 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1326 }
1327
1328 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1329 VMRegPair *regs,
1330 int total_args_passed) {
1331
1332 // We return the number of VMRegImpl stack slots we need to reserve for all
1333 // the arguments NOT counting out_preserve_stack_slots.
1334
1335 // NOTE: These arrays will have to change when c1 is ported
1336 #ifdef _WIN64
1337 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1338 c_rarg0, c_rarg1, c_rarg2, c_rarg3
1339 };
1340 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1341 c_farg0, c_farg1, c_farg2, c_farg3
1342 };
1343 #else
1344 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1345 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
2414 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2415
2416 // Get the handle (the 2nd argument)
2417 __ mov(oop_handle_reg, c_rarg1);
2418
2419 // Get address of the box
2420
2421 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2422
2423 // Load the oop from the handle
2424 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2425
2426 if (LockingMode == LM_MONITOR) {
2427 __ jmp(slow_path_lock);
2428 } else if (LockingMode == LM_LEGACY) {
2429 // Load immediate 1 into swap_reg %rax
2430 __ movl(swap_reg, 1);
2431
2432 // Load (object->mark() | 1) into swap_reg %rax
2433 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2434 if (EnableValhalla) {
2435 // Mask inline_type bit such that we go to the slow path if object is an inline type
2436 __ andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
2437 }
2438
2439 // Save (object->mark() | 1) into BasicLock's displaced header
2440 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2441
2442 // src -> dest iff dest == rax else rax <- dest
2443 __ lock();
2444 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2445 __ jcc(Assembler::equal, count_mon);
2446
2447 // Hmm should this move to the slow path code area???
2448
2449 // Test if the oopMark is an obvious stack pointer, i.e.,
2450 // 1) (mark & 3) == 0, and
2451 // 2) rsp <= mark < rsp + os::pagesize()
2452 // These 3 tests can be done by evaluating the following
2453 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2454 // assuming both stack pointer and pagesize have their
2455 // least significant 2 bits clear.
2456 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
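// Worked example with a 4K page: 3 - 4096 == ...F003 in two's complement, so
// the AND comes out zero exactly when (mark - rsp) has its low 2 bits clear
// and is below the page size, i.e. mark is an aligned address in
// [rsp, rsp + 4096).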
2457
3979 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
3980 #endif
3981 // Clear the exception oop so GC no longer processes it as a root.
3982 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
3983
3984 // rax: exception oop
3985 // r8: exception handler
3986 // rdx: exception pc
3987 // Jump to handler
3988
3989 __ jmp(r8);
3990
3991 // Make sure all code is generated
3992 masm->flush();
3993
3994 // Set exception blob
3995 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3996 }
3997 #endif // COMPILER2
3998
3999 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
4000 BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
4001 CodeBuffer buffer(buf);
4002 short buffer_locs[20];
4003 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
4004 sizeof(buffer_locs)/sizeof(relocInfo));
4005
4006 MacroAssembler* masm = new MacroAssembler(&buffer);
4007
4008 const Array<SigEntry>* sig_vk = vk->extended_sig();
4009 const Array<VMRegPair>* regs = vk->return_regs();
4010
4011 int pack_fields_jobject_off = __ offset();
4012 // Resolve pre-allocated buffer from JNI handle.
4013 // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
4014 __ movptr(rax, Address(r13, 0));
4015 __ resolve_jobject(rax /* value */,
4016 r15_thread /* thread */,
4017 r12 /* tmp */);
4018 __ movptr(Address(r13, 0), rax);
4019
4020 int pack_fields_off = __ offset();
4021
4022 int j = 1;
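// j starts at 1: regs->at(0) presumably pairs with the leading T_METADATA
// entry of the extended signature (the inline type itself, not a field),
// which the loop below skips without consuming a register pair.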
4023 for (int i = 0; i < sig_vk->length(); i++) {
4024 BasicType bt = sig_vk->at(i)._bt;
4025 if (bt == T_METADATA) {
4026 continue;
4027 }
4028 if (bt == T_VOID) {
4029 if (sig_vk->at(i-1)._bt == T_LONG ||
4030 sig_vk->at(i-1)._bt == T_DOUBLE) {
4031 j++;
4032 }
4033 continue;
4034 }
4035 int off = sig_vk->at(i)._offset;
4036 assert(off > 0, "offset in object should be positive");
4037 VMRegPair pair = regs->at(j);
4038 VMReg r_1 = pair.first();
4039 VMReg r_2 = pair.second();
4040 Address to(rax, off);
4041 if (bt == T_FLOAT) {
4042 __ movflt(to, r_1->as_XMMRegister());
4043 } else if (bt == T_DOUBLE) {
4044 __ movdbl(to, r_1->as_XMMRegister());
4045 } else {
4046 Register val = r_1->as_Register();
4047 assert_different_registers(to.base(), val, r14, r13, rbx, rscratch1);
4048 if (is_reference_type(bt)) {
4049 __ store_heap_oop(to, val, r14, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
4050 } else {
4051 __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
4052 }
4053 }
4054 j++;
4055 }
4056 assert(j == regs->length(), "missed a field?");
4057
4058 __ ret(0);
4059
4060 int unpack_fields_off = __ offset();
4061
4062 Label skip;
4063 __ testptr(rax, rax);
4064 __ jcc(Assembler::zero, skip);
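// Nothing to unpack if the buffered value (in rax) is null: skip the field
// loads and return with the registers untouched.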
4065
4066 j = 1;
4067 for (int i = 0; i < sig_vk->length(); i++) {
4068 BasicType bt = sig_vk->at(i)._bt;
4069 if (bt == T_METADATA) {
4070 continue;
4071 }
4072 if (bt == T_VOID) {
4073 if (sig_vk->at(i-1)._bt == T_LONG ||
4074 sig_vk->at(i-1)._bt == T_DOUBLE) {
4075 j++;
4076 }
4077 continue;
4078 }
4079 int off = sig_vk->at(i)._offset;
4080 assert(off > 0, "offset in object should be positive");
4081 VMRegPair pair = regs->at(j);
4082 VMReg r_1 = pair.first();
4083 VMReg r_2 = pair.second();
4084 Address from(rax, off);
4085 if (bt == T_FLOAT) {
4086 __ movflt(r_1->as_XMMRegister(), from);
4087 } else if (bt == T_DOUBLE) {
4088 __ movdbl(r_1->as_XMMRegister(), from);
4089 } else if (bt == T_OBJECT || bt == T_ARRAY) {
4090 assert_different_registers(rax, r_1->as_Register());
4091 __ load_heap_oop(r_1->as_Register(), from);
4092 } else {
4093 assert(is_java_primitive(bt), "unexpected basic type");
4094 assert_different_registers(rax, r_1->as_Register());
4095 size_t size_in_bytes = type2aelembytes(bt);
4096 __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4097 }
4098 j++;
4099 }
4100 assert(j == regs->length(), "missed a field?");
4101
4102 __ bind(skip);
4103 __ ret(0);
4104
4105 __ flush();
4106
4107 return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
4108 }