
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef _WINDOWS
  26 #include "alloca.h"
  27 #endif
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"

  30 #include "code/compiledIC.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/nativeInst.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/collectedHeap.hpp"
  36 #include "gc/shared/gcLocker.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/barrierSetAssembler.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "logging/log.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "memory/universe.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "oops/method.inline.hpp"
  45 #include "prims/methodHandles.hpp"
  46 #include "runtime/continuation.hpp"
  47 #include "runtime/continuationEntry.inline.hpp"
  48 #include "runtime/globals.hpp"
  49 #include "runtime/jniHandles.hpp"

 616       break;
 617     case T_DOUBLE:
 618       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 619       if (fp_args < Argument::n_float_register_parameters_j) {
 620         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 621       } else {
 622         stk_args = align_up(stk_args, 2);
 623         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 624         stk_args += 2;
 625       }
 626       break;
 627     default:
 628       ShouldNotReachHere();
 629       break;
 630     }
 631   }
 632 
 633   return stk_args;
 634 }
 635 
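To make the T_DOUBLE stack-slot accounting above concrete, here is a minimal standalone sketch of the bookkeeping once the FP argument registers are exhausted (illustrative values only; the plain bit-twiddling stands in for the HotSpot align_up helper):

    // Each 4-byte VMRegImpl slot is half a 64-bit word, so a double needs
    // a pair of slots starting at an even slot index.
    int stk_args = 5;               // suppose 5 slots are already claimed
    stk_args = (stk_args + 1) & ~1; // align_up(stk_args, 2) -> 6
    // regs[i].set2(VMRegImpl::stack2reg(stk_args)) records slots 6 and 7
    stk_args += 2;                  // next free slot is 8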
 636 // Patch the caller's callsite with entry to compiled code if it exists.
 637 static void patch_callers_callsite(MacroAssembler *masm) {
 638   Label L;
 639   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 640   __ jcc(Assembler::equal, L);
 641 
 642   // Save the current stack pointer
 643   __ mov(r13, rsp);
 644   // Schedule the branch target address early.
 645   // Call into the VM to patch the caller, then jump to compiled callee
 646   // rax isn't live so capture return address while we easily can
 647   __ movptr(rax, Address(rsp, 0));
 648 
 649   // align stack so push_CPU_state doesn't fault
 650   __ andptr(rsp, -(StackAlignmentInBytes));
 651   __ push_CPU_state();
 652   __ vzeroupper();
 653   // VM needs caller's callsite
 654   // VM needs target method
 655   // This needs to be a long call since we will relocate this adapter to
 656   // the codeBuffer and it may not reach
 657 
 658   // Allocate argument register save area
 659   if (frame::arg_reg_save_area_bytes != 0) {
 660     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 661   }
 662   __ mov(c_rarg0, rbx);
 663   __ mov(c_rarg1, rax);
 664   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 665 
 666   // De-allocate argument register save area
 667   if (frame::arg_reg_save_area_bytes != 0) {
 668     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 669   }
 670 
 671   __ vzeroupper();
 672   __ pop_CPU_state();
 673   // restore sp
 674   __ mov(rsp, r13);
 675   __ bind(L);
 676 }
 677 
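At runtime the code emitted above behaves roughly like the following pseudo-C++ (a hedged sketch of the emitted instruction sequence, not code from this file):

    if (method->code() != nullptr) {            // cmpptr against code_offset()
      address ret_pc = *(address*)rsp;          // return address sits at top of stack
      // save rsp in r13, align rsp, push_CPU_state(), then:
      SharedRuntime::fixup_callers_callsite(method /* rbx */, ret_pc /* rax */);
      // pop_CPU_state() and restore the original rsp from r13
    }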
 678 
 679 static void gen_c2i_adapter(MacroAssembler *masm,
 680                             int total_args_passed,
 681                             int comp_args_on_stack,
 682                             const BasicType *sig_bt,
 683                             const VMRegPair *regs,
 684                             Label& skip_fixup) {
 685   // Before we get into the guts of the C2I adapter, see if we should be here
 686   // at all.  We've come from compiled code and are attempting to jump to the
 687   // interpreter, which means the caller made a static call to get here
 688   // (vcalls always get a compiled target if there is one).  Check for a
 689   // compiled target.  If there is one, we need to patch the caller's call.
 690   patch_callers_callsite(masm);
 691 
 692   __ bind(skip_fixup);
 693 
 694   // Since all args are passed on the stack, total_args_passed *
 695   // Interpreter::stackElementSize is the space we need.
 696 
 697   assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
 698 
 699   int extraspace = (total_args_passed * Interpreter::stackElementSize);
 700 
 701   // stack is aligned, keep it that way
 702   // This is not currently needed or enforced by the interpreter, but
 703   // we might as well conform to the ABI.
 704   extraspace = align_up(extraspace, 2*wordSize);
 705 
 706   // set senderSP value
 707   __ lea(r13, Address(rsp, wordSize));
 708 
 709 #ifdef ASSERT
 710   __ check_stack_alignment(r13, "sender stack not aligned");
 711 #endif
 712   if (extraspace > 0) {
 713     // Pop the return address
 714     __ pop(rax);
 715 
 716     __ subptr(rsp, extraspace);
 717 
 718     // Push the return address
 719     __ push(rax);
 720 
 721     // Account for the return address location since we store it first rather
 722     // than hold it in a register across all the shuffling
 723     extraspace += wordSize;
 724   }
 725 
 726 #ifdef ASSERT
 727   __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
 728 #endif
 729 
 730   // Now write the args into the outgoing interpreter space
 731   for (int i = 0; i < total_args_passed; i++) {
 732     if (sig_bt[i] == T_VOID) {
 733       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 734       continue;
 735     }
 736 
 737     // offset to start parameters
 738     int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
 739     int next_off = st_off - Interpreter::stackElementSize;
 740 
 741     // Say 4 args:
 742     // i   st_off
 743     // 0   32 T_LONG
 744     // 1   24 T_VOID
 745     // 2   16 T_OBJECT
 746     // 3    8 T_BOOL
 747     // -    0 return address
 748     //
 749     // However, to make things extra confusing: because we can fit a long/double in
 750     // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 751     // leaves one slot empty and only stores to a single slot. In this case the
 752     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 753 
 754     VMReg r_1 = regs[i].first();
 755     VMReg r_2 = regs[i].second();
 756     if (!r_1->is_valid()) {
 757       assert(!r_2->is_valid(), "");
 758       continue;
 759     }
 760     if (r_1->is_stack()) {
 761       // memory to memory use rax
 762       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 763       if (!r_2->is_valid()) {
 764         // sign extend??
 765         __ movl(rax, Address(rsp, ld_off));
 766         __ movptr(Address(rsp, st_off), rax);
 767 
 768       } else {
 769 
 770         __ movq(rax, Address(rsp, ld_off));
 771 
 772         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 773         // T_DOUBLE and T_LONG use two slots in the interpreter
 774         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 775           // ld_off == LSW, ld_off+wordSize == MSW
 776           // st_off == MSW, next_off == LSW
 777           __ movq(Address(rsp, next_off), rax);
 778 #ifdef ASSERT
 779           // Overwrite the unused slot with known junk
 780           __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 781           __ movptr(Address(rsp, st_off), rax);
 782 #endif /* ASSERT */
 783         } else {
 784           __ movq(Address(rsp, st_off), rax);
 785         }
 786       }
 787     } else if (r_1->is_Register()) {
 788       Register r = r_1->as_Register();
 789       if (!r_2->is_valid()) {
 790         // must be only an int (or smaller) so move only 32 bits to the slot
 791         // why not sign extend??
 792         __ movl(Address(rsp, st_off), r);
 793       } else {
 794         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 795         // T_DOUBLE and T_LONG use two slots in the interpreter
 796         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 797           // long/double in gpr
 798 #ifdef ASSERT
 799           // Overwrite the unused slot with known junk
 800           __ mov64(rax, CONST64(0xdeadffffdeadaaab));
 801           __ movptr(Address(rsp, st_off), rax);
 802 #endif /* ASSERT */
 803           __ movq(Address(rsp, next_off), r);
 804         } else {
 805           __ movptr(Address(rsp, st_off), r);
 806         }
 807       }
 808     } else {
 809       assert(r_1->is_XMMRegister(), "");
 810       if (!r_2->is_valid()) {
 811         // only a float; use just part of the slot
 812         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 813       } else {
 814 #ifdef ASSERT
 815         // Overwrite the unused slot with known junk
 816         __ mov64(rax, CONST64(0xdeadffffdeadaaac));
 817         __ movptr(Address(rsp, st_off), rax);
 818 #endif /* ASSERT */
 819         __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
 820       }
 821     }
 822   }
 823 
 824   // Schedule the branch target address early.
 825   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 826   __ jmp(rcx);
 827 }
 828 
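Plugging the 4-arg example from the comment above into the offset formulas, with Interpreter::stackElementSize == 8 (an illustrative walk-through, not code from this file):

    // i  sig_bt     st_off = (4 - i) * 8    next_off = st_off - 8
    // 0  T_LONG     32                      24  <- movq stores the long at next_off
    // 1  T_VOID     (skipped)
    // 2  T_OBJECT   16                          <- movptr stores at st_off
    // 3  T_BOOL      8                          <- movl stores at st_off
    //                0 = return address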
 829 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 830                         address code_start, address code_end,
 831                         Label& L_ok) {
 832   Label L_fail;
 833   __ lea(temp_reg, AddressLiteral(code_start, relocInfo::none));
 834   __ cmpptr(pc_reg, temp_reg);
 835   __ jcc(Assembler::belowEqual, L_fail);
 836   __ lea(temp_reg, AddressLiteral(code_end, relocInfo::none));
 837   __ cmpptr(pc_reg, temp_reg);
 838   __ jcc(Assembler::below, L_ok);
 839   __ bind(L_fail);
 840 }
 841 
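In plain C++, the compare/branch sequence emitted by range_check is equivalent to (unsigned comparisons, as the jcc conditions imply):

    if (pc > code_start && pc < code_end) {
      goto L_ok;    // pc lies strictly between code_start and code_end
    }
    // otherwise fall through to L_fail, bound at the end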
 842 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 843                                     int total_args_passed,
 844                                     int comp_args_on_stack,
 845                                     const BasicType *sig_bt,
 846                                     const VMRegPair *regs) {
 847 
 848   // Note: r13 contains the senderSP on entry. We must preserve it since
 849   // we may do an i2c -> c2i transition if we lose a race where compiled
 850   // code goes non-entrant while we get args ready.
 851   // In addition we use r13 to locate all the interpreter args as
 852   // we must align the stack to 16 bytes on an i2c entry else we
 853   // lose the alignment we expect in all compiled code, and register
 854   // save code can segv when fxsave instructions find an improperly
 855   // aligned stack pointer.
 856 
 857   // Adapters can be frameless because they do not require the caller
 858   // to perform additional cleanup work, such as correcting the stack pointer.
 859   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 860   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 861   // even if a callee has modified the stack pointer.
 862   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 863   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 864   // up via the senderSP register).
 865   // In other words, if *either* the caller or callee is interpreted, we can

 916   // Convert 4-byte c2 stack slots to words.
 917   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 918 
 919   if (comp_args_on_stack) {
 920     __ subptr(rsp, comp_words_on_stack * wordSize);
 921   }
 922 
 923   // Ensure compiled code always sees stack at proper alignment
 924   __ andptr(rsp, -16);
 925 
 926   // push the return address; this misaligns the stack just as the youngest
 927   // frame always sees it right after the placement of a call instruction
 928   __ push(rax);
 929 
 930   // Put saved SP in another register
 931   const Register saved_sp = rax;
 932   __ movptr(saved_sp, r11);
 933 
 934   // Will jump to the compiled code just as if compiled code was doing it.
 935   // Pre-load the register-jump target early, to schedule it better.
 936   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
 937 
 938 #if INCLUDE_JVMCI
 939   if (EnableJVMCI) {
 940     // check if this call should be routed towards a specific entry point
 941     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
 942     Label no_alternative_target;
 943     __ jcc(Assembler::equal, no_alternative_target);
 944     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 945     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
 946     __ bind(no_alternative_target);
 947   }
 948 #endif // INCLUDE_JVMCI
 949 
 950   // Now generate the shuffle code.  Pick up all register args and move the
 951   // rest through the floating point stack top.
 952   for (int i = 0; i < total_args_passed; i++) {
 953     if (sig_bt[i] == T_VOID) {

 954       // Longs and doubles are passed in native word order, but misaligned
 955       // in the 32-bit build.
 956       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");

 957       continue;
 958     }
 959 
 960     // Pick up 0, 1 or 2 words from SP+offset.
 961 
 962     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 963             "scrambled load targets?");
 964     // Load in argument order going down.
 965     int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
 966     // Point to interpreter value (vs. tag)
 967     int next_off = ld_off - Interpreter::stackElementSize;
 968     //
 969     //
 970     //
 971     VMReg r_1 = regs[i].first();
 972     VMReg r_2 = regs[i].second();
 973     if (!r_1->is_valid()) {
 974       assert(!r_2->is_valid(), "");
 975       continue;
 976     }
 977     if (r_1->is_stack()) {
 978       // Convert stack slot to an SP offset (+ wordSize to account for the return address)
 979       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 980 
 981       // We can use r13 as a temp here because compiled code doesn't need r13 as an input,
 982       // and if we end up going thru a c2i because of a miss, a reasonable value of r13
 983       // will be generated.
 984       if (!r_2->is_valid()) {
 985         // sign extend???
 986         __ movl(r13, Address(saved_sp, ld_off));
 987         __ movptr(Address(rsp, st_off), r13);
 988       } else {
 989         //
 990         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
 991         // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
 992         // so we must adjust where to pick up the data to match the interpreter.
 993         //
 994         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 995         // are accessed with negative offsets, so the LSW is at the LOW address
 996 
 997         // ld_off is MSW so get LSW
 998         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 999                            next_off : ld_off;
1000         __ movq(r13, Address(saved_sp, offset));
1001         // st_off is LSW (i.e. reg.first())
1002         __ movq(Address(rsp, st_off), r13);
1003       }
1004     } else if (r_1->is_Register()) {  // Register argument
1005       Register r = r_1->as_Register();
1006       assert(r != rax, "must be different");
1007       if (r_2->is_valid()) {
1008         //
 1009         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
 1010         // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
 1011         // so we must adjust where to pick up the data to match the interpreter.
1012 
1013         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
1014                            next_off : ld_off;
1015 
1016         // this can be a misaligned move
1017         __ movq(r, Address(saved_sp, offset));
1018       } else {
1019         // sign extend and use a full word?
1020         __ movl(r, Address(saved_sp, ld_off));
1021       }
1022     } else {
1023       if (!r_2->is_valid()) {
1024         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
1025       } else {
1026         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
1027       }
1028     }
1029   }
1030 
1031   __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1032 
1033   // 6243940 We might end up in handle_wrong_method if
1034   // the callee is deoptimized as we race thru here. If that
1035   // happens we don't want to take a safepoint because the
1036   // caller frame will look interpreted and arguments are now
1037   // "compiled" so it is much better to make this transition
1038   // invisible to the stack walking code. Unfortunately if
1039   // we try and find the callee by normal means a safepoint
1040   // is possible. So we stash the desired callee in the thread
 1041   // and the VM will find it there should this case occur.
1042 
1043   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1044 
 1045   // put Method* where a c2i would expect it should we end up there;
 1046   // only needed because c2's resolve stubs return the Method* as a result in
 1047   // rax
1048   __ mov(rax, rbx);
1049   __ jmp(r11);
1050 }
1051 
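Going the opposite direction from the c2i layout: for a single long argument, the ld_off/next_off math above picks the value up from the slot the interpreter actually wrote (a worked instance, not code from this file):

    // total_args_passed == 2, sig_bt == { T_LONG, T_VOID }
    // i == 0: ld_off   == 2 * 8 == 16   (the MSW slot, left empty)
    //         next_off ==          8   (the slot holding the long)
    // so movq(r, Address(saved_sp, next_off)) fetches the value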
1052 // ---------------------------------------------------------------
1053 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1054                                                             int total_args_passed,
1055                                                             int comp_args_on_stack,
1056                                                             const BasicType *sig_bt,
1057                                                             const VMRegPair *regs,
1058                                                             AdapterFingerPrint* fingerprint) {
1059   address i2c_entry = __ pc();
1060 
1061   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
1062 
1063   // -------------------------------------------------------------------------
1064   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
1065   // to the interpreter.  The args start out packed in the compiled layout.  They
1066   // need to be unpacked into the interpreter layout.  This will almost always
1067   // require some stack space.  We grow the current (compiled) stack, then repack
1068   // the args.  We  finally end in a jump to the generic interpreter entry point.
1069   // On exit from the interpreter, the interpreter will restore our SP (lest the
1070   // compiled code, which relies solely on SP and not RBP, get sick).
1071 
1072   address c2i_unverified_entry = __ pc();

1073   Label skip_fixup;
1074 
1075   Register data = rax;
1076   Register receiver = j_rarg0;
1077   Register temp = rbx;
1078 
1079   {
1080     __ ic_check(1 /* end_alignment */);
1081     __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
 1082     // Method might have been compiled since the call site was patched to
 1083     // interpreted; if that is the case, treat it as a miss so we can get
 1084     // the call site corrected.
1085     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1086     __ jcc(Assembler::equal, skip_fixup);
1087     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1088   }
1089 
1090   address c2i_entry = __ pc();
1091 
1092   // Class initialization barrier for static methods
1093   address c2i_no_clinit_check_entry = nullptr;
1094   if (VM_Version::supports_fast_class_init_checks()) {
1095     Label L_skip_barrier;
1096     Register method = rbx;
1097 
1098     { // Bypass the barrier for non-static methods
1099       Register flags = rscratch1;
1100       __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
1101       __ testl(flags, JVM_ACC_STATIC);
1102       __ jcc(Assembler::zero, L_skip_barrier); // non-static
1103     }
1104 
1105     Register klass = rscratch1;
1106     __ load_method_holder(klass, method);
1107     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
1108 
1109     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
1110 
1111     __ bind(L_skip_barrier);
1112     c2i_no_clinit_check_entry = __ pc();
1113   }
1114 
1115   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1116   bs->c2i_entry_barrier(masm);
1117 
1118   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1119 
1120   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1121 }
1122 
1123 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1124                                          VMRegPair *regs,
1125                                          int total_args_passed) {
1126 
 1127 // We return the number of VMRegImpl stack slots we need to reserve for all
1128 // the arguments NOT counting out_preserve_stack_slots.
1129 
1130 // NOTE: These arrays will have to change when c1 is ported
1131 #ifdef _WIN64
1132     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1133       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1134     };
1135     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1136       c_farg0, c_farg1, c_farg2, c_farg3
1137     };
1138 #else
1139     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1140       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5

2232     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2233 
2234     // Get the handle (the 2nd argument)
2235     __ mov(oop_handle_reg, c_rarg1);
2236 
2237     // Get address of the box
2238 
2239     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2240 
2241     // Load the oop from the handle
2242     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2243 
2244     if (LockingMode == LM_MONITOR) {
2245       __ jmp(slow_path_lock);
2246     } else if (LockingMode == LM_LEGACY) {
2247       // Load immediate 1 into swap_reg %rax
2248       __ movl(swap_reg, 1);
2249 
2250       // Load (object->mark() | 1) into swap_reg %rax
2251       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2252 
2253       // Save (object->mark() | 1) into BasicLock's displaced header
2254       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2255 
2256       // src -> dest iff dest == rax else rax <- dest
2257       __ lock();
2258       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2259       __ jcc(Assembler::equal, count_mon);
2260 
2261       // Hmm should this move to the slow path code area???
2262 
2263       // Test if the oopMark is an obvious stack pointer, i.e.,
2264       //  1) (mark & 3) == 0, and
 2265       //  2) rsp <= mark < rsp + os::pagesize()
2266       // These 3 tests can be done by evaluating the following
2267       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2268       // assuming both stack pointer and pagesize have their
2269       // least significant 2 bits clear.
2270       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2271 
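      // To see why the single AND works (an illustrative expansion, assuming
      // the page size is a power of two and rsp has its low two bits clear):
      //   uintptr_t diff = mark - rsp;
      //   bool stack_locked = (diff & (3 - page_size)) == 0;
      // Since 3 - page_size == (~(page_size - 1)) | 3, this simultaneously
      // checks (diff & 3) == 0 and 0 <= diff < page_size.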

3614   julong *scratch = (julong *)alloca(total_allocation);
3615 
3616   // Local scratch arrays
3617   julong
3618     *a = scratch + 0 * longwords,
3619     *n = scratch + 1 * longwords,
3620     *m = scratch + 2 * longwords;
3621 
3622   reverse_words((julong *)a_ints, a, longwords);
3623   reverse_words((julong *)n_ints, n, longwords);
3624 
3625   if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3626     ::montgomery_square(a, n, m, (julong)inv, longwords);
3627   } else {
3628     ::montgomery_multiply(a, a, n, m, (julong)inv, longwords);
3629   }
3630 
3631   reverse_words(m, (julong *)m_ints, longwords);
3632 }
3633 
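The reverse_words calls exist because the incoming word order is the opposite of what the montgomery_* primitives consume; the helper's behavior amounts to this minimal sketch (assumed shape, inferred from how it is used above):

    // Sketch only: copy len words of s into d in reverse order.
    static void reverse_words_sketch(julong* s, julong* d, int len) {
      for (int i = 0; i < len; i++) {
        d[i] = s[len - 1 - i];
      }
    }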
3634 #if INCLUDE_JFR
3635 
3636 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
3637 // It returns a jobject handle to the event writer.
3638 // The handle is dereferenced and the return value is the event writer oop.
3639 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3640   enum layout {
3641     rbp_off,
3642     rbpH_off,
3643     return_off,
3644     return_off2,
3645     framesize // inclusive of return address
3646   };
3647 
3648   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
3649   CodeBuffer code(name, 1024, 64);
3650   MacroAssembler* masm = new MacroAssembler(&code);
3651   address start = __ pc();
3652 
3653   __ enter();

3706   __ reset_last_Java_frame(true);
3707 
3708   __ leave();
3709   __ ret(0);
3710 
3711   OopMapSet* oop_maps = new OopMapSet();
3712   OopMap* map = new OopMap(framesize, 1);
3713   oop_maps->add_gc_map(frame_complete, map);
3714 
3715   RuntimeStub* stub =
3716     RuntimeStub::new_runtime_stub(name,
3717                                   &code,
3718                                   frame_complete,
3719                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3720                                   oop_maps,
3721                                   false);
3722   return stub;
3723 }
3724 
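The frame-size argument handed to RuntimeStub::new_runtime_stub above converts 32-bit VMRegImpl slots to 64-bit words; with the 4-entry layout enum that is:

    // framesize == 4 slots (saved rbp + return address, 2 slots each)
    // LogBytesPerWord - LogBytesPerInt == 3 - 2 == 1 on x86_64
    // framesize >> 1 == 2 words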
3725 #endif // INCLUDE_JFR
3726 

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef _WINDOWS
  26 #include "alloca.h"
  27 #endif
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/barrierSetAssembler.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "logging/log.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "oops/klass.inline.hpp"
  45 #include "oops/method.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/continuation.hpp"
  48 #include "runtime/continuationEntry.inline.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/jniHandles.hpp"

 617       break;
 618     case T_DOUBLE:
 619       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 620       if (fp_args < Argument::n_float_register_parameters_j) {
 621         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 622       } else {
 623         stk_args = align_up(stk_args, 2);
 624         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 625         stk_args += 2;
 626       }
 627       break;
 628     default:
 629       ShouldNotReachHere();
 630       break;
 631     }
 632   }
 633 
 634   return stk_args;
 635 }
 636 
 637 // Same as java_calling_convention() but for multiple return
 638 // values. There's no way to store them on the stack so if we don't
 639 // have enough registers, multiple values can't be returned.
 640 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
 641 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 642 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
 643                                           VMRegPair *regs,
 644                                           int total_args_passed) {
 645   // Create the mapping between argument positions and
 646   // registers.
 647   static const Register INT_ArgReg[java_return_convention_max_int] = {
 648     rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 649   };
 650   static const XMMRegister FP_ArgReg[java_return_convention_max_float] = {
 651     j_farg0, j_farg1, j_farg2, j_farg3,
 652     j_farg4, j_farg5, j_farg6, j_farg7
 653   };
 654 
 655 
 656   uint int_args = 0;
 657   uint fp_args = 0;
 658 
 659   for (int i = 0; i < total_args_passed; i++) {
 660     switch (sig_bt[i]) {
 661     case T_BOOLEAN:
 662     case T_CHAR:
 663     case T_BYTE:
 664     case T_SHORT:
 665     case T_INT:
 666       if (int_args < Argument::n_int_register_parameters_j+1) {
 667         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 668         int_args++;
 669       } else {
 670         return -1;
 671       }
 672       break;
 673     case T_VOID:
 674       // halves of T_LONG or T_DOUBLE
 675       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 676       regs[i].set_bad();
 677       break;
 678     case T_LONG:
 679       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 680       // fall through
 681     case T_OBJECT:
 682     case T_ARRAY:
 683     case T_ADDRESS:
 684     case T_METADATA:
 685       if (int_args < Argument::n_int_register_parameters_j+1) {
 686         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 687         int_args++;
 688       } else {
 689         return -1;
 690       }
 691       break;
 692     case T_FLOAT:
 693       if (fp_args < Argument::n_float_register_parameters_j) {
 694         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 695         fp_args++;
 696       } else {
 697         return -1;
 698       }
 699       break;
 700     case T_DOUBLE:
 701       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 702       if (fp_args < Argument::n_float_register_parameters_j) {
 703         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 704         fp_args++;
 705       } else {
 706         return -1;
 707       }
 708       break;
 709     default:
 710       ShouldNotReachHere();
 711       break;
 712     }
 713   }
 714 
 715   return int_args + fp_args;
 716 }
 717 
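For example, a method returning an (int, long) pair would map like this under the convention above (an illustrative trace, not code from this file):

    // sig_bt == { T_INT, T_LONG, T_VOID }   (T_VOID is the long's upper half)
    // regs[0].set1(rax)       -> first int-class value returned in rax
    // regs[1].set2(j_rarg5)   -> long returned in j_rarg5
    // regs[2].set_bad()       -> the T_VOID half gets no register
    // result: int_args + fp_args == 2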
 718 // Patch the caller's callsite with entry to compiled code if it exists.
 719 static void patch_callers_callsite(MacroAssembler *masm) {
 720   Label L;
 721   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 722   __ jcc(Assembler::equal, L);
 723 
 724   // Save the current stack pointer
 725   __ mov(r13, rsp);
 726   // Schedule the branch target address early.
 727   // Call into the VM to patch the caller, then jump to compiled callee
 728   // rax isn't live so capture return address while we easily can
 729   __ movptr(rax, Address(rsp, 0));
 730 
 731   // align stack so push_CPU_state doesn't fault
 732   __ andptr(rsp, -(StackAlignmentInBytes));
 733   __ push_CPU_state();
 734   __ vzeroupper();
 735   // VM needs caller's callsite
 736   // VM needs target method
 737   // This needs to be a long call since we will relocate this adapter to
 738   // the codeBuffer and it may not reach
 739 
 740   // Allocate argument register save area
 741   if (frame::arg_reg_save_area_bytes != 0) {
 742     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 743   }
 744   __ mov(c_rarg0, rbx);
 745   __ mov(c_rarg1, rax);
 746   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 747 
 748   // De-allocate argument register save area
 749   if (frame::arg_reg_save_area_bytes != 0) {
 750     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 751   }
 752 
 753   __ vzeroupper();
 754   __ pop_CPU_state();
 755   // restore sp
 756   __ mov(rsp, r13);
 757   __ bind(L);
 758 }
 759 
 760 // For each inline type argument, sig includes the list of fields of
 761 // the inline type. This utility function computes the number of
 762 // arguments for the call if inline types are passed by reference (the
 763 // calling convention the interpreter expects).
 764 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 765   int total_args_passed = 0;
 766   if (InlineTypePassFieldsAsArgs) {
 767     for (int i = 0; i < sig_extended->length(); i++) {
 768       BasicType bt = sig_extended->at(i)._bt;
 769       if (bt == T_METADATA) {
 770         // In sig_extended, an inline type argument starts with:
 771         // T_METADATA, followed by the types of the fields of the
 772         // inline type and T_VOID to mark the end of the inline
 773         // type. Inline types are flattened so, for instance, in the
 774         // case of an inline type with an int field and an inline type
 775         // field that itself has 2 fields, an int and a long:
 776         // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
 777         // slot for the T_LONG) T_VOID (inner inline type) T_VOID
 778         // (outer inline type)
 779         total_args_passed++;
 780         int vt = 1;
 781         do {
 782           i++;
 783           BasicType bt = sig_extended->at(i)._bt;
 784           BasicType prev_bt = sig_extended->at(i-1)._bt;
 785           if (bt == T_METADATA) {
 786             vt++;
 787           } else if (bt == T_VOID &&
 788                      prev_bt != T_LONG &&
 789                      prev_bt != T_DOUBLE) {
 790             vt--;
 791           }
 792         } while (vt != 0);
 793       } else {
 794         total_args_passed++;
 795       }
 796     }
 797   } else {
 798     total_args_passed = sig_extended->length();
 799   }
 800   return total_args_passed;
 801 }
 802 
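Tracing the flattened example from the comment above through the scan shows how the whole nested inline type collapses back to a single interpreter argument:

    // sig_extended: T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID
    //               T_VOID (inner inline type) T_VOID (outer inline type)
    // vt after each entry: 1, 1, 2, 2, 2, 2 (T_VOID after T_LONG is a half),
    //                      1 (inner closed), 0 (outer closed)
    // total_args_passed is incremented once for the entire argument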
 803 
 804 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 805                                    BasicType bt,
 806                                    BasicType prev_bt,
 807                                    size_t size_in_bytes,
 808                                    const VMRegPair& reg_pair,
 809                                    const Address& to,
 810                                    int extraspace,
 811                                    bool is_oop) {
 812   if (bt == T_VOID) {
 813     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 814     return;
 815   }
 816 
 817   // Say 4 args:
 818   // i   st_off
 819   // 0   32 T_LONG
 820   // 1   24 T_VOID
 821   // 2   16 T_OBJECT
 822   // 3    8 T_BOOL
 823   // -    0 return address
 824   //
 825   // However, to make things extra confusing: because we can fit a long/double in
 826   // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 827   // leaves one slot empty and only stores to a single slot. In this case the
 828   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 829 
 830   bool wide = (size_in_bytes == wordSize);
 831   VMReg r_1 = reg_pair.first();
 832   VMReg r_2 = reg_pair.second();
 833   assert(r_2->is_valid() == wide, "invalid size");
 834   if (!r_1->is_valid()) {
 835     assert(!r_2->is_valid(), "must be invalid");
 836     return;
 837   }
 838 
 839   if (!r_1->is_XMMRegister()) {
 840     Register val = rax;
 841     if (r_1->is_stack()) {
 842       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 843       __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 844     } else {
 845       val = r_1->as_Register();
 846     }
 847     assert_different_registers(to.base(), val, rscratch1);
 848     if (is_oop) {
 849       __ push(r13);
 850       __ push(rbx);
 851       __ store_heap_oop(to, val, rscratch1, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 852       __ pop(rbx);
 853       __ pop(r13);
 854     } else {
 855       __ store_sized_value(to, val, size_in_bytes);
 856     }
 857   } else {
 858     if (wide) {
 859       __ movdbl(to, r_1->as_XMMRegister());
 860     } else {
 861       __ movflt(to, r_1->as_XMMRegister());
 862     }
 863   }
 864 }
 865 
 866 static void gen_c2i_adapter(MacroAssembler *masm,
 867                             const GrowableArray<SigEntry>* sig_extended,
 868                             const VMRegPair *regs,
 869                             bool requires_clinit_barrier,
 870                             address& c2i_no_clinit_check_entry,
 871                             Label& skip_fixup,
 872                             address start,
 873                             OopMapSet* oop_maps,
 874                             int& frame_complete,
 875                             int& frame_size_in_words,
 876                             bool alloc_inline_receiver) {
 877   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 878     Label L_skip_barrier;
 879     Register method = rbx;
 880 
 881     { // Bypass the barrier for non-static methods
 882       Register flags = rscratch1;
 883       __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
 884       __ testl(flags, JVM_ACC_STATIC);
 885       __ jcc(Assembler::zero, L_skip_barrier); // non-static
 886     }
 887 
 888     Register klass = rscratch1;
 889     __ load_method_holder(klass, method);
 890     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
 891 
 892     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
 893 
 894     __ bind(L_skip_barrier);
 895     c2i_no_clinit_check_entry = __ pc();
 896   }
 897 
 898   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 899   bs->c2i_entry_barrier(masm);
 900 
 901   // Before we get into the guts of the C2I adapter, see if we should be here
 902   // at all.  We've come from compiled code and are attempting to jump to the
 903   // interpreter, which means the caller made a static call to get here
 904   // (vcalls always get a compiled target if there is one).  Check for a
 905   // compiled target.  If there is one, we need to patch the caller's call.
 906   patch_callers_callsite(masm);
 907 
 908   __ bind(skip_fixup);
 909 
 910   if (InlineTypePassFieldsAsArgs) {
 911     // Is there an inline type argument?
 912     bool has_inline_argument = false;
 913     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 914       has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
 915     }
 916     if (has_inline_argument) {
 917       // There is at least one inline type argument: we're coming from
 918       // compiled code so we have no buffers to back the inline types.
 919       // Allocate the buffers here with a runtime call.
 920       OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ false);
 921 
 922       frame_complete = __ offset();
 923 
 924       __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
 925 
 926       __ mov(c_rarg0, r15_thread);
 927       __ mov(c_rarg1, rbx);
 928       __ mov64(c_rarg2, (int64_t)alloc_inline_receiver);
 929       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 930 
 931       oop_maps->add_gc_map((int)(__ pc() - start), map);
 932       __ reset_last_Java_frame(false);
 933 
 934       RegisterSaver::restore_live_registers(masm);
 935 
 936       Label no_exception;
 937       __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
 938       __ jcc(Assembler::equal, no_exception);
 939 
 940       __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
 941       __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
 942       __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 943 
 944       __ bind(no_exception);
 945 
 946       // We get an array of objects from the runtime call
 947       __ get_vm_result(rscratch2, r15_thread); // Use rscratch2 (r11) as temporary because rscratch1 (r10) is trashed by movptr()
 948       __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live?
 949     }
 950   }
 951 
 952   // Since all args are passed on the stack, total_args_passed *
 953   // Interpreter::stackElementSize is the space we need.
 954   int total_args_passed = compute_total_args_passed_int(sig_extended);
 955   assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
 956 
 957   int extraspace = (total_args_passed * Interpreter::stackElementSize);
 958 
 959   // stack is aligned, keep it that way
 960   // This is not currently needed or enforced by the interpreter, but
 961   // we might as well conform to the ABI.
 962   extraspace = align_up(extraspace, 2*wordSize);
 963 
 964   // set senderSP value
 965   __ lea(r13, Address(rsp, wordSize));
 966 
 967 #ifdef ASSERT
 968   __ check_stack_alignment(r13, "sender stack not aligned");
 969 #endif
 970   if (extraspace > 0) {
 971     // Pop the return address
 972     __ pop(rax);
 973 
 974     __ subptr(rsp, extraspace);
 975 
 976     // Push the return address
 977     __ push(rax);
 978 
 979     // Account for the return address location since we store it first rather
 980     // than hold it in a register across all the shuffling
 981     extraspace += wordSize;
 982   }
 983 
 984 #ifdef ASSERT
 985   __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
 986 #endif
 987 
 988   // Now write the args into the outgoing interpreter space
 989 
 990   // next_arg_comp is the next argument from the compiler point of
 991   // view (inline type fields are passed in registers/on the stack). In
 992   // sig_extended, an inline type argument starts with: T_METADATA,
 993   // followed by the types of the fields of the inline type and T_VOID
 994   // to mark the end of the inline type. ignored counts the number of
 995   // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
 996   // used to get the buffer for that argument from the pool of buffers
 997   // we allocated above and want to pass to the
 998   // interpreter. next_arg_int is the next argument from the
 999   // interpreter point of view (inline types are passed by reference).
1000   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
1001        next_arg_comp < sig_extended->length(); next_arg_comp++) {
1002     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
1003     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
1004     BasicType bt = sig_extended->at(next_arg_comp)._bt;
1005     int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
1006     if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
1007       int next_off = st_off - Interpreter::stackElementSize;
1008       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
1009       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
1010       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
1011       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
1012                              size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
1013       next_arg_int++;
1014 #ifdef ASSERT
1015       if (bt == T_LONG || bt == T_DOUBLE) {
1016         // Overwrite the unused slot with known junk
1017         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
1018         __ movptr(Address(rsp, st_off), rax);
1019       }
1020 #endif /* ASSERT */
1021     } else {
1022       ignored++;
1023       // get the buffer from the just allocated pool of buffers
1024       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
1025       __ load_heap_oop(r14, Address(rscratch2, index));
1026       next_vt_arg++; next_arg_int++;
1027       int vt = 1;
1028       // write fields we get from compiled code in registers/stack
1029       // slots to the buffer: we know we are done with that inline type
1030       // argument when we hit the T_VOID that acts as an end of inline
1031       // type delimiter for this inline type. Inline types are flattened
1032       // so we might encounter embedded inline types. Each entry in
1033       // sig_extended contains a field offset in the buffer.
1034       Label L_null;
1035       do {
1036         next_arg_comp++;
1037         BasicType bt = sig_extended->at(next_arg_comp)._bt;
1038         BasicType prev_bt = sig_extended->at(next_arg_comp-1)._bt;
1039         if (bt == T_METADATA) {
1040           vt++;
1041           ignored++;
1042         } else if (bt == T_VOID &&
1043                    prev_bt != T_LONG &&
1044                    prev_bt != T_DOUBLE) {
1045           vt--;
1046           ignored++;
1047         } else {
1048           int off = sig_extended->at(next_arg_comp)._offset;
1049           if (off == -1) {
1050             // Nullable inline type argument, emit null check
1051             VMReg reg = regs[next_arg_comp-ignored].first();
1052             Label L_notNull;
1053             if (reg->is_stack()) {
1054               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
1055               __ testb(Address(rsp, ld_off), 1);
1056             } else {
1057               __ testb(reg->as_Register(), 1);
1058             }
1059             __ jcc(Assembler::notZero, L_notNull);
1060             __ movptr(Address(rsp, st_off), 0);
1061             __ jmp(L_null);
1062             __ bind(L_notNull);
1063             continue;
1064           }
1065           assert(off > 0, "offset in object should be positive");
1066           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
1067           bool is_oop = is_reference_type(bt);
1068           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
1069                                  size_in_bytes, regs[next_arg_comp-ignored], Address(r14, off), extraspace, is_oop);
1070         }
1071       } while (vt != 0);
1072       // pass the buffer to the interpreter
1073       __ movptr(Address(rsp, st_off), r14);
1074       __ bind(L_null);
1075     }
1076   }
1077 
1078   // Schedule the branch target address early.
1079   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
1080   __ jmp(rcx);
1081 }
1082 
1083 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
1084                         address code_start, address code_end,
1085                         Label& L_ok) {
1086   Label L_fail;
1087   __ lea(temp_reg, AddressLiteral(code_start, relocInfo::none));
1088   __ cmpptr(pc_reg, temp_reg);
1089   __ jcc(Assembler::belowEqual, L_fail);
1090   __ lea(temp_reg, AddressLiteral(code_end, relocInfo::none));
1091   __ cmpptr(pc_reg, temp_reg);
1092   __ jcc(Assembler::below, L_ok);
1093   __ bind(L_fail);
1094 }
1095 
1096 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,

1097                                     int comp_args_on_stack,
1098                                     const GrowableArray<SigEntry>* sig,
1099                                     const VMRegPair *regs) {
1100 
1101   // Note: r13 contains the senderSP on entry. We must preserve it since
 1102   // we may do an i2c -> c2i transition if we lose a race where compiled
1103   // code goes non-entrant while we get args ready.
1104   // In addition we use r13 to locate all the interpreter args as
1105   // we must align the stack to 16 bytes on an i2c entry else we
 1106   // lose the alignment we expect in all compiled code, and register
 1107   // save code can segv when fxsave instructions find an improperly
 1108   // aligned stack pointer.
1109 
1110   // Adapters can be frameless because they do not require the caller
1111   // to perform additional cleanup work, such as correcting the stack pointer.
1112   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1113   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1114   // even if a callee has modified the stack pointer.
1115   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1116   // routinely repairs its caller's stack pointer (from sender_sp, which is set
1117   // up via the senderSP register).
1118   // In other words, if *either* the caller or callee is interpreted, we can

1169   // Convert 4-byte c2 stack slots to words.
1170   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1171 
1172   if (comp_args_on_stack) {
1173     __ subptr(rsp, comp_words_on_stack * wordSize);
1174   }
1175 
1176   // Ensure compiled code always sees stack at proper alignment
1177   __ andptr(rsp, -16);
1178 
 1179   // push the return address; this misaligns the stack just as the youngest
 1180   // frame always sees it right after the placement of a call instruction
1181   __ push(rax);
1182 
1183   // Put saved SP in another register
1184   const Register saved_sp = rax;
1185   __ movptr(saved_sp, r11);
1186 
1187   // Will jump to the compiled code just as if compiled code was doing it.
1188   // Pre-load the register-jump target early, to schedule it better.
1189   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_inline_offset())));
1190 
1191 #if INCLUDE_JVMCI
1192   if (EnableJVMCI) {
1193     // check if this call should be routed towards a specific entry point
1194     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1195     Label no_alternative_target;
1196     __ jcc(Assembler::equal, no_alternative_target);
1197     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1198     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1199     __ bind(no_alternative_target);
1200   }
1201 #endif // INCLUDE_JVMCI
1202 
1203   int total_args_passed = sig->length();
1204 
1205   // Now generate the shuffle code.  Pick up all register args and move the
1206   // rest through the floating point stack top.
1207   for (int i = 0; i < total_args_passed; i++) {
1208     BasicType bt = sig->at(i)._bt;
1209     if (bt == T_VOID) {
1210       // Longs and doubles are passed in native word order, but misaligned
1211       // in the 32-bit build.
1212       BasicType prev_bt = (i > 0) ? sig->at(i-1)._bt : T_ILLEGAL;
1213       assert(i > 0 && (prev_bt == T_LONG || prev_bt == T_DOUBLE), "missing half");
1214       continue;
1215     }
1216 
1217     // Pick up 0, 1 or 2 words from SP+offset.
1218 
1219     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
1220             "scrambled load targets?");
1221     // Load in argument order going down.
1222     int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
1223     // Point to interpreter value (vs. tag)
1224     int next_off = ld_off - Interpreter::stackElementSize;
1225     //
1226     //
1227     //
1228     VMReg r_1 = regs[i].first();
1229     VMReg r_2 = regs[i].second();
1230     if (!r_1->is_valid()) {
1231       assert(!r_2->is_valid(), "");
1232       continue;
1233     }
 1234     if (r_1->is_stack()) {
 1235       // Convert stack slot to an SP offset (+ wordSize to account for the return address)
1236       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
1237 
 1238       // We can use r13 as a temp here because compiled code doesn't need r13 as an input,
 1239       // and if we end up going thru a c2i because of a miss, a reasonable value of r13
 1240       // will be generated.
1241       if (!r_2->is_valid()) {
1242         // sign extend???
1243         __ movl(r13, Address(saved_sp, ld_off));
1244         __ movptr(Address(rsp, st_off), r13);
1245       } else {
1246         //
 1247         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
 1248         // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
 1249         // so we must adjust where to pick up the data to match the interpreter.
1250         //
 1251         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 1252         // are accessed with negative offsets, so the LSW is at the LOW address
1253 
1254         // ld_off is MSW so get LSW
1255         const int offset = (bt==T_LONG||bt==T_DOUBLE)?
1256                            next_off : ld_off;
1257         __ movq(r13, Address(saved_sp, offset));
1258         // st_off is LSW (i.e. reg.first())
1259         __ movq(Address(rsp, st_off), r13);
1260       }
1261     } else if (r_1->is_Register()) {  // Register argument
1262       Register r = r_1->as_Register();
1263       assert(r != rax, "must be different");
1264       if (r_2->is_valid()) {
1265         //
 1266         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
 1267         // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
 1268         // so we must adjust where to pick up the data to match the interpreter.
1269 
1270         const int offset = (bt==T_LONG||bt==T_DOUBLE)?
1271                            next_off : ld_off;
1272 
1273         // this can be a misaligned move
1274         __ movq(r, Address(saved_sp, offset));
1275       } else {
1276         // sign extend and use a full word?
1277         __ movl(r, Address(saved_sp, ld_off));
1278       }
1279     } else {
1280       if (!r_2->is_valid()) {
1281         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
1282       } else {
1283         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
1284       }
1285     }
1286   }
1287 
1288   __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1289 
1290   // 6243940 We might end up in handle_wrong_method if
1291   // the callee is deoptimized as we race thru here. If that
1292   // happens we don't want to take a safepoint because the
1293   // caller frame will look interpreted and arguments are now
1294   // "compiled" so it is much better to make this transition
1295   // invisible to the stack walking code. Unfortunately if
1296   // we try and find the callee by normal means a safepoint
1297   // is possible. So we stash the desired callee in the thread
 1298   // and the VM will find it there should this case occur.
1299 
1300   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1301 
 1302   // put Method* where a c2i would expect it should we end up there;
 1303   // only needed because c2's resolve stubs return the Method* as a result in
 1304   // rax
1305   __ mov(rax, rbx);
1306   __ jmp(r11);
1307 }
1308 
1309 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
1310   Register data = rax;
1311   __ ic_check(1 /* end_alignment */);
1312   __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
1313 
 1314   // Method might have been compiled since the call site was patched to
 1315   // interpreted; if that is the case, treat it as a miss so we can get
 1316   // the call site corrected.
1317   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1318   __ jcc(Assembler::equal, skip_fixup);
1319   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1320 }
1321 
1322 // ---------------------------------------------------------------
1323 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1324                                                             int comp_args_on_stack,
1325                                                             const GrowableArray<SigEntry>* sig,
1326                                                             const VMRegPair* regs,
1327                                                             const GrowableArray<SigEntry>* sig_cc,
1328                                                             const VMRegPair* regs_cc,
1329                                                             const GrowableArray<SigEntry>* sig_cc_ro,
1330                                                             const VMRegPair* regs_cc_ro,
1331                                                             AdapterFingerPrint* fingerprint,
1332                                                             AdapterBlob*& new_adapter,
1333                                                             bool allocate_code_blob) {
1334   address i2c_entry = __ pc();
1335   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1336 
1337   // -------------------------------------------------------------------------
1338   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
1339   // to the interpreter.  The args start out packed in the compiled layout.  They
1340   // need to be unpacked into the interpreter layout.  This will almost always
1341   // require some stack space.  We grow the current (compiled) stack, then repack
1342   // the args.  We  finally end in a jump to the generic interpreter entry point.
1343   // On exit from the interpreter, the interpreter will restore our SP (lest the
1344   // compiled code, which relies solely on SP and not RBP, get sick).
1345 
1346   address c2i_unverified_entry        = __ pc();
1347   address c2i_unverified_inline_entry = __ pc();
1348   Label skip_fixup;
1349 
1350   gen_inline_cache_check(masm, skip_fixup);
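       // skip_fixup is bound inside gen_c2i_adapter below; taking it bypasses
       // patching the caller's call site, which is pointless here since we already
       // know the callee has no compiled code.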
1351 
1352   OopMapSet* oop_maps = new OopMapSet();
1353   int frame_complete = CodeOffsets::frame_never_safe;
1354   int frame_size_in_words = 0;
1355 
1356   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1357   address c2i_no_clinit_check_entry = nullptr;
1358   address c2i_inline_ro_entry = __ pc();
1359   if (regs_cc != regs_cc_ro) {
1360     // No class init barrier needed because method is guaranteed to be non-static
1361     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1362                     skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1363     skip_fixup.reset();
1364   }
1365 
1366   // Scalarized c2i adapter
1367   address c2i_entry        = __ pc();
1368   address c2i_inline_entry = __ pc();
1369   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1370                   skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1371 
1372   // Non-scalarized c2i adapter
1373   if (regs != regs_cc) {
1374     c2i_unverified_inline_entry = __ pc();
1375     Label inline_entry_skip_fixup;
1376     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1377 
1378     c2i_inline_entry = __ pc();
1379     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1380                     inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1381   }
1382 
1383   // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
1384   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1385   if (allocate_code_blob) {
1386     bool caller_must_gc_arguments = (regs != regs_cc);
1387     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1388   }
1389 
1390   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1391 }
1392 
1393 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1394                                          VMRegPair *regs,
1395                                          int total_args_passed) {
1396 
1397 // We return the number of VMRegImpl stack slots we need to reserve for all
1398 // the arguments, NOT counting out_preserve_stack_slots.
1399 
1400 // NOTE: These arrays will have to change when c1 is ported
1401 #ifdef _WIN64
1402     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1403       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1404     };
1405     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1406       c_farg0, c_farg1, c_farg2, c_farg3
1407     };
1408 #else
1409     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1410       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5

2502     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2503 
2504     // Get the handle (the 2nd argument)
2505     __ mov(oop_handle_reg, c_rarg1);
2506 
2507     // Get address of the box
2508 
2509     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2510 
2511     // Load the oop from the handle
2512     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2513 
2514     if (LockingMode == LM_MONITOR) {
2515       __ jmp(slow_path_lock);
2516     } else if (LockingMode == LM_LEGACY) {
2517       // Load immediate 1 into swap_reg %rax
2518       __ movl(swap_reg, 1);
2519 
2520       // Load (object->mark() | 1) into swap_reg %rax
2521       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2522       if (EnableValhalla) {
2523         // Mask inline_type bit such that we go to the slow path if object is an inline type
2524         __ andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
2525       }
2526 
2527       // Save (object->mark() | 1) into BasicLock's displaced header
2528       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2529 
2530       // src -> dest iff dest == rax else rax <- dest
2531       __ lock();
2532       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2533       __ jcc(Assembler::equal, count_mon);
2534 
2535       // Should this be moved to the slow-path code area?
2536 
2537       // Test if the oopMark is an obvious stack pointer, i.e.,
2538       //  1) (mark & 3) == 0, and
2539       //  2) rsp <= mark < rsp + os::vm_page_size()
2540       // These 3 tests can be done by evaluating the following
2541       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2542       // assuming both the stack pointer and the page size have their
2543       // least significant 2 bits clear.
2544       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
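           // Worked example, assuming a 4096-byte page: 3 - 4096 == -4093, i.e. the
           // mask 0x...f003. ANDing (mark - rsp) with it yields zero exactly when the
           // low 2 bits of (mark - rsp) are clear and 0 <= mark - rsp < 4096, i.e.
           // when both tests above pass.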
2545 

3888   julong *scratch = (julong *)alloca(total_allocation);
3889 
3890   // Local scratch arrays
3891   julong
3892     *a = scratch + 0 * longwords,
3893     *n = scratch + 1 * longwords,
3894     *m = scratch + 2 * longwords;
3895 
3896   reverse_words((julong *)a_ints, a, longwords);
3897   reverse_words((julong *)n_ints, n, longwords);
3898 
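       // reverse_words reverses the 64-bit word order of the operands, converting
       // between the word order of the incoming Java arrays and the
       // least-significant-word-first order the ::montgomery_* routines work in.
       // Below MONTGOMERY_SQUARING_THRESHOLD the dedicated squaring routine does
       // not pay off, so squaring falls back to a general multiply of a by itself.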
3899   if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3900     ::montgomery_square(a, n, m, (julong)inv, longwords);
3901   } else {
3902     ::montgomery_multiply(a, a, n, m, (julong)inv, longwords);
3903   }
3904 
3905   reverse_words(m, (julong *)m_ints, longwords);
3906 }
3907 
3908 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3909   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3910   CodeBuffer buffer(buf);
3911   short buffer_locs[20];
3912   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3913                                          sizeof(buffer_locs)/sizeof(relocInfo));
3914 
3915   MacroAssembler* masm = new MacroAssembler(&buffer);
3916 
3917   const Array<SigEntry>* sig_vk = vk->extended_sig();
3918   const Array<VMRegPair>* regs = vk->return_regs();
3919 
3920   int pack_fields_jobject_off = __ offset();
3921   // Resolve pre-allocated buffer from JNI handle.
3922   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3923   __ movptr(rax, Address(r13, 0));
3924   __ resolve_jobject(rax /* value */,
3925                      r15_thread /* thread */,
3926                      r12 /* tmp */);
3927   __ movptr(Address(r13, 0), rax);
3928 
3929   int pack_fields_off = __ offset();
3930 
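       // Walk the extended field signature: T_METADATA entries carry no field value
       // and T_VOID entries mark the upper halves of longs/doubles, so both are
       // skipped. j indexes regs, whose entry 0 corresponds to the buffered oop
       // itself, which is why j starts at 1.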
3931   int j = 1;
3932   for (int i = 0; i < sig_vk->length(); i++) {
3933     BasicType bt = sig_vk->at(i)._bt;
3934     if (bt == T_METADATA) {
3935       continue;
3936     }
3937     if (bt == T_VOID) {
3938       if (sig_vk->at(i-1)._bt == T_LONG ||
3939           sig_vk->at(i-1)._bt == T_DOUBLE) {
3940         j++;
3941       }
3942       continue;
3943     }
3944     int off = sig_vk->at(i)._offset;
3945     assert(off > 0, "offset in object should be positive");
3946     VMRegPair pair = regs->at(j);
3947     VMReg r_1 = pair.first();
3948     VMReg r_2 = pair.second();
3949     Address to(rax, off);
3950     if (bt == T_FLOAT) {
3951       __ movflt(to, r_1->as_XMMRegister());
3952     } else if (bt == T_DOUBLE) {
3953       __ movdbl(to, r_1->as_XMMRegister());
3954     } else {
3955       Register val = r_1->as_Register();
3956       assert_different_registers(to.base(), val, r14, r13, rbx, rscratch1);
3957       if (is_reference_type(bt)) {
3958         __ store_heap_oop(to, val, r14, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3959       } else {
3960         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3961       }
3962     }
3963     j++;
3964   }
3965   assert(j == regs->length(), "missed a field?");
3966   if (vk->has_nullable_atomic_layout()) {
3967     // Set the null marker to 1 to mark the buffered value as non-null
3968     __ movb(Address(rax, vk->null_marker_offset()), 1);
3969   }
3970   __ ret(0);
3971 
3972   int unpack_fields_off = __ offset();
3973 
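       // rax holds the buffer oop; if it is null there are no fields to unpack,
       // so skip straight to the return.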
3974   Label skip;
3975   __ testptr(rax, rax);
3976   __ jcc(Assembler::zero, skip);
3977 
3978   j = 1;
3979   for (int i = 0; i < sig_vk->length(); i++) {
3980     BasicType bt = sig_vk->at(i)._bt;
3981     if (bt == T_METADATA) {
3982       continue;
3983     }
3984     if (bt == T_VOID) {
3985       if (sig_vk->at(i-1)._bt == T_LONG ||
3986           sig_vk->at(i-1)._bt == T_DOUBLE) {
3987         j++;
3988       }
3989       continue;
3990     }
3991     int off = sig_vk->at(i)._offset;
3992     assert(off > 0, "offset in object should be positive");
3993     VMRegPair pair = regs->at(j);
3994     VMReg r_1 = pair.first();
3995     VMReg r_2 = pair.second();
3996     Address from(rax, off);
3997     if (bt == T_FLOAT) {
3998       __ movflt(r_1->as_XMMRegister(), from);
3999     } else if (bt == T_DOUBLE) {
4000       __ movdbl(r_1->as_XMMRegister(), from);
4001     } else if (bt == T_OBJECT || bt == T_ARRAY) {
4002       assert_different_registers(rax, r_1->as_Register());
4003       __ load_heap_oop(r_1->as_Register(), from);
4004     } else {
4005       assert(is_java_primitive(bt), "unexpected basic type");
4006       assert_different_registers(rax, r_1->as_Register());
4007       size_t size_in_bytes = type2aelembytes(bt);
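           // T_CHAR and T_BOOLEAN are unsigned in Java and are zero-extended;
           // all other integral types are sign-extended.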
4008       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4009     }
4010     j++;
4011   }
4012   assert(j == regs->length(), "missed a field?");
4013 
4014   __ bind(skip);
4015   __ ret(0);
4016 
4017   __ flush();
4018 
4019   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
4020 }
4021 
4022 #if INCLUDE_JFR
4023 
4024 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
4025 // It returns a jobject handle to the event writer.
4026 // The handle is dereferenced and the return value is the event writer oop.
4027 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
4028   enum layout {
4029     rbp_off,
4030     rbpH_off,
4031     return_off,
4032     return_off2,
4033     framesize // inclusive of return address
4034   };
4035 
4036   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
4037   CodeBuffer code(name, 1024, 64);
4038   MacroAssembler* masm = new MacroAssembler(&code);
4039   address start = __ pc();
4040 
4041   __ enter();

4094   __ reset_last_Java_frame(true);
4095 
4096   __ leave();
4097   __ ret(0);
4098 
4099   OopMapSet* oop_maps = new OopMapSet();
4100   OopMap* map = new OopMap(framesize, 1);
4101   oop_maps->add_gc_map(frame_complete, map);
4102 
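       // framesize is counted in 32-bit stack slots; the shift by
       // (LogBytesPerWord - LogBytesPerInt), i.e. by 1 on x86_64, converts
       // slots to the word count new_runtime_stub expects.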
4103   RuntimeStub* stub =
4104     RuntimeStub::new_runtime_stub(name,
4105                                   &code,
4106                                   frame_complete,
4107                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
4108                                   oop_maps,
4109                                   false);
4110   return stub;
4111 }
4112 
4113 #endif // INCLUDE_JFR
