< prev index next >

src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Print this page

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"

  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/barrierSetAssembler.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "logging/log.hpp"

  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "oops/compiledICHolder.hpp"
  45 #include "oops/klass.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "runtime/signature.hpp"
  51 #include "runtime/stubRoutines.hpp"
  52 #include "runtime/vframeArray.hpp"
  53 #include "runtime/vm_version.hpp"
  54 #include "utilities/align.hpp"
  55 #include "utilities/formatBuffer.hpp"
  56 #include "vmreg_x86.inline.hpp"
  57 #ifdef COMPILER1
  58 #include "c1/c1_Runtime1.hpp"
  59 #endif
  60 #ifdef COMPILER2
  61 #include "opto/runtime.hpp"

1430   }
1431 }
1432 
1433 static void gen_special_dispatch(MacroAssembler* masm,
1434                                  const methodHandle& method,
1435                                  const BasicType* sig_bt,
1436                                  const VMRegPair* regs) {
1437   verify_oop_args(masm, method, sig_bt, regs);
1438   vmIntrinsics::ID iid = method->intrinsic_id();
1439 
1440   // Now write the args into the outgoing interpreter space
1441   bool     has_receiver   = false;
1442   Register receiver_reg   = noreg;
1443   int      member_arg_pos = -1;
1444   Register member_reg     = noreg;
1445   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1446   if (ref_kind != 0) {
1447     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1448     member_reg = rbx;  // known to be free at this point
1449     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1450   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1451     has_receiver = true;



1452   } else {
1453     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1454   }
1455 
1456   if (member_reg != noreg) {
1457     // Load the member_arg into register, if necessary.
1458     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1459     VMReg r = regs[member_arg_pos].first();
1460     if (r->is_stack()) {
1461       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1462     } else {
1463       // no data motion is needed
1464       member_reg = r->as_Register();
1465     }
1466   }
1467 
1468   if (has_receiver) {
1469     // Make sure the receiver is loaded into a register.
1470     assert(method->size_of_parameters() > 0, "oob");
1471     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");

3046   __ bind(pending);
3047 
3048   RegisterSaver::restore_live_registers(masm);
3049 
3050   // exception pending => remove activation and forward to exception handler
3051 
3052   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3053 
3054   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3055   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3056 
3057   // -------------
3058   // make sure all code is generated
3059   masm->flush();
3060 
3061   // return the  blob
3062   // frame_size_words or bytes??
3063   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3064 }
3065 
3066 #ifdef COMPILER2
3067 static const int native_invoker_code_size = MethodHandles::adapter_code_size;
3068 
// Stub generator for the C2 "native invoker" blob: emits a small frame that
// calls a fixed native entry point (_call_target) with Java->native->Java
// thread-state transitions and a safepoint poll on return (see generate()).
3069 class NativeInvokerGenerator : public StubCodeGenerator {
     // Address of the native function the generated stub calls.
3070   address _call_target;
     // Extra outgoing stack bytes reserved below the spill area
     // (presumably the Windows ABI shadow space -- TODO confirm at call sites).
3071   int _shadow_space_bytes;
3072 
     // Registers the native callee reads (asserted not to clash with
     // r15_thread/rscratch1) and the register(s) carrying its return value.
3073   const GrowableArray<VMReg>& _input_registers;
3074   const GrowableArray<VMReg>& _output_registers;
3075 
     // Filled in by generate(): offset at which the frame is laid out,
     // frame size in 32-bit slots, and the oop maps for the stub.
3076   int _frame_complete;
3077   int _framesize;
3078   OopMapSet* _oop_maps;
3079 public:
3080   NativeInvokerGenerator(CodeBuffer* buffer,
3081                          address call_target,
3082                          int shadow_space_bytes,
3083                          const GrowableArray<VMReg>& input_registers,
3084                          const GrowableArray<VMReg>& output_registers)
3085    : StubCodeGenerator(buffer, PrintMethodHandleStubs),
3086      _call_target(call_target),
3087      _shadow_space_bytes(shadow_space_bytes),
3088      _input_registers(input_registers),
3089      _output_registers(output_registers),
3090      _frame_complete(0),
3091      _framesize(0),
3092      _oop_maps(NULL) {
     // Only a single return register is supported (a second, invalid entry is
     // tolerated -- e.g. the upper half of a one-register pair).
3093     assert(_output_registers.length() <= 1
3094            || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
3095 
3096   }
3097 
3098   void generate();
3099 
     // Bytes of stack needed to preserve the (single) return register across a
     // runtime call: 8 for a GPR; for an XMM register, the widest vector state
     // available -- 64 (ZMM, AVX-512), 32 (YMM, AVX), or 16 (XMM).
3100   int spill_size_in_bytes() const {
3101     if (_output_registers.length() == 0) {
3102       return 0;
3103     }
3104     VMReg reg = _output_registers.at(0);
3105     assert(reg->is_reg(), "must be a register");
3106     if (reg->is_Register()) {
3107       return 8;
3108     } else if (reg->is_XMMRegister()) {
3109       if (UseAVX >= 3) {
3110         return 64;
3111       } else if (UseAVX >= 1) {
3112         return 32;
3113       } else {
3114         return 16;
3115       }
3116     } else {
3117       ShouldNotReachHere();
3118     }
3119     return 0;
3120   }
3121 
     // Save the return register to [rsp + 0] before calling into the VM on the
     // slow paths; the move width must match spill_size_in_bytes() above.
3122   void spill_out_registers() {
3123     if (_output_registers.length() == 0) {
3124       return;
3125     }
3126     VMReg reg = _output_registers.at(0);
3127     assert(reg->is_reg(), "must be a register");
3128     MacroAssembler* masm = _masm;
3129     if (reg->is_Register()) {
3130       __ movptr(Address(rsp, 0), reg->as_Register());
3131     } else if (reg->is_XMMRegister()) {
3132       if (UseAVX >= 3) {
3133         __ evmovdqul(Address(rsp, 0), reg->as_XMMRegister(), Assembler::AVX_512bit);
3134       } else if (UseAVX >= 1) {
3135         __ vmovdqu(Address(rsp, 0), reg->as_XMMRegister());
3136       } else {
3137         __ movdqu(Address(rsp, 0), reg->as_XMMRegister());
3138       }
3139     } else {
3140       ShouldNotReachHere();
3141     }
3142   }
3143 
     // Inverse of spill_out_registers(): reload the return register from
     // [rsp + 0] after the VM call.
3144   void fill_out_registers() {
3145     if (_output_registers.length() == 0) {
3146       return;
3147     }
3148     VMReg reg = _output_registers.at(0);
3149     assert(reg->is_reg(), "must be a register");
3150     MacroAssembler* masm = _masm;
3151     if (reg->is_Register()) {
3152       __ movptr(reg->as_Register(), Address(rsp, 0));
3153     } else if (reg->is_XMMRegister()) {
3154       if (UseAVX >= 3) {
3155         __ evmovdqul(reg->as_XMMRegister(), Address(rsp, 0), Assembler::AVX_512bit);
3156       } else if (UseAVX >= 1) {
3157         __ vmovdqu(reg->as_XMMRegister(), Address(rsp, 0));
3158       } else {
3159         __ movdqu(reg->as_XMMRegister(), Address(rsp, 0));
3160       }
3161     } else {
3162       ShouldNotReachHere();
3163     }
3164   }
3165 
3166   int frame_complete() const {
3167     return _frame_complete;
3168   }
3169 
     // Frame size converted from 32-bit slots to words (what
     // RuntimeStub::new_runtime_stub expects).
3170   int framesize() const {
3171     return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
3172   }
3173 
3174   OopMapSet* oop_maps() const {
3175     return _oop_maps;
3176   }
3177 
3178 private:
3179 #ifdef ASSERT
     // Debug-only: true if the callee's calling convention touches 'reg';
     // used to assert the stub's scratch registers are free.
3180 bool target_uses_register(VMReg reg) {
3181   return _input_registers.contains(reg) || _output_registers.contains(reg);
3182 }
3183 #endif
3184 };
3185 
// Build the "nep_invoker_blob" RuntimeStub: allocate a code buffer, run the
// NativeInvokerGenerator over it, and wrap the result (with its frame info and
// oop maps) in a RuntimeStub. 'false' => the stub's caller must not be
// deoptimized at the return of this stub (no ordinary safepoint return here) --
// NOTE(review): confirm against new_runtime_stub's parameter meaning.
3186 RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
3187                                                 int shadow_space_bytes,
3188                                                 const GrowableArray<VMReg>& input_registers,
3189                                                 const GrowableArray<VMReg>& output_registers) {
     // Fixed relocation-section budget; code section is sized by
     // native_invoker_code_size.
3190   int locs_size  = 64;
3191   CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
3192   NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
3193   g.generate();
3194   code.log_section_sizes("nep_invoker_blob");
3195 
3196   RuntimeStub* stub =
3197     RuntimeStub::new_runtime_stub("nep_invoker_blob",
3198                                   &code,
3199                                   g.frame_complete(),
3200                                   g.framesize(),
3201                                   g.oop_maps(), false);
3202   return stub;
3203 }
3204 
// Emit the invoker stub body. Fast path: enter frame, transition the thread to
// _thread_in_native, call the target, transition back via
// _thread_in_native_trans with a safepoint poll, and return. Two out-of-line
// slow paths (safepoint and stack re-guard) preserve the return register
// around their VM calls.
3205 void NativeInvokerGenerator::generate() {
     // The stub itself clobbers r15_thread/rscratch1, so the callee's calling
     // convention must not rely on them.
3206   assert(!(target_uses_register(r15_thread->as_VMReg()) || target_uses_register(rscratch1->as_VMReg())), "Register conflict");
3207 
     // Minimal frame layout in 32-bit slots: saved rbp + return address.
3208   enum layout {
3209     rbp_off,
3210     rbp_off2,
3211     return_off,
3212     return_off2,
3213     framesize // inclusive of return address
3214   };
3215 
     // Total frame = fixed slots + shadow space + return-value spill area,
     // rounded up to 4 slots (16 bytes) to keep rsp 16-byte aligned.
3216   _framesize = align_up(framesize + ((_shadow_space_bytes + spill_size_in_bytes()) >> LogBytesPerInt), 4);
3217   assert(is_even(_framesize/2), "sp not 16-byte aligned");
3218 
3219   _oop_maps  = new OopMapSet();
3220   MacroAssembler* masm = _masm;
3221 
3222   address start = __ pc();
3223 
3224   __ enter();
3225 
3226   // return address and rbp are already in place
     // enter() already pushed rbp; subtract the rest of the frame (4 slots
     // = rbp + return address already accounted for).
3227   __ subptr(rsp, (_framesize-4) << LogBytesPerInt); // prolog
3228 
3229   _frame_complete = __ pc() - start;
3230 
     // Record this pc as the last Java frame so stack walks from the native
     // side see a well-formed anchor; an (empty) oop map is registered for it.
3231   address the_pc = __ pc();
3232 
3233   __ set_last_Java_frame(rsp, rbp, (address)the_pc);
3234   OopMap* map = new OopMap(_framesize, 0);
3235   _oop_maps->add_gc_map(the_pc - start, map);
3236 
3237   // State transition
3238   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
3239 
3240   __ call(RuntimeAddress(_call_target));
3241 
3242   __ restore_cpu_control_state_after_jni();
3243 
3244   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
3245 
3246   // Force this write out before the read below
     // Full fence: the state store must be visible before the safepoint-poll
     // load, or the VM could miss this thread during a safepoint.
3247   __ membar(Assembler::Membar_mask_bits(
3248           Assembler::LoadLoad | Assembler::LoadStore |
3249           Assembler::StoreLoad | Assembler::StoreStore));
3250 
3251   Label L_after_safepoint_poll;
3252   Label L_safepoint_poll_slow_path;
3253 
     // Take the slow path if a safepoint is pending or the thread is suspended.
3254   __ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
3255   __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
3256   __ jcc(Assembler::notEqual, L_safepoint_poll_slow_path);
3257 
3258   __ bind(L_after_safepoint_poll);
3259 
3260   // change thread state
3261   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
3262 
     // If the yellow guard zone was disabled (e.g. by a stack overflow while in
     // native), re-arm it before returning to Java.
3263   __ block_comment("reguard stack check");
3264   Label L_reguard;
3265   Label L_after_reguard;
3266   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
3267   __ jcc(Assembler::equal, L_reguard);
3268   __ bind(L_after_reguard);
3269 
3270   __ reset_last_Java_frame(r15_thread, true);
3271 
3272   __ leave(); // required for proper stackwalking of RuntimeStub frame
3273   __ ret(0);
3274 
3275   //////////////////////////////////////////////////////////////////////////////
3276 
     // Slow path: cooperate with a pending safepoint/suspend. The native return
     // value is spilled around the VM call and restored afterwards.
3277   __ block_comment("{ L_safepoint_poll_slow_path");
3278   __ bind(L_safepoint_poll_slow_path);
3279   __ vzeroupper();
3280 
3281   spill_out_registers();
3282 
3283   __ mov(c_rarg0, r15_thread);
3284   __ mov(r12, rsp); // remember sp
3285   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3286   __ andptr(rsp, -16); // align stack as required by ABI
3287   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
3288   __ mov(rsp, r12); // restore sp
     // r12 doubles as the heapbase register; it was clobbered above, so
     // re-establish it before returning to compiled code.
3289   __ reinit_heapbase();
3290 
3291   fill_out_registers();
3292 
3293   __ jmp(L_after_safepoint_poll);
3294   __ block_comment("} L_safepoint_poll_slow_path");
3295 
3296   //////////////////////////////////////////////////////////////////////////////
3297 
     // Slow path: re-enable the yellow guard pages; same spill/align/restore
     // discipline as the safepoint slow path.
3298   __ block_comment("{ L_reguard");
3299   __ bind(L_reguard);
3300   __ vzeroupper();
3301 
3302   spill_out_registers();
3303 
3304   __ mov(r12, rsp); // remember sp
3305   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3306   __ andptr(rsp, -16); // align stack as required by ABI
3307   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
3308   __ mov(rsp, r12); // restore sp
3309   __ reinit_heapbase();
3310 
3311   fill_out_registers();
3312 
3313   __ jmp(L_after_reguard);
3314 
3315   __ block_comment("} L_reguard");
3316 
3317   //////////////////////////////////////////////////////////////////////////////
3318 
3319   __ flush();
3320 }
3321 #endif // COMPILER2
3322 
3323 //------------------------------Montgomery multiplication------------------------
3324 //
3325 
3326 #ifndef _WINDOWS
3327 
3328 // Subtract 0:b from carry:a.  Return carry.
3329 static julong
3330 sub(julong a[], julong b[], julong carry, long len) {
3331   long long i = 0, cnt = len;
3332   julong tmp;
3333   asm volatile("clc; "
3334                "0: ; "
3335                "mov (%[b], %[i], 8), %[tmp]; "
3336                "sbb %[tmp], (%[a], %[i], 8); "
3337                "inc %[i]; dec %[cnt]; "
3338                "jne 0b; "
3339                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3340                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3341                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3342                : "memory");

3700   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
3701 #endif
3702   // Clear the exception oop so GC no longer processes it as a root.
3703   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
3704 
3705   // rax: exception oop
3706   // r8:  exception handler
3707   // rdx: exception pc
3708   // Jump to handler
3709 
3710   __ jmp(r8);
3711 
3712   // Make sure all code is generated
3713   masm->flush();
3714 
3715   // Set exception blob
3716   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3717 }
3718 #endif // COMPILER2
3719 
// Compute a safe ordering for moving incoming args (in_regs) to outgoing
// positions (out_regs) without clobbering not-yet-moved values. The
// ComputeMoveOrder constructor does all the work as a side effect, writing the
// resulting order into 'arg_order' (passed by reference); the object itself is
// intentionally discarded. 'tmp_vmreg' is a scratch location for breaking
// cycles -- presumably; confirm against ComputeMoveOrder's definition.
3720 void SharedRuntime::compute_move_order(const BasicType* in_sig_bt,
3721                                        int total_in_args, const VMRegPair* in_regs,
3722                                        int total_out_args, VMRegPair* out_regs,
3723                                        GrowableArray<int>& arg_order,
3724                                        VMRegPair tmp_vmreg) {
3725   ComputeMoveOrder order(total_in_args, in_regs,
3726                          total_out_args, out_regs,
3727                          in_sig_bt, arg_order, tmp_vmreg);
3728 }

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gcLocker.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/barrierSetAssembler.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/compiledICHolder.hpp"
  47 #include "oops/klass.inline.hpp"
  48 #include "prims/methodHandles.hpp"
  49 #include "runtime/jniHandles.hpp"
  50 #include "runtime/safepointMechanism.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "runtime/signature.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/vframeArray.hpp"
  55 #include "runtime/vm_version.hpp"
  56 #include "utilities/align.hpp"
  57 #include "utilities/formatBuffer.hpp"
  58 #include "vmreg_x86.inline.hpp"
  59 #ifdef COMPILER1
  60 #include "c1/c1_Runtime1.hpp"
  61 #endif
  62 #ifdef COMPILER2
  63 #include "opto/runtime.hpp"

1432   }
1433 }
1434 
1435 static void gen_special_dispatch(MacroAssembler* masm,
1436                                  const methodHandle& method,
1437                                  const BasicType* sig_bt,
1438                                  const VMRegPair* regs) {
1439   verify_oop_args(masm, method, sig_bt, regs);
1440   vmIntrinsics::ID iid = method->intrinsic_id();
1441 
1442   // Now write the args into the outgoing interpreter space
1443   bool     has_receiver   = false;
1444   Register receiver_reg   = noreg;
1445   int      member_arg_pos = -1;
1446   Register member_reg     = noreg;
1447   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1448   if (ref_kind != 0) {
1449     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1450     member_reg = rbx;  // known to be free at this point
1451     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1452   } else if (iid == vmIntrinsics::_invokeBasic) {
1453     has_receiver = true;
1454   } else if (iid == vmIntrinsics::_linkToNative) {
1455     member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1456     member_reg = rbx;  // known to be free at this point
1457   } else {
1458     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1459   }
1460 
1461   if (member_reg != noreg) {
1462     // Load the member_arg into register, if necessary.
1463     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1464     VMReg r = regs[member_arg_pos].first();
1465     if (r->is_stack()) {
1466       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1467     } else {
1468       // no data motion is needed
1469       member_reg = r->as_Register();
1470     }
1471   }
1472 
1473   if (has_receiver) {
1474     // Make sure the receiver is loaded into a register.
1475     assert(method->size_of_parameters() > 0, "oob");
1476     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");

3051   __ bind(pending);
3052 
3053   RegisterSaver::restore_live_registers(masm);
3054 
3055   // exception pending => remove activation and forward to exception handler
3056 
3057   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3058 
3059   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3060   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3061 
3062   // -------------
3063   // make sure all code is generated
3064   masm->flush();
3065 
3066   // return the  blob
3067   // frame_size_words or bytes??
3068   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3069 }
3070 

































































































































































































































































3071 //------------------------------Montgomery multiplication------------------------
3072 //
3073 
3074 #ifndef _WINDOWS
3075 
3076 // Subtract 0:b from carry:a.  Return carry.
3077 static julong
3078 sub(julong a[], julong b[], julong carry, long len) {
3079   long long i = 0, cnt = len;
3080   julong tmp;
3081   asm volatile("clc; "
3082                "0: ; "
3083                "mov (%[b], %[i], 8), %[tmp]; "
3084                "sbb %[tmp], (%[a], %[i], 8); "
3085                "inc %[i]; dec %[cnt]; "
3086                "jne 0b; "
3087                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3088                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3089                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3090                : "memory");

3448   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
3449 #endif
3450   // Clear the exception oop so GC no longer processes it as a root.
3451   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
3452 
3453   // rax: exception oop
3454   // r8:  exception handler
3455   // rdx: exception pc
3456   // Jump to handler
3457 
3458   __ jmp(r8);
3459 
3460   // Make sure all code is generated
3461   masm->flush();
3462 
3463   // Set exception blob
3464   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3465 }
3466 #endif // COMPILER2
3467 









< prev index next >