< prev index next >

src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Print this page

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"

  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/barrierSetAssembler.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "logging/log.hpp"

  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "oops/compiledICHolder.hpp"
  45 #include "oops/klass.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "runtime/signature.hpp"
  51 #include "runtime/stubRoutines.hpp"
  52 #include "runtime/vframeArray.hpp"
  53 #include "runtime/vm_version.hpp"
  54 #include "utilities/align.hpp"
  55 #include "utilities/formatBuffer.hpp"
  56 #include "vmreg_x86.inline.hpp"
  57 #ifdef COMPILER1
  58 #include "c1/c1_Runtime1.hpp"
  59 #endif
  60 #ifdef COMPILER2
  61 #include "opto/runtime.hpp"

1424   }
1425 }
1426 
1427 static void gen_special_dispatch(MacroAssembler* masm,
1428                                  const methodHandle& method,
1429                                  const BasicType* sig_bt,
1430                                  const VMRegPair* regs) {
1431   verify_oop_args(masm, method, sig_bt, regs);
1432   vmIntrinsics::ID iid = method->intrinsic_id();
1433 
1434   // Now write the args into the outgoing interpreter space
1435   bool     has_receiver   = false;
1436   Register receiver_reg   = noreg;
1437   int      member_arg_pos = -1;
1438   Register member_reg     = noreg;
1439   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1440   if (ref_kind != 0) {
1441     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1442     member_reg = rbx;  // known to be free at this point
1443     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1444   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1445     has_receiver = true;



1446   } else {
1447     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1448   }
1449 
1450   if (member_reg != noreg) {
1451     // Load the member_arg into register, if necessary.
1452     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1453     VMReg r = regs[member_arg_pos].first();
1454     if (r->is_stack()) {
1455       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1456     } else {
1457       // no data motion is needed
1458       member_reg = r->as_Register();
1459     }
1460   }
1461 
1462   if (has_receiver) {
1463     // Make sure the receiver is loaded into a register.
1464     assert(method->size_of_parameters() > 0, "oob");
1465     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");

3050   __ bind(pending);
3051 
3052   RegisterSaver::restore_live_registers(masm);
3053 
3054   // exception pending => remove activation and forward to exception handler
3055 
3056   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3057 
3058   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3059   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3060 
3061   // -------------
3062   // make sure all code is generated
3063   masm->flush();
3064 
3065   // return the  blob
3066   // frame_size_words or bytes??
3067   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3068 }
3069 
3070 #ifdef COMPILER2
3071 static const int native_invoker_code_size = MethodHandles::adapter_code_size;
3072 
3073 class NativeInvokerGenerator : public StubCodeGenerator {
3074   address _call_target;
3075   int _shadow_space_bytes;
3076 
3077   const GrowableArray<VMReg>& _input_registers;
3078   const GrowableArray<VMReg>& _output_registers;
3079 
3080   int _frame_complete;
3081   int _framesize;
3082   OopMapSet* _oop_maps;
3083 public:
3084   NativeInvokerGenerator(CodeBuffer* buffer,
3085                          address call_target,
3086                          int shadow_space_bytes,
3087                          const GrowableArray<VMReg>& input_registers,
3088                          const GrowableArray<VMReg>& output_registers)
3089    : StubCodeGenerator(buffer, PrintMethodHandleStubs),
3090      _call_target(call_target),
3091      _shadow_space_bytes(shadow_space_bytes),
3092      _input_registers(input_registers),
3093      _output_registers(output_registers),
3094      _frame_complete(0),
3095      _framesize(0),
3096      _oop_maps(NULL) {
3097     assert(_output_registers.length() <= 1
3098            || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
3099 
3100   }
3101 
3102   void generate();
3103 
3104   int spill_size_in_bytes() const {
3105     if (_output_registers.length() == 0) {
3106       return 0;
3107     }
3108     VMReg reg = _output_registers.at(0);
3109     assert(reg->is_reg(), "must be a register");
3110     if (reg->is_Register()) {
3111       return 8;
3112     } else if (reg->is_XMMRegister()) {
3113       if (UseAVX >= 3) {
3114         return 64;
3115       } else if (UseAVX >= 1) {
3116         return 32;
3117       } else {
3118         return 16;
3119       }
3120     } else {
3121       ShouldNotReachHere();
3122     }
3123     return 0;
3124   }
3125 
3126   void spill_out_registers() {
3127     if (_output_registers.length() == 0) {
3128       return;
3129     }
3130     VMReg reg = _output_registers.at(0);
3131     assert(reg->is_reg(), "must be a register");
3132     MacroAssembler* masm = _masm;
3133     if (reg->is_Register()) {
3134       __ movptr(Address(rsp, 0), reg->as_Register());
3135     } else if (reg->is_XMMRegister()) {
3136       if (UseAVX >= 3) {
3137         __ evmovdqul(Address(rsp, 0), reg->as_XMMRegister(), Assembler::AVX_512bit);
3138       } else if (UseAVX >= 1) {
3139         __ vmovdqu(Address(rsp, 0), reg->as_XMMRegister());
3140       } else {
3141         __ movdqu(Address(rsp, 0), reg->as_XMMRegister());
3142       }
3143     } else {
3144       ShouldNotReachHere();
3145     }
3146   }
3147 
3148   void fill_out_registers() {
3149     if (_output_registers.length() == 0) {
3150       return;
3151     }
3152     VMReg reg = _output_registers.at(0);
3153     assert(reg->is_reg(), "must be a register");
3154     MacroAssembler* masm = _masm;
3155     if (reg->is_Register()) {
3156       __ movptr(reg->as_Register(), Address(rsp, 0));
3157     } else if (reg->is_XMMRegister()) {
3158       if (UseAVX >= 3) {
3159         __ evmovdqul(reg->as_XMMRegister(), Address(rsp, 0), Assembler::AVX_512bit);
3160       } else if (UseAVX >= 1) {
3161         __ vmovdqu(reg->as_XMMRegister(), Address(rsp, 0));
3162       } else {
3163         __ movdqu(reg->as_XMMRegister(), Address(rsp, 0));
3164       }
3165     } else {
3166       ShouldNotReachHere();
3167     }
3168   }
3169 
3170   int frame_complete() const {
3171     return _frame_complete;
3172   }
3173 
3174   int framesize() const {
3175     return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
3176   }
3177 
3178   OopMapSet* oop_maps() const {
3179     return _oop_maps;
3180   }
3181 
3182 private:
3183 #ifdef ASSERT
3184 bool target_uses_register(VMReg reg) {
3185   return _input_registers.contains(reg) || _output_registers.contains(reg);
3186 }
3187 #endif
3188 };
3189 
3190 RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
3191                                                 int shadow_space_bytes,
3192                                                 const GrowableArray<VMReg>& input_registers,
3193                                                 const GrowableArray<VMReg>& output_registers) {
3194   int locs_size  = 64;
3195   CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
3196   NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
3197   g.generate();
3198   code.log_section_sizes("nep_invoker_blob");
3199 
3200   RuntimeStub* stub =
3201     RuntimeStub::new_runtime_stub("nep_invoker_blob",
3202                                   &code,
3203                                   g.frame_complete(),
3204                                   g.framesize(),
3205                                   g.oop_maps(), false);
3206   return stub;
3207 }
3208 
// Emit the body of the native-invoker stub: build a frame, record a
// last-Java-frame, transition to _thread_in_native, call the target,
// then transition back to _thread_in_Java, taking out-of-line slow
// paths for a pending safepoint/suspend or a disabled stack guard.
void NativeInvokerGenerator::generate() {
  // The stub itself needs r15 (JavaThread*) and rscratch1, so the
  // target's argument/result registers must not overlap them.
  assert(!(target_uses_register(r15_thread->as_VMReg()) || target_uses_register(rscratch1->as_VMReg())), "Register conflict");

  // Fixed frame slots, counted in 32-bit (VMRegImpl) slots: saved rbp
  // and the return address each take two slots.
  enum layout {
    rbp_off,
    rbp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  // Add the ABI shadow space plus the return-register spill area (both
  // in bytes, hence >> LogBytesPerInt), rounded up to a multiple of
  // 4 slots (16 bytes) to keep rsp 16-byte aligned.
  _framesize = align_up(framesize + ((_shadow_space_bytes + spill_size_in_bytes()) >> LogBytesPerInt), 4);
  assert(is_even(_framesize/2), "sp not 16-byte aligned");

  _oop_maps  = new OopMapSet();
  MacroAssembler* masm = _masm;

  address start = __ pc();

  __ enter();

  // return address and rbp are already in place
  __ subptr(rsp, (_framesize-4) << LogBytesPerInt); // prolog

  _frame_complete = __ pc() - start;

  address the_pc = __ pc();

  // Record the frame anchor so stack walkers can traverse past the
  // native call; the GC map is empty (no oops are recorded as live
  // across the call).
  __ set_last_Java_frame(rsp, rbp, (address)the_pc);
  OopMap* map = new OopMap(_framesize, 0);
  _oop_maps->add_gc_map(the_pc - start, map);

  // State transition
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(_call_target));

  __ restore_cpu_control_state_after_jni();

  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

  // Force this write out before the read below
  __ membar(Assembler::Membar_mask_bits(
          Assembler::LoadLoad | Assembler::LoadStore |
          Assembler::StoreLoad | Assembler::StoreStore));

  Label L_after_safepoint_poll;
  Label L_safepoint_poll_slow_path;

  // Take the slow path if a safepoint is pending or any suspend flag
  // is set on this thread.
  __ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
  __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
  __ jcc(Assembler::notEqual, L_safepoint_poll_slow_path);

  __ bind(L_after_safepoint_poll);

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ block_comment("reguard stack check");
  Label L_reguard;
  Label L_after_reguard;
  // The callee may have touched the yellow zone; re-enable it out of line.
  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
  __ jcc(Assembler::equal, L_reguard);
  __ bind(L_after_reguard);

  __ reset_last_Java_frame(r15_thread, true);

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(0);

  //////////////////////////////////////////////////////////////////////////////
  // Slow path: cooperate with a pending safepoint/suspend request. The
  // native return register is preserved across the VM call via
  // spill_out_registers()/fill_out_registers().

  __ block_comment("{ L_safepoint_poll_slow_path");
  __ bind(L_safepoint_poll_slow_path);
  __ vzeroupper(); // clear upper AVX state before calling into the VM

  spill_out_registers();

  __ mov(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  fill_out_registers();

  __ jmp(L_after_safepoint_poll);
  __ block_comment("} L_safepoint_poll_slow_path");

  //////////////////////////////////////////////////////////////////////////////
  // Slow path: re-enable the yellow-zone stack guard pages, again
  // preserving the return register around the VM call.

  __ block_comment("{ L_reguard");
  __ bind(L_reguard);
  __ vzeroupper();

  spill_out_registers();

  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  fill_out_registers();

  __ jmp(L_after_reguard);

  __ block_comment("} L_reguard");

  //////////////////////////////////////////////////////////////////////////////

  __ flush();
}
3325 #endif // COMPILER2
3326 
3327 //------------------------------Montgomery multiplication------------------------
3328 //
3329 
3330 #ifndef _WINDOWS
3331 
3332 // Subtract 0:b from carry:a.  Return carry.
3333 static julong
3334 sub(julong a[], julong b[], julong carry, long len) {
3335   long long i = 0, cnt = len;
3336   julong tmp;
3337   asm volatile("clc; "
3338                "0: ; "
3339                "mov (%[b], %[i], 8), %[tmp]; "
3340                "sbb %[tmp], (%[a], %[i], 8); "
3341                "inc %[i]; dec %[cnt]; "
3342                "jne 0b; "
3343                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3344                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3345                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3346                : "memory");

3706   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
3707 #endif
3708   // Clear the exception oop so GC no longer processes it as a root.
3709   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
3710 
3711   // rax: exception oop
3712   // r8:  exception handler
3713   // rdx: exception pc
3714   // Jump to handler
3715 
3716   __ jmp(r8);
3717 
3718   // Make sure all code is generated
3719   masm->flush();
3720 
3721   // Set exception blob
3722   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3723 }
3724 #endif // COMPILER2
3725 
// Compute a safe ordering for moving Java call arguments (in_regs) into
// their native argument positions (out_regs), reporting the result
// through 'arg_order'.  All of the work happens as a side effect of the
// ComputeMoveOrder constructor; its definition is not visible in this
// chunk, but presumably 'tmp_vmreg' is a scratch location used to break
// move cycles — TODO(review): confirm against ComputeMoveOrder.
void SharedRuntime::compute_move_order(const BasicType* in_sig_bt,
                                       int total_in_args, const VMRegPair* in_regs,
                                       int total_out_args, VMRegPair* out_regs,
                                       GrowableArray<int>& arg_order,
                                       VMRegPair tmp_vmreg) {
  // Constructed only for its side effect on 'arg_order'.
  ComputeMoveOrder order(total_in_args, in_regs,
                         total_out_args, out_regs,
                         in_sig_bt, arg_order, tmp_vmreg);
}

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gcLocker.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/barrierSetAssembler.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/compiledICHolder.hpp"
  47 #include "oops/klass.inline.hpp"
  48 #include "prims/methodHandles.hpp"
  49 #include "runtime/jniHandles.hpp"
  50 #include "runtime/safepointMechanism.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "runtime/signature.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/vframeArray.hpp"
  55 #include "runtime/vm_version.hpp"
  56 #include "utilities/align.hpp"
  57 #include "utilities/formatBuffer.hpp"
  58 #include "vmreg_x86.inline.hpp"
  59 #ifdef COMPILER1
  60 #include "c1/c1_Runtime1.hpp"
  61 #endif
  62 #ifdef COMPILER2
  63 #include "opto/runtime.hpp"

1426   }
1427 }
1428 
1429 static void gen_special_dispatch(MacroAssembler* masm,
1430                                  const methodHandle& method,
1431                                  const BasicType* sig_bt,
1432                                  const VMRegPair* regs) {
1433   verify_oop_args(masm, method, sig_bt, regs);
1434   vmIntrinsics::ID iid = method->intrinsic_id();
1435 
1436   // Now write the args into the outgoing interpreter space
1437   bool     has_receiver   = false;
1438   Register receiver_reg   = noreg;
1439   int      member_arg_pos = -1;
1440   Register member_reg     = noreg;
1441   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1442   if (ref_kind != 0) {
1443     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1444     member_reg = rbx;  // known to be free at this point
1445     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1446   } else if (iid == vmIntrinsics::_invokeBasic) {
1447     has_receiver = true;
1448   } else if (iid == vmIntrinsics::_linkToNative) {
1449     member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1450     member_reg = rbx;  // known to be free at this point
1451   } else {
1452     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1453   }
1454 
1455   if (member_reg != noreg) {
1456     // Load the member_arg into register, if necessary.
1457     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1458     VMReg r = regs[member_arg_pos].first();
1459     if (r->is_stack()) {
1460       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1461     } else {
1462       // no data motion is needed
1463       member_reg = r->as_Register();
1464     }
1465   }
1466 
1467   if (has_receiver) {
1468     // Make sure the receiver is loaded into a register.
1469     assert(method->size_of_parameters() > 0, "oob");
1470     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");

3055   __ bind(pending);
3056 
3057   RegisterSaver::restore_live_registers(masm);
3058 
3059   // exception pending => remove activation and forward to exception handler
3060 
3061   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3062 
3063   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3064   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3065 
3066   // -------------
3067   // make sure all code is generated
3068   masm->flush();
3069 
3070   // return the  blob
3071   // frame_size_words or bytes??
3072   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3073 }
3074 

































































































































































































































































3075 //------------------------------Montgomery multiplication------------------------
3076 //
3077 
3078 #ifndef _WINDOWS
3079 
3080 // Subtract 0:b from carry:a.  Return carry.
3081 static julong
3082 sub(julong a[], julong b[], julong carry, long len) {
3083   long long i = 0, cnt = len;
3084   julong tmp;
3085   asm volatile("clc; "
3086                "0: ; "
3087                "mov (%[b], %[i], 8), %[tmp]; "
3088                "sbb %[tmp], (%[a], %[i], 8); "
3089                "inc %[i]; dec %[cnt]; "
3090                "jne 0b; "
3091                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3092                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3093                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3094                : "memory");

3454   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
3455 #endif
3456   // Clear the exception oop so GC no longer processes it as a root.
3457   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
3458 
3459   // rax: exception oop
3460   // r8:  exception handler
3461   // rdx: exception pc
3462   // Jump to handler
3463 
3464   __ jmp(r8);
3465 
3466   // Make sure all code is generated
3467   masm->flush();
3468 
3469   // Set exception blob
3470   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3471 }
3472 #endif // COMPILER2
3473 









< prev index next >