
src/hotspot/cpu/x86/stubGenerator_x86_64.cpp

  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "ci/ciUtilities.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "gc/shared/barrierSetNMethod.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "memory/universe.hpp"
  34 #include "nativeInst_x86.hpp"
  35 #include "oops/instanceOop.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/objArrayKlass.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "prims/methodHandles.hpp"

  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubCodeGenerator.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "runtime/thread.inline.hpp"
  46 #ifdef COMPILER2
  47 #include "opto/runtime.hpp"
  48 #endif
  49 #if INCLUDE_ZGC
  50 #include "gc/z/zThreadLocalData.hpp"
  51 #endif
  52 
  53 // Declaration and definition of StubGenerator (no .hpp file).
  54 // For a more detailed description of the stub routine structure
  55 // see the comment in stubRoutines.hpp
  56 
  57 #define __ _masm->
  58 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
  59 #define a__ ((Assembler*)_masm)->


5677     __ enter(); // required for proper stackwalking of RuntimeStub frame
5678 
5679 #ifdef _WIN64
5680     __ push(rsi);
5681     __ push(rdi);
5682 #endif
5683     __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);
5684 
5685 #ifdef _WIN64
5686     __ pop(rdi);
5687     __ pop(rsi);
5688 #endif
5689 
5690     __ leave(); // required for proper stackwalking of RuntimeStub frame
5691     __ ret(0);
5692 
5693     return start;
5694 
5695   }
5696 

5697 #undef __
5698 #define __ masm->
5699 
5700   // Continuation point for throwing of implicit exceptions that are
5701   // not handled in the current activation. Fabricates an exception
5702   // oop and initiates normal exception dispatching in this
5703   // frame. Since we need to preserve callee-saved values (currently
5704   // only for C2, but done for C1 as well) we need a callee-saved oop
5705   // map and therefore have to make these stubs into RuntimeStubs
5706   // rather than BufferBlobs.  If the compiler needs all registers to
5707   // be preserved between the fault point and the exception handler
5708   // then it must assume responsibility for that in
5709   // AbstractCompiler::continuation_for_implicit_null_exception or
5710   // continuation_for_implicit_division_by_zero_exception. All other
5711   // implicit exceptions (e.g., NullPointerException or
5712   // AbstractMethodError on entry) are either at call sites or
5713   // otherwise assume that stack unwinding will be initiated, so
5714   // caller saved registers were assumed volatile in the compiler.
5715   address generate_throw_exception(const char* name,
5716                                    address runtime_entry,


5906         StubRoutines::_dlog = generate_libmLog();
5907       }
5908       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
5909         StubRoutines::_dlog10 = generate_libmLog10();
5910       }
5911       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
5912         StubRoutines::_dpow = generate_libmPow();
5913       }
5914       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
5915         StubRoutines::_dsin = generate_libmSin();
5916       }
5917       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
5918         StubRoutines::_dcos = generate_libmCos();
5919       }
5920       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
5921         StubRoutines::_dtan = generate_libmTan();
5922       }
5923     }
5924   }
5925 


5926   void generate_all() {
5927     // Generates all stubs and initializes the entry points
5928 
5929     // These entry points require SharedInfo::stack0 to be set up in
5930     // non-core builds and need to be relocatable, so they each
5931     // fabricate a RuntimeStub internally.
5932     StubRoutines::_throw_AbstractMethodError_entry =
5933       generate_throw_exception("AbstractMethodError throw_exception",
5934                                CAST_FROM_FN_PTR(address,
5935                                                 SharedRuntime::
5936                                                 throw_AbstractMethodError));
5937 
5938     StubRoutines::_throw_IncompatibleClassChangeError_entry =
5939       generate_throw_exception("IncompatibleClassChangeError throw_exception",
5940                                CAST_FROM_FN_PTR(address,
5941                                                 SharedRuntime::
5942                                                 throw_IncompatibleClassChangeError));
5943 
5944     StubRoutines::_throw_NullPointerException_at_call_entry =
5945       generate_throw_exception("NullPointerException at call throw_exception",


6061       StubRoutines::_mulAdd = generate_mulAdd();
6062     }
6063 #ifndef _WINDOWS
6064     if (UseMontgomeryMultiplyIntrinsic) {
6065       StubRoutines::_montgomeryMultiply
6066         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
6067     }
6068     if (UseMontgomerySquareIntrinsic) {
6069       StubRoutines::_montgomerySquare
6070         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
6071     }
6072 #endif // WINDOWS
6073 #endif // COMPILER2
6074 
6075     if (UseVectorizedMismatchIntrinsic) {
6076       StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
6077     }
6078   }
6079 
6080  public:
6081   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
6082     if (all) {
6083       generate_all();
6084     } else {
6085       generate_initial();




6086     }
6087   }
6088 }; // end class declaration
6089 
6090 #define UCM_TABLE_MAX_ENTRIES 16
6091 void StubGenerator_generate(CodeBuffer* code, bool all) {
6092   if (UnsafeCopyMemory::_table == NULL) {
6093     UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
6094   }
6095   StubGenerator g(code, all);
6096 }


  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "ci/ciUtilities.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "gc/shared/barrierSetNMethod.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "memory/universe.hpp"
  34 #include "nativeInst_x86.hpp"
  35 #include "oops/instanceOop.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/objArrayKlass.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "prims/methodHandles.hpp"
  40 #include "runtime/continuation.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubCodeGenerator.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #ifdef COMPILER2
  48 #include "opto/runtime.hpp"
  49 #endif
  50 #if INCLUDE_ZGC
  51 #include "gc/z/zThreadLocalData.hpp"
  52 #endif
  53 
  54 // Declaration and definition of StubGenerator (no .hpp file).
  55 // For a more detailed description of the stub routine structure
  56 // see the comment in stubRoutines.hpp
  57 
  58 #define __ _masm->
  59 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
  60 #define a__ ((Assembler*)_masm)->


5678     __ enter(); // required for proper stackwalking of RuntimeStub frame
5679 
5680 #ifdef _WIN64
5681     __ push(rsi);
5682     __ push(rdi);
5683 #endif
5684     __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);
5685 
5686 #ifdef _WIN64
5687     __ pop(rdi);
5688     __ pop(rsi);
5689 #endif
5690 
5691     __ leave(); // required for proper stackwalking of RuntimeStub frame
5692     __ ret(0);
5693 
5694     return start;
5695 
5696   }
5697 
5698 void push_FrameInfo(MacroAssembler* _masm, Register fi, Register sp, Register fp, address pc) {
5699   if (!sp->is_valid()) { __ push(0); } else {
5700     if (sp == rsp) {
5701       __ movptr(fi, rsp);
5702       __ push(fi);
5703     } else {
5704       __ push(sp);
5705     }
5706   }
5707 
5708   if (!fp->is_valid()) __ push(0); else __ push(fp);
5709 
5710   __ lea(fi, ExternalAddress(pc));
5711   __ push(fi);
5712 
5713   __ movptr(fi, rsp); // make fi point to the beginning of the FrameInfo
5714 }
5715 
5716 void push_FrameInfo(MacroAssembler* _masm, Register fi, Register sp, Register fp, Register pc) {
5717   if (!sp->is_valid()) { __ push(0); } else {
5718     if (sp == rsp) {
5719       __ movptr(fi, rsp);
5720       __ push(fi);
5721     } else {
5722       __ push(sp);
5723     }
5724   }
5725 
5726   if (!fp->is_valid()) __ push(0); else __ push(fp);
5727 
5728   if (!pc->is_valid()) __ push(0); else __ push(pc);
5729 
5730   __ movptr(fi, rsp); // make fi point to the beginning of the FrameInfo
5731 }
5732 
5733 void pop_FrameInfo(MacroAssembler* _masm, Register sp, Register fp, Register pc) {
5734   if (!pc->is_valid()) __ lea(rsp, Address(rsp, wordSize)); else __ pop(pc);
5735   if (!fp->is_valid()) __ lea(rsp, Address(rsp, wordSize)); else __ pop(fp);
5736   if (!sp->is_valid()) __ lea(rsp, Address(rsp, wordSize)); else __ pop(sp);
5737 }
5738 
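[Editorial sketch] The two push_FrameInfo overloads and pop_FrameInfo above build and tear down a three-word record on the stack: sp is pushed first, then fp, then pc, and fi is left pointing at the last push. Below is a minimal C++ sketch of the implied layout; the struct name and field types are assumptions inferred from the push/pop order and from the word offsets read back in generate_cont_jump_from_safepoint further down (0 = pc, 1 = fp, 2 = sp), not a declaration taken from this page.

    #include <cstdint>
    typedef unsigned char* address;   // HotSpot-style typedef, repeated so the sketch is self-contained

    struct FrameInfo {                // hypothetical mirror of the on-stack record
      address   pc;   // word 0: return pc, pushed last, so at the lowest address; fi points here
      intptr_t* fp;   // word 1: frame pointer; may carry one extra indirection (see Continuation::freeze)
      intptr_t* sp;   // word 2: stack pointer
    };
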
5739 static Register get_thread() {
5740 #ifdef _LP64
5741   return r15_thread;
5742 #else
5743   get_thread(rdi);
5744   return rdi;
5745 #endif // LP64
5746 }
5747 
5748 static void setup_freeze_invocation(MacroAssembler* _masm, address pc) {
5749   Register thread = get_thread();
5750   NOT_LP64(__ push(thread));
5751   LP64_ONLY(__ movptr(c_rarg0, thread));
5752   __ set_last_Java_frame(rsp, rbp, pc);
5753 }
5754 
5755 static void teardown_freeze_invocation(MacroAssembler* _masm) {
5756   __ reset_last_Java_frame(true);
5757   NOT_LP64(__ pop(rdi));
5758 }
5759 
5760 // c_rarg1 is from interpreter
5761 RuntimeStub* generate_cont_doYield() {
5762     const char *name = "cont_doYield";
5763 
5764     enum layout {
5765       frameinfo_11,
5766       frameinfo_12,
5767       frameinfo_21,
5768       frameinfo_22,
5769       frameinfo_31,
5770       frameinfo_32,
5771       rbp_off,
5772       rbpH_off,
5773       return_off,
5774       return_off2,
5775       framesize // inclusive of return address
5776     };
5777     // assert(is_even(framesize/2), "sp not 16-byte aligned");
5778     int insts_size = 512;
5779     int locs_size  = 64;
5780     CodeBuffer code(name, insts_size, locs_size);
5781     OopMapSet* oop_maps  = new OopMapSet();
5782     MacroAssembler* masm = new MacroAssembler(&code);
5783     MacroAssembler* _masm = masm;
5784 
5785     // MacroAssembler* masm = _masm;
5786     // StubCodeMark mark(this, "StubRoutines", name);
5787 
5788     // second argument is the FrameInfo
5789     Register fi = c_rarg1;
5790 
5791     address start = __ pc();
5792 
5793     __ movl(c_rarg2, c_rarg1);          // save from interpreter
5794     __ movptr(rax, Address(rsp, 0));    // use return address as the frame pc // __ lea(rax, InternalAddress(pcxxxx));
5795     __ lea(fi, Address(rsp, wordSize)); // skip return address
5796     __ movptr(c_rarg3, rbp);
5797 
5798     // __ stop("FFFFF");
5799     __ enter();
5800 
5801     // // return address and rbp are already in place
5802     // __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
5803 
5804     push_FrameInfo(masm, fi, fi, c_rarg3, rax);
5805 
5806     int frame_complete = __ pc() - start;
5807     address the_pc = __ pc();
5808 
5809     __ post_call_nop(); // this must come exactly after the pc value that is pushed into the FrameInfo; we use this nop for fast CodeBlob lookup
5810 
5811     if (ContPerfTest > 5) {
5812       setup_freeze_invocation(_masm, the_pc);
5813       __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::freeze), 3);
5814       teardown_freeze_invocation(_masm);
5815 
5816       // if (from_java) {
5817       //__ set_last_Java_frame(rsp, rbp, the_pc); // may be unnecessary. also, consider MacroAssembler::call_VM_leaf_base
5818       //__ call_VM(noreg, CAST_FROM_FN_PTR(address, Continuation::freeze), fi, false); // do NOT check exceptions; they'll get forwarded to the caller
5819       // } else {
5820       //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::freeze_C), fi);
5821       // }
5822     }
5823 
5824     Label pinned;
5825     __ pop(c_rarg2); // read the pc from the FrameInfo
5826     if (ContPerfTest <= 5) { __ xorq(c_rarg2, c_rarg2); __ xorq(rax, rax); }
5827     __ testq(c_rarg2, c_rarg2);
5828     __ jcc(Assembler::zero, pinned);
5829 
5830     __ pop(rbp); // not pinned -- jump to Continuation.run (the entry frame)
5831     __ movptr(rbp, Address(rbp, 0)); // frame_info->fp has an indirection here. See Continuation::freeze for an explanation.
5832     __ pop(fi);
5833     __ movptr(rsp, fi);
5834     __ jmp(c_rarg2);
5835 
5836     __ bind(pinned); // pinned -- return to caller
5837     __ lea(rsp, Address(rsp, wordSize*2)); // "pop" the rest of the FrameInfo struct
5838 
5839     __ leave();
5840     __ ret(0);
5841 
5842     // return start;
5843 
5844     OopMap* map = new OopMap(framesize, 1);
5845     // map->set_callee_saved(VMRegImpl::stack2reg(rbp_off), rbp->as_VMReg());
5846     oop_maps->add_gc_map(the_pc - start, map);
5847 
5848     RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
5849     RuntimeStub::new_runtime_stub(name,
5850                                   &code,
5851                                   frame_complete,
5852                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
5853                                   oop_maps, false);
5854     return stub;
5855   }
5856 
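[Editorial sketch] In generate_cont_doYield above, the layout enum counts ten 32-bit slots (framesize == 10), while RuntimeStub::new_runtime_stub expects the frame size in machine words, hence the shift by LogBytesPerWord - LogBytesPerInt. A minimal, self-contained sketch of that arithmetic, assuming the usual 64-bit values of those constants:

    #include <cstdio>

    int main() {
      const int LogBytesPerInt  = 2;   // 4-byte stack slots
      const int LogBytesPerWord = 3;   // 8-byte words on x86_64
      const int framesize_slots = 10;  // the layout enum above ends at framesize == 10
      int framesize_words = framesize_slots >> (LogBytesPerWord - LogBytesPerInt);
      printf("%d slots -> %d words\n", framesize_slots, framesize_words);  // prints "10 slots -> 5 words"
      return 0;
    }
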
5857   address generate_cont_jump_from_safepoint() {
5858     StubCodeMark mark(this, "StubRoutines","Continuation jump from safepoint");
5859 
5860     Register fi = rbx;
5861 
5862     address start = __ pc();
5863 
5864     __ get_thread(r15_thread);
5865     __ reset_last_Java_frame(true); // false would be fine, too, I guess
5866 
5867     __ lea(fi, Address(r15_thread, JavaThread::cont_frame_offset()));
5868     __ movptr(rdx, Address(fi, wordSize*0)); // pc
5869     __ movptr(rbp, Address(fi, wordSize*1)); // fp
5870     __ movptr(rbp, Address(rbp, 0)); // fp is indirect. See Continuation::freeze for an explanation.
5871     __ movptr(rsp, Address(fi, wordSize*2)); // sp
5872 
5873     __ xorq(rax, rax);
5874     __ movptr(Address(fi, wordSize*0), rax); // pc
5875     __ movptr(Address(fi, wordSize*1), rax); // fp
5876     __ movptr(Address(fi, wordSize*2), rax); // sp
5877     __ movb(Address(r15_thread, JavaThread::cont_preempt_offset()), 0);
5878 
5879     __ jmp(rdx);
5880 
5881     return start;
5882   }
5883 
5884   // c_rarg1 - sp
5885   // c_rarg2 - fp
5886   // c_rarg3 - pc
5887   address generate_cont_jump() {
5888     StubCodeMark mark(this, "StubRoutines","Continuation Jump");
5889     address start = __ pc();
5890 
5891     __ movptr(rbp, c_rarg2);
5892     __ movptr(rbp, Address(rbp, 0)); // rbp is indirect. See Continuation::freeze for an explanation.
5893     __ movptr(rsp, c_rarg1);
5894     __ jmp(c_rarg3);
5895 
5896     return start;
5897   }
5898 
5899   address generate_cont_thaw(bool return_barrier, bool exception) {
5900     assert (return_barrier || !exception, "must be");
5901 
5902     address start = __ pc();
5903 
5904     // TODO: Handle Valhalla return types. May require generating different return barriers.
5905 
5906     Register fi = r11;
5907 
5908     if (!return_barrier) {
5909       __ pop(c_rarg3); // pop return address. If we don't do this, we get a drift where the bottom-most frozen frame continuously grows
5910       // __ lea(rsp, Address(rsp, wordSize)); // pop return address. If we don't do this, we get a drift where the bottom-most frozen frame continuously grows
5911       // write sp to thread->_cont_frame.sp
5912       __ lea(fi, Address(r15_thread, JavaThread::cont_frame_offset()));
5913       __ movptr(Address(fi, wordSize*2), rsp); // sp
5914     } else {
5915       Label no_saved_sp;
5916       __ lea(fi, Address(r15_thread, JavaThread::cont_frame_offset()));
5917       __ movptr(fi, Address(fi, wordSize*2)); // sp
5918       __ testq(fi, fi);
5919       __ jcc(Assembler::zero, no_saved_sp);
5920       __ movptr(rsp, fi);
5921       __ bind(no_saved_sp);
5922     }
5923 
5924     Label thaw_success;
5925     __ movptr(fi, rsp);
5926     if (return_barrier) {
5927       __ push(rax); __ push_d(xmm0); // preserve possible return value from a method returning to the return barrier
5928     }
5929     __ movl(c_rarg1, return_barrier);
5930     push_FrameInfo(_masm, fi, fi, rbp, c_rarg3);
5931     if (ContPerfTest > 105) {
5932       __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), fi, c_rarg1);
5933     } else {
5934       __ xorq(rax, rax);
5935     }
5936     __ testq(rax, rax);           // rax contains the size of the frames to thaw, 0 if overflow or no more frames
5937     __ jcc(Assembler::notZero, thaw_success);
5938 
5939     pop_FrameInfo(_masm, fi, rbp, rbx);
5940     if (return_barrier) {
5941       __ pop_d(xmm0); __ pop(rax); // restore return value (no safepoint in the call to thaw, so even an oop return value should be OK)
5942     }
5943     __ movptr(rsp, fi); // we're now on the yield frame (which is at a higher address than us because rsp has been pushed down)
5944     __ jmp(rbx); // a jump to StubRoutines::throw_StackOverflowError_entry
5945 
5946     __ bind(thaw_success);
5947 
5948     pop_FrameInfo(_masm, fi, rbp, c_rarg3); // c_rarg3 would still be our return address
5949     if (return_barrier) {
5950       __ pop_d(xmm0); __ pop(rdx);   // TEMPORARILY restore return value (we're going to push it again, but rsp is about to move)
5951     }
5952 
5953     __ subq(rsp, rax);             // make room for the thawed frames
5954     __ subptr(rsp, wordSize);      // make room for return address
5955     __ andptr(rsp, -16); // align
5956     if (return_barrier) {
5957       __ push(rdx); __ push_d(xmm0); // save original return value -- again
5958     }
5959     push_FrameInfo(_masm, fi, fi, rbp, c_rarg3);
5960     __ movl(c_rarg1, return_barrier);
5961     __ movl(c_rarg2, exception);
5962     if (ContPerfTest > 112) {
5963       if (!return_barrier && JvmtiExport::can_support_continuations()) {
5964         __ call_VM(noreg, CAST_FROM_FN_PTR(address, Continuation::thaw), fi, c_rarg1, c_rarg2);
5965       } else {
5966         __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::thaw_leaf), fi, c_rarg1, c_rarg2);
5967       }
5968     }
5969     if (exception) {
5970       __ movptr(rdx, rax); // rdx must contain the original pc in the case of exception
5971     }
5972     pop_FrameInfo(_masm, fi, rbp, rbx);
5973     if (return_barrier) {
5974       __ pop_d(xmm0); __ pop(rax); // restore return value (no safepoint in the call to thaw, so even an oop return value should be OK)
5975     }
5976 
5977     __ movptr(rsp, fi); // we're now on the yield frame (which is at a higher address than us because rsp has been pushed down)
5978 
5979     if (!return_barrier) {
5980       // This is necessary for forced yields, as the return address (in rbx) is captured in a call_VM, which skips the restoration of rbcp and locals
5981       // ... but it does no harm even for ordinary yields
5982       // TODO: use InterpreterMacroAssembler
5983       static const Register _locals_register = LP64_ONLY(r14) NOT_LP64(rdi);
5984       static const Register _bcp_register    = LP64_ONLY(r13) NOT_LP64(rsi);
5985 
5986       Label not_interpreter;
5987       __ testq(rax, rax); // rax is non-zero iff we're jumping into the interpreter
5988       __ jcc(Assembler::zero, not_interpreter);
5989 
5990       // see InterpreterMacroAssembler::restore_bcp/restore_locals
5991       __ movptr(_bcp_register,    Address(rbp, frame::interpreter_frame_bcp_offset    * wordSize));
5992       __ movptr(_locals_register, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
5993       // __ reinit_heapbase();
5994 
5995       __ bind(not_interpreter);
5996 
5997       __ movl(rax, 0); // return 0 (success) from doYield
5998     }
5999 
6000     __ jmp(rbx);
6001 
6002     return start;
6003   }
6004 
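[Editorial sketch] Before the thawed frames are copied back, the stub above makes room with subq(rsp, rax), reserves one word for a return address, and realigns with andptr(rsp, -16). A minimal sketch of that pointer arithmetic with illustrative values (the address and the size below are made up; only the operations mirror the stub):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t rsp  = 0x7ffc00001238;   // pretend current stack pointer
      uintptr_t need = 344;              // pretend result of Continuation::prepare_thaw, in bytes
      rsp -= need;                       // subq(rsp, rax): room for the thawed frames
      rsp -= 8;                          // subptr(rsp, wordSize): room for a return address
      rsp &= ~(uintptr_t)15;             // andptr(rsp, -16): align down to 16 bytes
      printf("0x%llx\n", (unsigned long long)rsp);   // prints 0x7ffc000010d0
      return 0;
    }
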
6005   address generate_cont_thaw() {
6006     StubCodeMark mark(this, "StubRoutines", "Cont thaw");
6007     address start = __ pc();
6008     generate_cont_thaw(false, false);
6009     return start;
6010   }
6011 
6012   address generate_cont_returnBarrier() {
6013     // TODO: will probably need multiple return barriers depending on return type
6014     StubCodeMark mark(this, "StubRoutines", "cont return barrier");
6015     address start = __ pc();
6016 
6017     if (CONT_FULL_STACK)
6018       __ stop("RETURN BARRIER -- UNREACHABLE 0");
6019 
6020     generate_cont_thaw(true, false);
6021 
6022     return start;
6023   }
6024 
6025   address generate_cont_returnBarrier_exception() {
6026     StubCodeMark mark(this, "StubRoutines", "cont return barrier exception handler");
6027     address start = __ pc();
6028 
6029     if (CONT_FULL_STACK)
6030       __ stop("RETURN BARRIER -- UNREACHABLE 0");
6031 
6032     generate_cont_thaw(true, true);
6033 
6034     return start;
6035   }
6036 
6037   address generate_cont_getPC() {
6038     StubCodeMark mark(this, "StubRoutines", "GetPC");
6039     address start = __ pc();
6040 
6041     __ movptr(rax, Address(rsp, 0));
6042     __ ret(0);
6043 
6044     return start;
6045   }
6046 
6047   address generate_cont_getSP() { // used by C2
6048     StubCodeMark mark(this, "StubRoutines", "getSP");
6049     address start = __ pc();
6050 
6051     __ set_cont_fastpath(get_thread(), 1);
6052     __ lea(rax, Address(rsp, wordSize));
6053     __ ret(0);
6054 
6055     return start;
6056   }
6057 
6058   address generate_cont_getFP() {
6059     StubCodeMark mark(this, "StubRoutines", "GetFP");
6060     address start = __ pc();
6061 
6062     __ stop("WHAT?");
6063     __ lea(rax, Address(rsp, wordSize));
6064     __ ret(0);
6065 
6066     return start;
6067   }
6068 
6069 #undef __
6070 #define __ masm->
6071 
6072   // Continuation point for throwing of implicit exceptions that are
6073   // not handled in the current activation. Fabricates an exception
6074   // oop and initiates normal exception dispatching in this
6075   // frame. Since we need to preserve callee-saved values (currently
6076   // only for C2, but done for C1 as well) we need a callee-saved oop
6077   // map and therefore have to make these stubs into RuntimeStubs
6078   // rather than BufferBlobs.  If the compiler needs all registers to
6079   // be preserved between the fault point and the exception handler
6080   // then it must assume responsibility for that in
6081   // AbstractCompiler::continuation_for_implicit_null_exception or
6082   // continuation_for_implicit_division_by_zero_exception. All other
6083   // implicit exceptions (e.g., NullPointerException or
6084   // AbstractMethodError on entry) are either at call sites or
6085   // otherwise assume that stack unwinding will be initiated, so
6086   // caller saved registers were assumed volatile in the compiler.
6087   address generate_throw_exception(const char* name,
6088                                    address runtime_entry,


6278         StubRoutines::_dlog = generate_libmLog();
6279       }
6280       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
6281         StubRoutines::_dlog10 = generate_libmLog10();
6282       }
6283       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
6284         StubRoutines::_dpow = generate_libmPow();
6285       }
6286       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
6287         StubRoutines::_dsin = generate_libmSin();
6288       }
6289       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
6290         StubRoutines::_dcos = generate_libmCos();
6291       }
6292       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
6293         StubRoutines::_dtan = generate_libmTan();
6294       }
6295     }
6296   }
6297 
6298   void generate_phase1() {
6299     // Continuation stubs:
6300     StubRoutines::_cont_thaw          = generate_cont_thaw();
6301     StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
6302     StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
6303     StubRoutines::_cont_doYield_stub = generate_cont_doYield();
6304     StubRoutines::_cont_doYield    = StubRoutines::_cont_doYield_stub->entry_point();
6305     StubRoutines::_cont_jump_from_sp = generate_cont_jump_from_safepoint();
6306     StubRoutines::_cont_jump       = generate_cont_jump();
6307     StubRoutines::_cont_getSP      = generate_cont_getSP();
6308     StubRoutines::_cont_getPC      = generate_cont_getPC();
6309   }
6310 
6311   void generate_all() {
6312     // Generates all stubs and initializes the entry points
6313 
6314     // These entry points require SharedInfo::stack0 to be set up in
6315     // non-core builds and need to be relocatable, so they each
6316     // fabricate a RuntimeStub internally.
6317     StubRoutines::_throw_AbstractMethodError_entry =
6318       generate_throw_exception("AbstractMethodError throw_exception",
6319                                CAST_FROM_FN_PTR(address,
6320                                                 SharedRuntime::
6321                                                 throw_AbstractMethodError));
6322 
6323     StubRoutines::_throw_IncompatibleClassChangeError_entry =
6324       generate_throw_exception("IncompatibleClassChangeError throw_exception",
6325                                CAST_FROM_FN_PTR(address,
6326                                                 SharedRuntime::
6327                                                 throw_IncompatibleClassChangeError));
6328 
6329     StubRoutines::_throw_NullPointerException_at_call_entry =
6330       generate_throw_exception("NullPointerException at call throw_exception",


6446       StubRoutines::_mulAdd = generate_mulAdd();
6447     }
6448 #ifndef _WINDOWS
6449     if (UseMontgomeryMultiplyIntrinsic) {
6450       StubRoutines::_montgomeryMultiply
6451         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
6452     }
6453     if (UseMontgomerySquareIntrinsic) {
6454       StubRoutines::_montgomerySquare
6455         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
6456     }
6457 #endif // WINDOWS
6458 #endif // COMPILER2
6459 
6460     if (UseVectorizedMismatchIntrinsic) {
6461       StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
6462     }
6463   }
6464 
6465  public:
6466   StubGenerator(CodeBuffer* code, int phase) : StubCodeGenerator(code) {
6467     if (phase == 0) {


6468       generate_initial();
6469     } else if (phase == 1) {
6470       generate_phase1();
6471     } else {
6472       generate_all();
6473     }
6474   }
6475 }; // end class declaration
6476 
6477 #define UCM_TABLE_MAX_ENTRIES 16
6478 void StubGenerator_generate(CodeBuffer* code, int phase) {
6479   if (UnsafeCopyMemory::_table == NULL) {
6480     UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
6481   }
6482   StubGenerator g(code, phase);
6483 }
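[Editorial sketch] For context, the constructor's bool all parameter has become int phase with three cases (0 = generate_initial, 1 = generate_phase1 for the continuation stubs, anything else = generate_all). The call sites are not on this page, so the following start-up sequence is only an assumed illustration of how the new entry point would be driven:

    // Hypothetical start-up sequence (assumed; the real callers live elsewhere and
    // are not part of this page):
    StubGenerator_generate(code, 0);  // was StubGenerator_generate(code, false): generate_initial()
    StubGenerator_generate(code, 1);  // new: generate_phase1(), the continuation stubs
    StubGenerator_generate(code, 2);  // was StubGenerator_generate(code, true):  generate_all()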