    __ ret(0);


    __ BIND(deoptimize_label);

    __ popa();
    __ pop(c_rarg0);

    __ leave();

    // This can be taken out, but is good for verification purposes: getting a SIGSEGV
    // here while still having a correct stack is valuable.
    __ testptr(rsp, Address(rsp, 0));

    __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
    __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point

    return start;
  }

  // Call the runtime to ensure sufficient lock-stack size.
  // Arguments:
  //  - rax: the required _limit pointer
  address generate_check_lock_stack() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "check_lock_stack");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter(); // save rbp

    __ pusha();

    // The method may have floats as arguments, and we must spill them before calling
    // the VM runtime.
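    // Note: push_FPU_state() saves the full FPU/XMM register state to the stack, so the
    // float/double argument registers survive the runtime call; the commented-out block
    // below performed the same spill manually for xmm0-xmm7 and is kept only for reference.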
    __ push_FPU_state();
    /*
    assert(Argument::n_float_register_parameters_j == 8, "Assumption");
    const int xmm_size = wordSize * 2;
    const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
    __ subptr(rsp, xmm_spill_size);
    __ movdqu(Address(rsp, xmm_size * 7), xmm7);
    __ movdqu(Address(rsp, xmm_size * 6), xmm6);
    __ movdqu(Address(rsp, xmm_size * 5), xmm5);
    __ movdqu(Address(rsp, xmm_size * 4), xmm4);
    __ movdqu(Address(rsp, xmm_size * 3), xmm3);
    __ movdqu(Address(rsp, xmm_size * 2), xmm2);
    __ movdqu(Address(rsp, xmm_size * 1), xmm1);
    __ movdqu(Address(rsp, xmm_size * 0), xmm0);
    */
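    // call_VM_leaf() moves rax into c_rarg0 and performs a leaf call into the VM:
    // no Java frame anchor is set up and no safepoint check is performed.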
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<void (*)(oop*)>(LockStack::ensure_lock_stack_size)), rax);
    /*
    __ movdqu(xmm0, Address(rsp, xmm_size * 0));
    __ movdqu(xmm1, Address(rsp, xmm_size * 1));
    __ movdqu(xmm2, Address(rsp, xmm_size * 2));
    __ movdqu(xmm3, Address(rsp, xmm_size * 3));
    __ movdqu(xmm4, Address(rsp, xmm_size * 4));
    __ movdqu(xmm5, Address(rsp, xmm_size * 5));
    __ movdqu(xmm6, Address(rsp, xmm_size * 6));
    __ movdqu(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_spill_size);
    */
    __ pop_FPU_state();
    __ popa();

    __ leave();

    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - out address
   *   c_rarg1   - in address
   *   c_rarg2   - offset
   *   c_rarg3   - len
   * not Win64
   *   c_rarg4   - k
   * Win64
   *   rsp+40    - k
   */
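  // Backs the java.math.BigInteger.implMulAdd intrinsic: multiplies 'len' ints of the
  // 'in' array by the single word 'k' and accumulates the product into 'out' at the
  // given offset; the final carry is returned (in rax).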
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)

  // [... remainder of generate_mulAdd and intervening stub generators elided;
  //      the registration code below belongs to a later part of the file ...]

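      // Registration of the Base64 intrinsic support: constant tables plus the
      // encodeBlock/decodeBlock stubs used by java.util.Base64. The table set
      // below is only needed by the AVX-512 VBMI variant of the stubs.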
      if (VM_Version::supports_avx512_vbmi()) {
        StubRoutines::x86::_shuffle_base64 = base64_shuffle_addr();
        StubRoutines::x86::_lookup_lo_base64 = base64_vbmi_lookup_lo_addr();
        StubRoutines::x86::_lookup_hi_base64 = base64_vbmi_lookup_hi_addr();
        StubRoutines::x86::_lookup_lo_base64url = base64_vbmi_lookup_lo_url_addr();
        StubRoutines::x86::_lookup_hi_base64url = base64_vbmi_lookup_hi_url_addr();
        StubRoutines::x86::_pack_vec_base64 = base64_vbmi_pack_vec_addr();
        StubRoutines::x86::_join_0_1_base64 = base64_vbmi_join_0_1_addr();
        StubRoutines::x86::_join_1_2_base64 = base64_vbmi_join_1_2_addr();
        StubRoutines::x86::_join_2_3_base64 = base64_vbmi_join_2_3_addr();
      }
      StubRoutines::x86::_decoding_table_base64 = base64_decoding_table_addr();
      StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
      StubRoutines::_base64_decodeBlock = generate_base64_decodeBlock();
    }

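    // The nmethod entry barrier stub is needed only when the selected GC provides
    // a BarrierSetNMethod (e.g. ZGC or Shenandoah), i.e. when nmethod entries can be armed.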
    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
    if (UseFastLocking) {
      StubRoutines::x86::_check_lock_stack = generate_check_lock_stack();
    }
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
    if (VM_Version::supports_avx512_vbmi2()) {
      StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
      StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift();
    }
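    // Montgomery multiply/square are implemented as C++ helpers in SharedRuntime rather
    // than as generated assembly, so plain function pointers are installed here.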
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare