520 if( freg_arg0 == (uint)i ) {
521 regs[i].set2(xmm0->as_VMReg());
522 } else if( freg_arg1 == (uint)i ) {
523 regs[i].set2(xmm1->as_VMReg());
524 } else {
525 regs[i].set2(VMRegImpl::stack2reg(dstack));
526 dstack += 2;
527 }
528 break;
529   case T_VOID: regs[i].set_bad(); break;
531 default:
532 ShouldNotReachHere();
533 break;
534 }
535 }
536
537 return stack;
538 }
539
540 // Patch the caller's callsite with the entry to compiled code, if it exists.
541 static void patch_callers_callsite(MacroAssembler *masm) {
542 Label L;
543 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
544 __ jcc(Assembler::equal, L);
545 // Schedule the branch target address early.
546 // Call into the VM to patch the caller, then jump to compiled callee
547   // rax isn't live, so capture the return address while we easily can
548 __ movptr(rax, Address(rsp, 0));
549 __ pusha();
550 __ pushf();
551
552 if (UseSSE == 1) {
553 __ subptr(rsp, 2*wordSize);
554 __ movflt(Address(rsp, 0), xmm0);
555 __ movflt(Address(rsp, wordSize), xmm1);
556 }
557 if (UseSSE >= 2) {
558 __ subptr(rsp, 4*wordSize);
559 __ movdbl(Address(rsp, 0), xmm0);
581 __ addptr(rsp, 2*wordSize);
582 }
583 if (UseSSE >= 2) {
584 __ movdbl(xmm0, Address(rsp, 0));
585 __ movdbl(xmm1, Address(rsp, 2*wordSize));
586 __ addptr(rsp, 4*wordSize);
587 }
588
589 __ popf();
590 __ popa();
591 __ bind(L);
592 }
593
594
595 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
596 int next_off = st_off - Interpreter::stackElementSize;
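  // Note (added): a double occupies two interpreter stack slots; the 8-byte
  // movdbl below, stored at next_off (the lower address on this 32-bit port),
  // fills both the next_off and st_off slots at once.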
597 __ movdbl(Address(rsp, next_off), r);
598 }
599
600 static void gen_c2i_adapter(MacroAssembler *masm,
601 int total_args_passed,
602 int comp_args_on_stack,
603 const BasicType *sig_bt,
604 const VMRegPair *regs,
605 Label& skip_fixup) {
606 // Before we get into the guts of the C2I adapter, see if we should be here
607 // at all. We've come from compiled code and are attempting to jump to the
608 // interpreter, which means the caller made a static call to get here
609 // (vcalls always get a compiled target if there is one). Check for a
610 // compiled target. If there is one, we need to patch the caller's call.
611 patch_callers_callsite(masm);
612
613 __ bind(skip_fixup);
614
615 #ifdef COMPILER2
616 // C2 may leave the stack dirty if not in SSE2+ mode
617 if (UseSSE >= 2) {
618 __ verify_FPU(0, "c2i transition should have clean FPU stack");
619 } else {
620 __ empty_FPU_stack();
621 }
622 #endif /* COMPILER2 */
623
624   // Since all args are passed on the stack,
625   // total_args_passed * Interpreter::stackElementSize
626   // is the space we need.
627 int extraspace = total_args_passed * Interpreter::stackElementSize;
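  // Worked example (hypothetical values): 4 arguments with the 32-bit
  // interpreter's 4-byte stackElementSize give extraspace == 16 bytes,
  // matching the st_off table below (12, 8, 4, 0).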
628
629 // Get return address
630 __ pop(rax);
631
632 // set senderSP value
633 __ movptr(rsi, rsp);
634
635 __ subptr(rsp, extraspace);
636
637 // Now write the args into the outgoing interpreter space
638 for (int i = 0; i < total_args_passed; i++) {
639 if (sig_bt[i] == T_VOID) {
640 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
641 continue;
642 }
643
644 // st_off points to lowest address on stack.
645 int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
646 int next_off = st_off - Interpreter::stackElementSize;
647
648 // Say 4 args:
649 // i st_off
650 // 0 12 T_LONG
651 // 1 8 T_VOID
652 // 2 4 T_OBJECT
653 // 3 0 T_BOOL
654 VMReg r_1 = regs[i].first();
655 VMReg r_2 = regs[i].second();
656 if (!r_1->is_valid()) {
657 assert(!r_2->is_valid(), "");
658 continue;
659 }
660
661 if (r_1->is_stack()) {
662 // memory to memory use fpu stack top
663 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
664
665 if (!r_2->is_valid()) {
671 // st_off == MSW, st_off-wordSize == LSW
672
673 __ movptr(rdi, Address(rsp, ld_off));
674 __ movptr(Address(rsp, next_off), rdi);
675 __ movptr(rdi, Address(rsp, ld_off + wordSize));
676 __ movptr(Address(rsp, st_off), rdi);
677 }
678 } else if (r_1->is_Register()) {
679 Register r = r_1->as_Register();
680 if (!r_2->is_valid()) {
681 __ movl(Address(rsp, st_off), r);
682 } else {
683 // long/double in gpr
684 ShouldNotReachHere();
685 }
686 } else {
687 assert(r_1->is_XMMRegister(), "");
688 if (!r_2->is_valid()) {
689 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
690 } else {
691 assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
692 move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
693 }
694 }
695 }
696
697 // Schedule the branch target address early.
698 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
699 // And repush original return address
700 __ push(rax);
701 __ jmp(rcx);
702 }
703
704
705 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
706 int next_val_off = ld_off - Interpreter::stackElementSize;
707 __ movdbl(r, Address(saved_sp, next_val_off));
708 }
709
710 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
711 address code_start, address code_end,
712 Label& L_ok) {
713 Label L_fail;
714 __ lea(temp_reg, AddressLiteral(code_start, relocInfo::none));
715 __ cmpptr(pc_reg, temp_reg);
716 __ jcc(Assembler::belowEqual, L_fail);
717 __ lea(temp_reg, AddressLiteral(code_end, relocInfo::none));
718 __ cmpptr(pc_reg, temp_reg);
719 __ jcc(Assembler::below, L_ok);
720 __ bind(L_fail);
721 }
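// A minimal usage sketch for range_check (an assumption: it mirrors the
// VerifyAdapterCalls-style checks found elsewhere in SharedRuntime).
// range_check falls through on failure and jumps to L_ok on success:
//
//   Label L_ok;
//   range_check(masm, rax /* pc */, rdi /* temp */,
//               Interpreter::code()->code_start(),
//               Interpreter::code()->code_end(),
//               L_ok);
//   __ stop("i2c: caller pc is not in the interpreter");
//   __ bind(L_ok);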
722
723 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
724 int total_args_passed,
725 int comp_args_on_stack,
726 const BasicType *sig_bt,
727 const VMRegPair *regs) {
728 // Note: rsi contains the senderSP on entry. We must preserve it since
729   // we may do an i2c -> c2i transition if we lose a race where compiled
730 // code goes non-entrant while we get args ready.
731
732 // Adapters can be frameless because they do not require the caller
733 // to perform additional cleanup work, such as correcting the stack pointer.
734 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
735 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
736 // even if a callee has modified the stack pointer.
737 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
738 // routinely repairs its caller's stack pointer (from sender_sp, which is set
739 // up via the senderSP register).
740 // In other words, if *either* the caller or callee is interpreted, we can
741 // get the stack pointer repaired after a call.
742 // This is why c2i and i2c adapters cannot be indefinitely composed.
743 // In particular, if a c2i adapter were to somehow call an i2c adapter,
744 // both caller and callee would be compiled methods, and neither would
745 // clean up the stack pointer changes performed by the two adapters.
746 // If this happens, control eventually transfers back to the compiled
747 // caller, but with an uncorrected stack, causing delayed havoc.
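  // A rough summary of the repair paths just described (illustrative only):
  //
  //   interpreted caller -> i2c -> compiled callee
  //     the caller repairs its own SP from interpreter_frame_last_sp
  //   compiled caller -> c2i -> interpreted callee
  //     the callee repairs the caller's SP from sender_sp (senderSP register)
  //   compiled caller -> c2i -> i2c -> compiled callee   (must not happen:
  //     nobody would repair SP, leaving the caller's stack corrupted)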
801 }
802
803 // Align the outgoing SP
804 __ andptr(rsp, -(StackAlignmentInBytes));
805
806 // push the return address on the stack (note that pushing, rather
807 // than storing it, yields the correct frame alignment for the callee)
808 __ push(rax);
809
810 // Put saved SP in another register
811 const Register saved_sp = rax;
812 __ movptr(saved_sp, rdi);
813
814
815 // Will jump to the compiled code just as if compiled code was doing it.
816 // Pre-load the register-jump target early, to schedule it better.
817 __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
818
819 // Now generate the shuffle code. Pick up all register args and move the
820 // rest through the floating point stack top.
821 for (int i = 0; i < total_args_passed; i++) {
822 if (sig_bt[i] == T_VOID) {
823 // Longs and doubles are passed in native word order, but misaligned
824 // in the 32-bit build.
825 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
826 continue;
827 }
828
829 // Pick up 0, 1 or 2 words from SP+offset.
830
831 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
832 "scrambled load targets?");
833 // Load in argument order going down.
834 int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
835 // Point to interpreter value (vs. tag)
836 int next_off = ld_off - Interpreter::stackElementSize;
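    // Worked example (hypothetical, mirroring the c2i table earlier):
    // with 4 args and 4-byte stack elements, ld_off = 16, 12, 8, 4 as
    // i runs from 0 to 3, and next_off is always ld_off - 4.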
840 VMReg r_1 = regs[i].first();
841 VMReg r_2 = regs[i].second();
842 if (!r_1->is_valid()) {
843 assert(!r_2->is_valid(), "");
844 continue;
845 }
846 if (r_1->is_stack()) {
847       // Convert stack slot to an SP offset (+ wordSize to account for the return address)
848 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
849
850       // We can use rsi as a temp here because compiled code doesn't need rsi as an
851       // input, and if we end up going through a c2i adapter because of a miss, a
852       // reasonable value of rsi will be generated.
853 if (!r_2->is_valid()) {
854 // __ fld_s(Address(saved_sp, ld_off));
912 // "compiled" so it is much better to make this transition
913 // invisible to the stack walking code. Unfortunately if
914   // we try to find the callee by normal means, a safepoint
915   // is possible. So we stash the desired callee in the thread,
916   // and the VM will find it there should this case occur.
917
918 __ get_thread(rax);
919 __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
920
921   // Move Method* to rax in case we end up in a c2i adapter.
922   // The c2i adapters expect Method* in rax (c2) because c2's
923   // resolve stubs return the result (the method) in rax.
924   // I'd love to fix this.
925 __ mov(rax, rbx);
926
927 __ jmp(rdi);
928 }
929
930 // ---------------------------------------------------------------
931 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
932 int total_args_passed,
933 int comp_args_on_stack,
934 const BasicType *sig_bt,
935 const VMRegPair *regs,
936 AdapterFingerPrint* fingerprint) {
937 address i2c_entry = __ pc();
938
939 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
940
941 // -------------------------------------------------------------------------
942   // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
943 // to the interpreter. The args start out packed in the compiled layout. They
944 // need to be unpacked into the interpreter layout. This will almost always
945 // require some stack space. We grow the current (compiled) stack, then repack
946 // the args. We finally end in a jump to the generic interpreter entry point.
947 // On exit from the interpreter, the interpreter will restore our SP (lest the
948 // compiled code, which relies solely on SP and not EBP, get sick).
949
950 address c2i_unverified_entry = __ pc();
951 Label skip_fixup;
952
953 Register data = rax;
954 Register receiver = rcx;
955 Register temp = rbx;
956
957 {
958 __ ic_check(1 /* end_alignment */);
959 __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
960     // Method might have been compiled since the call site was patched to
961     // interpreted; if that is the case, treat it as a miss so we can get
962     // the call site corrected.
963 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
964 __ jcc(Assembler::equal, skip_fixup);
965 }
966
967 address c2i_entry = __ pc();
968
969 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
970 bs->c2i_entry_barrier(masm);
971
972 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
973
974 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
975 }
976
977 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
978 VMRegPair *regs,
979 int total_args_passed) {
980
981 // We return the amount of VMRegImpl stack slots we need to reserve for all
982 // the arguments NOT counting out_preserve_stack_slots.
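  // Worked example (hypothetical signature; the switch body is elided here):
  // on this 32-bit port every C argument lives on the stack, so
  // (jint, jlong, jfloat) would need 1 + 2 + 1 = 4 VMRegImpl stack slots.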
983
984 uint stack = 0; // All arguments on stack
985
986 for( int i = 0; i < total_args_passed; i++) {
987 // From the type and the argument number (count) compute the location
988 switch( sig_bt[i] ) {
989 case T_BOOLEAN:
990 case T_CHAR:
991 case T_FLOAT:
992 case T_BYTE:
993 case T_SHORT:
2627 __ bind(pending);
2628
2629 RegisterSaver::restore_live_registers(masm);
2630
2631 // exception pending => remove activation and forward to exception handler
2632
2633 __ get_thread(thread);
2634 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
2635 __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
2636 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2637
2638 // -------------
2639 // make sure all code is generated
2640 masm->flush();
2641
2642 // return the blob
2643 // frame_size_words or bytes??
2644 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
2645 }
2646
2647 //------------------------------------------------------------------------------------------------------------------------
2648 // Continuation point for throwing of implicit exceptions that are not handled in
2649 // the current activation. Fabricates an exception oop and initiates normal
2650 // exception dispatching in this frame.
2651 //
2652 // Previously the compiler (c2) allowed for callee save registers on Java calls.
2653 // This is no longer true after adapter frames were removed but could possibly
2654 // be brought back in the future if the interpreter code was reworked and it
2655 // was deemed worthwhile. The comment below was left to describe what must
2656 // happen here if callee saves were resurrected. As it stands now this stub
2657 // could actually be a vanilla BufferBlob and have no oopMap at all.
2658 // Since it doesn't make much difference we've chosen to leave it the
2659 // way it was in the callee save days and keep the comment.
2660
2661 // If we need to preserve callee-saved values we need a callee-saved oop map and
2662 // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
2663 // If the compiler needs all registers to be preserved between the fault
2664 // point and the exception handler then it must assume responsibility for that in
2665 // AbstractCompiler::continuation_for_implicit_null_exception or
2666 // continuation_for_implicit_division_by_zero_exception. All other implicit
// ---------------------------------------------------------------------------
// A second variant of the same adapter code follows, using
// GrowableArray<SigEntry> signatures (inline-type support).
// ---------------------------------------------------------------------------
520 if( freg_arg0 == (uint)i ) {
521 regs[i].set2(xmm0->as_VMReg());
522 } else if( freg_arg1 == (uint)i ) {
523 regs[i].set2(xmm1->as_VMReg());
524 } else {
525 regs[i].set2(VMRegImpl::stack2reg(dstack));
526 dstack += 2;
527 }
528 break;
529   case T_VOID: regs[i].set_bad(); break;
531 default:
532 ShouldNotReachHere();
533 break;
534 }
535 }
536
537 return stack;
538 }
539
540 const uint SharedRuntime::java_return_convention_max_int = 1;
541 const uint SharedRuntime::java_return_convention_max_float = 1;
542 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
543 VMRegPair *regs,
544 int total_args_passed) {
545 Unimplemented();
546 return 0;
547 }
548
549 // Patch the caller's callsite with the entry to compiled code, if it exists.
550 static void patch_callers_callsite(MacroAssembler *masm) {
551 Label L;
552 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
553 __ jcc(Assembler::equal, L);
554 // Schedule the branch target address early.
555 // Call into the VM to patch the caller, then jump to compiled callee
556   // rax isn't live, so capture the return address while we easily can
557 __ movptr(rax, Address(rsp, 0));
558 __ pusha();
559 __ pushf();
560
561 if (UseSSE == 1) {
562 __ subptr(rsp, 2*wordSize);
563 __ movflt(Address(rsp, 0), xmm0);
564 __ movflt(Address(rsp, wordSize), xmm1);
565 }
566 if (UseSSE >= 2) {
567 __ subptr(rsp, 4*wordSize);
568 __ movdbl(Address(rsp, 0), xmm0);
590 __ addptr(rsp, 2*wordSize);
591 }
592 if (UseSSE >= 2) {
593 __ movdbl(xmm0, Address(rsp, 0));
594 __ movdbl(xmm1, Address(rsp, 2*wordSize));
595 __ addptr(rsp, 4*wordSize);
596 }
597
598 __ popf();
599 __ popa();
600 __ bind(L);
601 }
602
603
604 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
605 int next_off = st_off - Interpreter::stackElementSize;
606 __ movdbl(Address(rsp, next_off), r);
607 }
608
609 static void gen_c2i_adapter(MacroAssembler *masm,
610 const GrowableArray<SigEntry>& sig_extended,
611 const VMRegPair *regs,
612 Label& skip_fixup,
613 address start,
614 OopMapSet*& oop_maps,
615 int& frame_complete,
616 int& frame_size_in_words) {
617 // Before we get into the guts of the C2I adapter, see if we should be here
618 // at all. We've come from compiled code and are attempting to jump to the
619 // interpreter, which means the caller made a static call to get here
620 // (vcalls always get a compiled target if there is one). Check for a
621 // compiled target. If there is one, we need to patch the caller's call.
622 patch_callers_callsite(masm);
623
624 __ bind(skip_fixup);
625
626 #ifdef COMPILER2
627 // C2 may leave the stack dirty if not in SSE2+ mode
628 if (UseSSE >= 2) {
629 __ verify_FPU(0, "c2i transition should have clean FPU stack");
630 } else {
631 __ empty_FPU_stack();
632 }
633 #endif /* COMPILER2 */
634
635   // Since all args are passed on the stack,
636   // sig_extended.length() * Interpreter::stackElementSize
637   // is the space we need.
638 int extraspace = sig_extended.length() * Interpreter::stackElementSize;
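  // Note (added): sig_extended also carries a T_VOID entry for the second
  // half of each long/double (see the assert in the loop below), so
  // length() is already the interpreter slot count.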
639
640 // Get return address
641 __ pop(rax);
642
643 // set senderSP value
644 __ movptr(rsi, rsp);
645
646 __ subptr(rsp, extraspace);
647
648 // Now write the args into the outgoing interpreter space
649 for (int i = 0; i < sig_extended.length(); i++) {
650 if (sig_extended.at(i)._bt == T_VOID) {
651 assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
652 continue;
653 }
654
655 // st_off points to lowest address on stack.
656 int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
657 int next_off = st_off - Interpreter::stackElementSize;
658
659 // Say 4 args:
660 // i st_off
661 // 0 12 T_LONG
662 // 1 8 T_VOID
663 // 2 4 T_OBJECT
664 // 3 0 T_BOOL
665 VMReg r_1 = regs[i].first();
666 VMReg r_2 = regs[i].second();
667 if (!r_1->is_valid()) {
668 assert(!r_2->is_valid(), "");
669 continue;
670 }
671
672 if (r_1->is_stack()) {
673 // memory to memory use fpu stack top
674 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
675
676 if (!r_2->is_valid()) {
682 // st_off == MSW, st_off-wordSize == LSW
683
684 __ movptr(rdi, Address(rsp, ld_off));
685 __ movptr(Address(rsp, next_off), rdi);
686 __ movptr(rdi, Address(rsp, ld_off + wordSize));
687 __ movptr(Address(rsp, st_off), rdi);
688 }
689 } else if (r_1->is_Register()) {
690 Register r = r_1->as_Register();
691 if (!r_2->is_valid()) {
692 __ movl(Address(rsp, st_off), r);
693 } else {
694 // long/double in gpr
695 ShouldNotReachHere();
696 }
697 } else {
698 assert(r_1->is_XMMRegister(), "");
699 if (!r_2->is_valid()) {
700 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
701 } else {
702 assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type");
703 move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
704 }
705 }
706 }
707
708 // Schedule the branch target address early.
709 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
710 // And repush original return address
711 __ push(rax);
712 __ jmp(rcx);
713 }
714
715
716 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
717 int next_val_off = ld_off - Interpreter::stackElementSize;
718 __ movdbl(r, Address(saved_sp, next_val_off));
719 }
720
721 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
722 address code_start, address code_end,
723 Label& L_ok) {
724 Label L_fail;
725 __ lea(temp_reg, AddressLiteral(code_start, relocInfo::none));
726 __ cmpptr(pc_reg, temp_reg);
727 __ jcc(Assembler::belowEqual, L_fail);
728 __ lea(temp_reg, AddressLiteral(code_end, relocInfo::none));
729 __ cmpptr(pc_reg, temp_reg);
730 __ jcc(Assembler::below, L_ok);
731 __ bind(L_fail);
732 }
733
734 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
735 int comp_args_on_stack,
736 const GrowableArray<SigEntry>& sig_extended,
737 const VMRegPair *regs) {
738
739 // Note: rsi contains the senderSP on entry. We must preserve it since
740   // we may do an i2c -> c2i transition if we lose a race where compiled
741 // code goes non-entrant while we get args ready.
742
743 // Adapters can be frameless because they do not require the caller
744 // to perform additional cleanup work, such as correcting the stack pointer.
745 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
746 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
747 // even if a callee has modified the stack pointer.
748 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
749 // routinely repairs its caller's stack pointer (from sender_sp, which is set
750 // up via the senderSP register).
751 // In other words, if *either* the caller or callee is interpreted, we can
752 // get the stack pointer repaired after a call.
753 // This is why c2i and i2c adapters cannot be indefinitely composed.
754 // In particular, if a c2i adapter were to somehow call an i2c adapter,
755 // both caller and callee would be compiled methods, and neither would
756 // clean up the stack pointer changes performed by the two adapters.
757 // If this happens, control eventually transfers back to the compiled
758 // caller, but with an uncorrected stack, causing delayed havoc.
812 }
813
814 // Align the outgoing SP
815 __ andptr(rsp, -(StackAlignmentInBytes));
816
817 // push the return address on the stack (note that pushing, rather
818 // than storing it, yields the correct frame alignment for the callee)
819 __ push(rax);
820
821 // Put saved SP in another register
822 const Register saved_sp = rax;
823 __ movptr(saved_sp, rdi);
824
825
826 // Will jump to the compiled code just as if compiled code was doing it.
827 // Pre-load the register-jump target early, to schedule it better.
828 __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
829
830 // Now generate the shuffle code. Pick up all register args and move the
831 // rest through the floating point stack top.
832 for (int i = 0; i < sig_extended.length(); i++) {
833 if (sig_extended.at(i)._bt == T_VOID) {
834 // Longs and doubles are passed in native word order, but misaligned
835 // in the 32-bit build.
836 assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
837 continue;
838 }
839
840 // Pick up 0, 1 or 2 words from SP+offset.
841
842 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
843 "scrambled load targets?");
844 // Load in argument order going down.
845 int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize;
846 // Point to interpreter value (vs. tag)
847 int next_off = ld_off - Interpreter::stackElementSize;
851 VMReg r_1 = regs[i].first();
852 VMReg r_2 = regs[i].second();
853 if (!r_1->is_valid()) {
854 assert(!r_2->is_valid(), "");
855 continue;
856 }
857 if (r_1->is_stack()) {
858       // Convert stack slot to an SP offset (+ wordSize to account for the return address)
859 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
860
861       // We can use rsi as a temp here because compiled code doesn't need rsi as an
862       // input, and if we end up going through a c2i adapter because of a miss, a
863       // reasonable value of rsi will be generated.
864 if (!r_2->is_valid()) {
865 // __ fld_s(Address(saved_sp, ld_off));
923 // "compiled" so it is much better to make this transition
924 // invisible to the stack walking code. Unfortunately if
925   // we try to find the callee by normal means, a safepoint
926   // is possible. So we stash the desired callee in the thread,
927   // and the VM will find it there should this case occur.
928
929 __ get_thread(rax);
930 __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
931
932   // Move Method* to rax in case we end up in a c2i adapter.
933   // The c2i adapters expect Method* in rax (c2) because c2's
934   // resolve stubs return the result (the method) in rax.
935   // I'd love to fix this.
936 __ mov(rax, rbx);
937
938 __ jmp(rdi);
939 }
940
941 // ---------------------------------------------------------------
942 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
943 int comp_args_on_stack,
944 const GrowableArray<SigEntry>& sig_extended,
945 const VMRegPair *regs,
946 AdapterFingerPrint* fingerprint,
947 AdapterBlob*& new_adapter) {
948 address i2c_entry = __ pc();
949
950 gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
951
952 // -------------------------------------------------------------------------
953   // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
954 // to the interpreter. The args start out packed in the compiled layout. They
955 // need to be unpacked into the interpreter layout. This will almost always
956 // require some stack space. We grow the current (compiled) stack, then repack
957 // the args. We finally end in a jump to the generic interpreter entry point.
958 // On exit from the interpreter, the interpreter will restore our SP (lest the
959 // compiled code, which relies solely on SP and not EBP, get sick).
960
961 address c2i_unverified_entry = __ pc();
962 Label skip_fixup;
963
964 Register data = rax;
965 Register receiver = rcx;
966 Register temp = rbx;
967
968 {
969 __ ic_check(1 /* end_alignment */);
970 __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
971     // Method might have been compiled since the call site was patched to
972     // interpreted; if that is the case, treat it as a miss so we can get
973     // the call site corrected.
974 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
975 __ jcc(Assembler::equal, skip_fixup);
976 }
977
978 address c2i_entry = __ pc();
979
980 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
981 bs->c2i_entry_barrier(masm);
982
983 OopMapSet* oop_maps = nullptr;
984 int frame_complete = CodeOffsets::frame_never_safe;
985 int frame_size_in_words = 0;
986 gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
987
988 new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
989 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
990 }
991
992 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
993 VMRegPair *regs,
994 int total_args_passed) {
995
996 // We return the amount of VMRegImpl stack slots we need to reserve for all
997 // the arguments NOT counting out_preserve_stack_slots.
998
999 uint stack = 0; // All arguments on stack
1000
1001 for( int i = 0; i < total_args_passed; i++) {
1002 // From the type and the argument number (count) compute the location
1003 switch( sig_bt[i] ) {
1004 case T_BOOLEAN:
1005 case T_CHAR:
1006 case T_FLOAT:
1007 case T_BYTE:
1008 case T_SHORT:
2642 __ bind(pending);
2643
2644 RegisterSaver::restore_live_registers(masm);
2645
2646 // exception pending => remove activation and forward to exception handler
2647
2648 __ get_thread(thread);
2649 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
2650 __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
2651 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2652
2653 // -------------
2654 // make sure all code is generated
2655 masm->flush();
2656
2657 // return the blob
2658 // frame_size_words or bytes??
2659 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
2660 }
2661
2662 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
2663 Unimplemented();
2664 return nullptr;
2665 }
2666
2667 //------------------------------------------------------------------------------------------------------------------------
2668 // Continuation point for throwing of implicit exceptions that are not handled in
2669 // the current activation. Fabricates an exception oop and initiates normal
2670 // exception dispatching in this frame.
2671 //
2672 // Previously the compiler (c2) allowed for callee save registers on Java calls.
2673 // This is no longer true after adapter frames were removed but could possibly
2674 // be brought back in the future if the interpreter code was reworked and it
2675 // was deemed worthwhile. The comment below was left to describe what must
2676 // happen here if callee saves were resurrected. As it stands now this stub
2677 // could actually be a vanilla BufferBlob and have no oopMap at all.
2678 // Since it doesn't make much difference we've chosen to leave it the
2679 // way it was in the callee save days and keep the comment.
2680
2681 // If we need to preserve callee-saved values we need a callee-saved oop map and
2682 // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
2683 // If the compiler needs all registers to be preserved between the fault
2684 // point and the exception handler then it must assume responsibility for that in
2685 // AbstractCompiler::continuation_for_implicit_null_exception or
2686 // continuation_for_implicit_division_by_zero_exception. All other implicit