
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"

  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/nativeInst.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gcLocker.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/barrierSetAssembler.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "logging/log.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/compiledICHolder.hpp"
  46 #include "oops/klass.inline.hpp"
  47 #include "oops/method.inline.hpp"
  48 #include "prims/methodHandles.hpp"
  49 #include "runtime/continuation.hpp"
  50 #include "runtime/continuationEntry.inline.hpp"

 507     case T_SHORT:
 508     case T_INT:
 509       if (int_args < Argument::n_int_register_parameters_j) {
 510         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 511       } else {
 512         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 513         stk_args += 2;
 514       }
 515       break;
 516     case T_VOID:
 517       // halves of T_LONG or T_DOUBLE
 518       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 519       regs[i].set_bad();
 520       break;
 521     case T_LONG:
 522       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 523       // fall through
 524     case T_OBJECT:
 525     case T_ARRAY:
 526     case T_ADDRESS:

 527       if (int_args < Argument::n_int_register_parameters_j) {
 528         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 529       } else {
 530         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 531         stk_args += 2;
 532       }
 533       break;
 534     case T_FLOAT:
 535       if (fp_args < Argument::n_float_register_parameters_j) {
 536         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 537       } else {
 538         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 539         stk_args += 2;
 540       }
 541       break;
 542     case T_DOUBLE:
 543       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 544       if (fp_args < Argument::n_float_register_parameters_j) {
 545         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 546       } else {
 547         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 548         stk_args += 2;
 549       }
 550       break;
 551     default:
 552       ShouldNotReachHere();
 553       break;
 554     }
 555   }
 556 
 557   return align_up(stk_args, 2);
 558 }
 559 
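The switch above is HotSpot's Java calling convention bookkeeping: arguments that fit in registers consume no stack, every spilled argument takes two 4-byte VMReg slots regardless of width, and the total is rounded up to an even slot count. A minimal standalone C++ sketch of that accounting (illustrative only; sketch_java_stack_slots is a hypothetical name, not HotSpot API):

    enum class JBT { Int, Long, Float, Double, Object, Void };

    // Mirrors the slot accounting above: register args are free, spilled args
    // cost two slots each, and the result is aligned up to an even count.
    static int sketch_java_stack_slots(const JBT* sig, int n,
                                       int int_regs_avail, int fp_regs_avail) {
      int int_args = 0, fp_args = 0, stk_args = 0;
      for (int i = 0; i < n; i++) {
        switch (sig[i]) {
          case JBT::Void:                      // second half of a long/double
            break;
          case JBT::Float:
          case JBT::Double:
            if (fp_args < fp_regs_avail) { fp_args++; } else { stk_args += 2; }
            break;
          default:                             // ints, longs, oops
            if (int_args < int_regs_avail) { int_args++; } else { stk_args += 2; }
            break;
        }
      }
      return (stk_args + 1) & ~1;              // align_up(stk_args, 2)
    }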
 560 // Patch the caller's callsite with the entry to compiled code, if it exists.
 561 static void patch_callers_callsite(MacroAssembler *masm) {
 562   Label L;
 563   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 564   __ jcc(Assembler::equal, L);
 565 
 566   // Save the current stack pointer
 567   __ mov(r13, rsp);
 568   // Schedule the branch target address early.
 569   // Call into the VM to patch the caller, then jump to compiled callee
 570   // rax isn't live so capture return address while we easily can
 571   __ movptr(rax, Address(rsp, 0));
 572 
 573   // align stack so push_CPU_state doesn't fault
 574   __ andptr(rsp, -(StackAlignmentInBytes));
 575   __ push_CPU_state();
 576   __ vzeroupper();
 577   // VM needs caller's callsite
 578   // VM needs target method
 579   // This needs to be a long call since we will relocate this adapter to

 582   // Allocate argument register save area
 583   if (frame::arg_reg_save_area_bytes != 0) {
 584     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 585   }
 586   __ mov(c_rarg0, rbx);
 587   __ mov(c_rarg1, rax);
 588   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 589 
 590   // De-allocate argument register save area
 591   if (frame::arg_reg_save_area_bytes != 0) {
 592     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 593   }
 594 
 595   __ vzeroupper();
 596   __ pop_CPU_state();
 597   // restore sp
 598   __ mov(rsp, r13);
 599   __ bind(L);
 600 }
 601 
 602 
 603 static void gen_c2i_adapter(MacroAssembler *masm,
 604                             int total_args_passed,
 605                             int comp_args_on_stack,
 606                             const BasicType *sig_bt,
 607                             const VMRegPair *regs,
 608                             Label& skip_fixup) {
 609   // Before we get into the guts of the C2I adapter, see if we should be here
 610   // at all.  We've come from compiled code and are attempting to jump to the
 611   // interpreter, which means the caller made a static call to get here
 612   // (vcalls always get a compiled target if there is one).  Check for a
 613   // compiled target.  If there is one, we need to patch the caller's call.
 614   patch_callers_callsite(masm);
 615 
 616   __ bind(skip_fixup);
 617 
 618   // Since all args are passed on the stack, total_args_passed *
 619   // Interpreter::stackElementSize is the space we need.
 620 
 621   assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
 622 
 623   int extraspace = (total_args_passed * Interpreter::stackElementSize);
 624 
 625   // stack is aligned, keep it that way
 626   // This is not currently needed or enforced by the interpreter, but
 627   // we might as well conform to the ABI.
 628   extraspace = align_up(extraspace, 2*wordSize);
 629 
 630   // set senderSP value
 631   __ lea(r13, Address(rsp, wordSize));
 632 
 633 #ifdef ASSERT
 634   __ check_stack_alignment(r13, "sender stack not aligned");
 635 #endif
 636   if (extraspace > 0) {
 637     // Pop the return address
 638     __ pop(rax);
 639 
 640     __ subptr(rsp, extraspace);
 641 
 642     // Push the return address
 643     __ push(rax);
 644 
 645     // Account for the return address location since we store it first rather
 646     // than hold it in a register across all the shuffling
 647     extraspace += wordSize;
 648   }
 649 
 650 #ifdef ASSERT
 651   __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
 652 #endif
 653 
 654   // Now write the args into the outgoing interpreter space
 655   for (int i = 0; i < total_args_passed; i++) {
 656     if (sig_bt[i] == T_VOID) {
 657       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 658       continue;
 659     }
 660 
 661     // offset to start parameters
 662     int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
 663     int next_off = st_off - Interpreter::stackElementSize;
 664 
 665     // Say 4 args:
 666     // i   st_off
 667     // 0   32 T_LONG
 668     // 1   24 T_VOID
 669     // 2   16 T_OBJECT
 670     // 3    8 T_BOOL
 671     // -    0 return address
 672     //
 673     // However, to make things extra confusing: because we can fit a long/double in
 674     // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
 675     // leaves one slot empty and only stores to a single slot. In this case the
 676     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 677 
 678     VMReg r_1 = regs[i].first();
 679     VMReg r_2 = regs[i].second();
 680     if (!r_1->is_valid()) {
 681       assert(!r_2->is_valid(), "");
 682       continue;
 683     }
 684     if (r_1->is_stack()) {
 685       // memory to memory use rax
 686       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 687       if (!r_2->is_valid()) {
 688         // sign extend??
 689         __ movl(rax, Address(rsp, ld_off));
 690         __ movptr(Address(rsp, st_off), rax);
 691 
 692       } else {
 693 
 694         __ movq(rax, Address(rsp, ld_off));
 695 
 696         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 697         // T_DOUBLE and T_LONG use two slots in the interpreter
 698         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 699           // ld_off == LSW, ld_off+wordSize == MSW
 700           // st_off == MSW, next_off == LSW
 701           __ movq(Address(rsp, next_off), rax);
 702 #ifdef ASSERT
 703           // Overwrite the unused slot with known junk
 704           __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 705           __ movptr(Address(rsp, st_off), rax);
 706 #endif /* ASSERT */
 707         } else {
 708           __ movq(Address(rsp, st_off), rax);
 709         }
 710       }
 711     } else if (r_1->is_Register()) {
 712       Register r = r_1->as_Register();
 713       if (!r_2->is_valid()) {
 714         // must be only an int (or smaller), so move only 32 bits to the slot
 715         // why not sign extend??
 716         __ movl(Address(rsp, st_off), r);
 717       } else {
 718         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 719         // T_DOUBLE and T_LONG use two slots in the interpreter
 720         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 721           // long/double in gpr
 722 #ifdef ASSERT
 723           // Overwrite the unused slot with known junk
 724           __ mov64(rax, CONST64(0xdeadffffdeadaaab));
 725           __ movptr(Address(rsp, st_off), rax);
 726 #endif /* ASSERT */
 727           __ movq(Address(rsp, next_off), r);
 728         } else {
 729           __ movptr(Address(rsp, st_off), r);
 730         }
 731       }
 732     } else {
 733       assert(r_1->is_XMMRegister(), "");
 734       if (!r_2->is_valid()) {
 735         // only a float, so use just part of the slot
 736         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 737       } else {
 738 #ifdef ASSERT
 739         // Overwrite the unused slot with known junk
 740         __ mov64(rax, CONST64(0xdeadffffdeadaaac));
 741         __ movptr(Address(rsp, st_off), rax);
 742 #endif /* ASSERT */
 743         __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
 744       }
 745     }
 746   }
 747 
 748   // Schedule the branch target address early.
 749   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 750   __ jmp(rcx);
 751 }
 752 
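The st_off arithmetic above can be checked in isolation against the 4-arg example in the comments (a sketch under the assumption that Interpreter::stackElementSize == 8 on x86_64; sketch_st_off is a hypothetical helper):

    #include <cassert>

    // st_off for argument i of total_args_passed, as in gen_c2i_adapter above.
    static int sketch_st_off(int i, int total_args_passed) {
      const int stack_element_size = 8;   // assumed Interpreter::stackElementSize
      return (total_args_passed - i) * stack_element_size;
    }

    static void sketch_check_layout() {
      // 4 args: T_LONG, T_VOID, T_OBJECT, T_BOOL -> 32, 24, 16, 8;
      // the return address sits at offset 0.
      assert(sketch_st_off(0, 4) == 32);
      assert(sketch_st_off(1, 4) == 24);
      assert(sketch_st_off(2, 4) == 16);
      assert(sketch_st_off(3, 4) == 8);
    }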
 753 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 754                         address code_start, address code_end,
 755                         Label& L_ok) {
 756   Label L_fail;
 757   __ lea(temp_reg, ExternalAddress(code_start));
 758   __ cmpptr(pc_reg, temp_reg);
 759   __ jcc(Assembler::belowEqual, L_fail);
 760   __ lea(temp_reg, ExternalAddress(code_end));
 761   __ cmpptr(pc_reg, temp_reg);
 762   __ jcc(Assembler::below, L_ok);
 763   __ bind(L_fail);
 764 }
 765 
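range_check branches to L_ok only when code_start < pc < code_end; the jcc(belowEqual, L_fail) makes the lower bound exclusive. The same predicate in plain C++ (illustrative; in_code_range is a hypothetical name):

    // True iff pc lies strictly between code_start and code_end, matching the
    // unsigned comparisons emitted above.
    static bool in_code_range(const unsigned char* pc,
                              const unsigned char* code_start,
                              const unsigned char* code_end) {
      return pc > code_start && pc < code_end;
    }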
 766 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 767                                     int total_args_passed,
 768                                     int comp_args_on_stack,
 769                                     const BasicType *sig_bt,
 770                                     const VMRegPair *regs) {
 771 
 772   // Note: r13 contains the senderSP on entry. We must preserve it since
 773   // we may do a i2c -> c2i transition if we lose a race where compiled
 774   // code goes non-entrant while we get args ready.
 775   // In addition we use r13 to locate all the interpreter args as
 776   // we must align the stack to 16 bytes on an i2c entry else we
 777   // lose alignment we expect in all compiled code and register
 778   // save code can segv when fxsave instructions find improperly
 779   // aligned stack pointer.
 780 
 781   // Adapters can be frameless because they do not require the caller
 782   // to perform additional cleanup work, such as correcting the stack pointer.
 783   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 784   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 785   // even if a callee has modified the stack pointer.
 786   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 787   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 788   // up via the senderSP register).
 789   // In other words, if *either* the caller or callee is interpreted, we can

 834   // Convert 4-byte c2 stack slots to words.
 835   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 836 
 837   if (comp_args_on_stack) {
 838     __ subptr(rsp, comp_words_on_stack * wordSize);
 839   }
 840 
 841   // Ensure compiled code always sees stack at proper alignment
 842   __ andptr(rsp, -16);
 843 
 844   // push the return address and misalign the stack so that the youngest frame
 845   // sees it just as a call instruction's placement would have left it
 846   __ push(rax);
 847 
 848   // Put saved SP in another register
 849   const Register saved_sp = rax;
 850   __ movptr(saved_sp, r11);
 851 
 852   // Will jump to the compiled code just as if compiled code was doing it.
 853   // Pre-load the register-jump target early, to schedule it better.
 854   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
 855 
 856 #if INCLUDE_JVMCI
 857   if (EnableJVMCI) {
 858     // check if this call should be routed towards a specific entry point
 859     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
 860     Label no_alternative_target;
 861     __ jcc(Assembler::equal, no_alternative_target);
 862     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 863     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
 864     __ bind(no_alternative_target);
 865   }
 866 #endif // INCLUDE_JVMCI
 867 


 868   // Now generate the shuffle code.  Pick up all register args and move the
 869   // rest through the floating point stack top.
 870   for (int i = 0; i < total_args_passed; i++) {
 871     if (sig_bt[i] == T_VOID) {


 872       // Longs and doubles are passed in native word order, but misaligned
 873       // in the 32-bit build.
 874       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");

 875       continue;
 876     }
 877 
 878     // Pick up 0, 1 or 2 words from SP+offset.
 879 
 880     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 881             "scrambled load targets?");
 882     // Load in argument order going down.
 883     int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
 884     // Point to interpreter value (vs. tag)
 885     int next_off = ld_off - Interpreter::stackElementSize;
 886     //
 887     //
 888     //
 889     VMReg r_1 = regs[i].first();
 890     VMReg r_2 = regs[i].second();
 891     if (!r_1->is_valid()) {
 892       assert(!r_2->is_valid(), "");
 893       continue;
 894     }

 896       // Convert stack slot to an SP offset (+ wordSize to account for the return address)
 897       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 898 
 899       // We can use r13 as a temp here because compiled code doesn't need r13 as an input,
 900       // and if we end up going through a c2i because of a miss, a reasonable value of r13
 901       // will be generated.
 902       if (!r_2->is_valid()) {
 903         // sign extend???
 904         __ movl(r13, Address(saved_sp, ld_off));
 905         __ movptr(Address(rsp, st_off), r13);
 906       } else {
 907         //
 908         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
 909         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
 910         // so we must adjust where to pick up the data to match the interpreter.
 911         //
 912         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 913         // are accessed at negative offsets, so the LSW is at the low address
 914 
 915         // ld_off is MSW so get LSW
 916         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 917                            next_off : ld_off;
 918         __ movq(r13, Address(saved_sp, offset));
 919         // st_off is LSW (i.e. reg.first())
 920         __ movq(Address(rsp, st_off), r13);
 921       }
 922     } else if (r_1->is_Register()) {  // Register argument
 923       Register r = r_1->as_Register();
 924       assert(r != rax, "must be different");
 925       if (r_2->is_valid()) {
 926         //
 927         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
 928         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
 929         // so we must adjust where to pick up the data to match the interpreter.
 930 
 931         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 932                            next_off : ld_off;
 933 
 934         // this can be a misaligned move
 935         __ movq(r, Address(saved_sp, offset));
 936       } else {
 937         // sign extend and use a full word?
 938         __ movl(r, Address(saved_sp, ld_off));
 939       }
 940     } else {
 941       if (!r_2->is_valid()) {
 942         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 943       } else {
 944         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
 945       }
 946     }
 947   }
 948 
 949   __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
 950 
 951   // 6243940 We might end up in handle_wrong_method if
 952   // the callee is deoptimized as we race thru here. If that
 953   // happens we don't want to take a safepoint because the
 954   // caller frame will look interpreted and arguments are now
 955   // "compiled" so it is much better to make this transition
 956   // invisible to the stack walking code. Unfortunately if
 957   // we try and find the callee by normal means a safepoint
 958   // is possible. So we stash the desired callee in the thread
 959   // and the VM will find it there should this case occur.
 960 
 961   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
 962 
 963   // put Method* where a c2i would expect it, should we end up there;
 964   // only needed because the c2 resolve stubs return Method* as a result in
 965   // rax
 966   __ mov(rax, rbx);
 967   __ jmp(r11);
 968 }
 969 
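The shuffle above keeps choosing between ld_off and next_off: two-slot values (T_LONG, T_DOUBLE) are read from the second, lower-addressed interpreter slot. That selection in miniature (illustrative; sketch_interp_load_off is a hypothetical helper, again assuming Interpreter::stackElementSize == 8):

    // Offset to load an argument from in the interpreter frame: longs/doubles
    // sit at next_off (one slot lower), everything else at ld_off.
    static int sketch_interp_load_off(bool is_two_slot_value, int ld_off) {
      const int stack_element_size = 8;
      int next_off = ld_off - stack_element_size;
      return is_two_slot_value ? next_off : ld_off;
    }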
 970 // ---------------------------------------------------------------
 971 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 972                                                             int total_args_passed,
 973                                                             int comp_args_on_stack,
 974                                                             const BasicType *sig_bt,
 975                                                             const VMRegPair *regs,
 976                                                             AdapterFingerPrint* fingerprint) {






 977   address i2c_entry = __ pc();
 978 
 979   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 980 
 981   // -------------------------------------------------------------------------
 982   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
 983   // to the interpreter.  The args start out packed in the compiled layout.  They
 984   // need to be unpacked into the interpreter layout.  This will almost always
 985   // require some stack space.  We grow the current (compiled) stack, then repack
 986   // the args.  We  finally end in a jump to the generic interpreter entry point.
 987   // On exit from the interpreter, the interpreter will restore our SP (lest the
 988   // compiled code, which relies solely on SP and not RBP, get sick).
 989 
 990   address c2i_unverified_entry = __ pc();

 991   Label skip_fixup;
 992   Label ok;
 993 
 994   Register holder = rax;
 995   Register receiver = j_rarg0;
 996   Register temp = rbx;
 997 
 998   {
 999     __ load_klass(temp, receiver, rscratch1);
1000     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1001     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
1002     __ jcc(Assembler::equal, ok);
1003     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1004 
1005     __ bind(ok);
1006     // Method might have been compiled since the call site was patched to
1007     // interpreted; if that is the case, treat it as a miss so we can get
1008     // the call site corrected.
1009     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1010     __ jcc(Assembler::equal, skip_fixup);
1011     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1012   }
1013 
1014   address c2i_entry = __ pc();
1015 
1016   // Class initialization barrier for static methods
1017   address c2i_no_clinit_check_entry = NULL;
1018   if (VM_Version::supports_fast_class_init_checks()) {
1019     Label L_skip_barrier;
1020     Register method = rbx;
1021 
1022     { // Bypass the barrier for non-static methods
1023       Register flags = rscratch1;
1024       __ movl(flags, Address(method, Method::access_flags_offset()));
1025       __ testl(flags, JVM_ACC_STATIC);
1026       __ jcc(Assembler::zero, L_skip_barrier); // non-static
1027     }
1028 
1029     Register klass = rscratch1;
1030     __ load_method_holder(klass, method);
1031     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);


1032 
1033     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path




1034 
1035     __ bind(L_skip_barrier);
1036     c2i_no_clinit_check_entry = __ pc();

1037   }
1038 
1039   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1040   bs->c2i_entry_barrier(masm);
1041 
1042   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);





1043 
1044   __ flush();
1045   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1046 }
1047 
1048 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1049                                          VMRegPair *regs,
1050                                          VMRegPair *regs2,
1051                                          int total_args_passed) {
1052   assert(regs2 == NULL, "not needed on x86");
1053 // We return the number of VMRegImpl stack slots we need to reserve for all
1054 // the arguments NOT counting out_preserve_stack_slots.
1055 
1056 // NOTE: These arrays will have to change when c1 is ported
1057 #ifdef _WIN64
1058     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1059       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1060     };
1061     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1062       c_farg0, c_farg1, c_farg2, c_farg3
1063     };
1064 #else
1065     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {

1083       case T_BYTE:
1084       case T_SHORT:
1085       case T_INT:
1086         if (int_args < Argument::n_int_register_parameters_c) {
1087           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1088 #ifdef _WIN64
1089           fp_args++;
1090           // Allocate slots for the callee to stuff register args on the stack.
1091           stk_args += 2;
1092 #endif
1093         } else {
1094           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1095           stk_args += 2;
1096         }
1097         break;
1098       case T_LONG:
1099         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1100         // fall through
1101       case T_OBJECT:
1102       case T_ARRAY:

1103       case T_ADDRESS:
1104       case T_METADATA:
1105         if (int_args < Argument::n_int_register_parameters_c) {
1106           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1107 #ifdef _WIN64
1108           fp_args++;
1109           stk_args += 2;
1110 #endif
1111         } else {
1112           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1113           stk_args += 2;
1114         }
1115         break;
1116       case T_FLOAT:
1117         if (fp_args < Argument::n_float_register_parameters_c) {
1118           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1119 #ifdef _WIN64
1120           int_args++;
1121           // Allocate slots for the callee to stuff register args on the stack.
1122           stk_args += 2;

2000 
2001   int temploc = -1;
2002   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2003     int i = arg_order.at(ai);
2004     int c_arg = arg_order.at(ai + 1);
2005     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2006 #ifdef ASSERT
2007     if (in_regs[i].first()->is_Register()) {
2008       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2009     } else if (in_regs[i].first()->is_XMMRegister()) {
2010       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2011     }
2012     if (out_regs[c_arg].first()->is_Register()) {
2013       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2014     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2015       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2016     }
2017 #endif /* ASSERT */
2018     switch (in_sig_bt[i]) {
2019       case T_ARRAY:

2020       case T_OBJECT:
2021         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2022                     ((i == 0) && (!is_static)),
2023                     &receiver_offset);
2024         break;
2025       case T_VOID:
2026         break;
2027 
2028       case T_FLOAT:
2029         __ float_move(in_regs[i], out_regs[c_arg]);
2030         break;
2031 
2032       case T_DOUBLE:
2033         assert( i + 1 < total_in_args &&
2034                 in_sig_bt[i + 1] == T_VOID &&
2035                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2036         __ double_move(in_regs[i], out_regs[c_arg]);
2037         break;
2038 
2039       case T_LONG :

2125 
2126     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2127 
2128     // Get the handle (the 2nd argument)
2129     __ mov(oop_handle_reg, c_rarg1);
2130 
2131     // Get address of the box
2132 
2133     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2134 
2135     // Load the oop from the handle
2136     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2137 
2138     if (!UseHeavyMonitors) {
2139 
2140       // Load immediate 1 into swap_reg %rax
2141       __ movl(swap_reg, 1);
2142 
2143       // Load (object->mark() | 1) into swap_reg %rax
2144       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));




2145 
2146       // Save (object->mark() | 1) into BasicLock's displaced header
2147       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2148 
2149       // src -> dest iff dest == rax else rax <- dest
2150       __ lock();
2151       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2152       __ jcc(Assembler::equal, count_mon);
2153 
2154       // Hmm should this move to the slow path code area???
2155 
2156       // Test if the oopMark is an obvious stack pointer, i.e.,
2157       //  1) (mark & 3) == 0, and
2158       //  2) rsp <= mark < rsp + os::pagesize()
2159       // These 3 tests can be done by evaluating the following
2160       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2161       // assuming both stack pointer and pagesize have their
2162       // least significant 2 bits clear.
2163       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2164 
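      // Illustrative expansion of that expression: with page_size == 4096,
      // (3 - page_size) is ...fffff003, so ((mark - rsp) & (3 - page_size)) == 0
      // holds exactly when bits 0-1 of (mark - rsp) are clear (mark is 4-byte
      // aligned, given an aligned rsp) and bits 12 and up are clear
      // (0 <= mark - rsp < 4096), i.e. mark points into our own stack page.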

2186   // Now set thread in native
2187   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2188 
2189   __ call(RuntimeAddress(native_func));
2190 
2191   // Verify or restore cpu control state after JNI call
2192   __ restore_cpu_control_state_after_jni(rscratch1);
2193 
2194   // Unpack native results.
2195   switch (ret_type) {
2196   case T_BOOLEAN: __ c2bool(rax);            break;
2197   case T_CHAR   : __ movzwl(rax, rax);      break;
2198   case T_BYTE   : __ sign_extend_byte (rax); break;
2199   case T_SHORT  : __ sign_extend_short(rax); break;
2200   case T_INT    : /* nothing to do */        break;
2201   case T_DOUBLE :
2202   case T_FLOAT  :
2203     // Result is in xmm0; we'll save as needed
2204     break;
2205   case T_ARRAY:                 // Really a handle

2206   case T_OBJECT:                // Really a handle
2207       break; // can't de-handlize until after safepoint check
2208   case T_VOID: break;
2209   case T_LONG: break;
2210   default       : ShouldNotReachHere();
2211   }
2212 
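  // (Side note, hedged: c2bool normalizes the low byte of rax to exactly 0 or 1,
  //  the moral equivalent of rax = ((rax & 0xFF) != 0), so Java code never sees
  //  an out-of-range boolean coming back from native code.)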
2213   Label after_transition;
2214 
2215   // Switch thread to "native transition" state before reading the synchronization state.
2216   // This additional state is necessary because reading and testing the synchronization
2217   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2218   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2219   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2220   //     Thread A is resumed to finish this native method, but doesn't block here since it
2221   //     didn't see any synchronization in progress, and escapes.
2222   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2223 
2224   // Force this write out before the read below
2225   if (!UseSystemMemoryBarrier) {

3677   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
3678 #endif
3679   // Clear the exception oop so GC no longer processes it as a root.
3680   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
3681 
3682   // rax: exception oop
3683   // r8:  exception handler
3684   // rdx: exception pc
3685   // Jump to handler
3686 
3687   __ jmp(r8);
3688 
3689   // Make sure all code is generated
3690   masm->flush();
3691 
3692   // Set exception blob
3693   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3694 }
3695 #endif // COMPILER2
3696 
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "classfile/symbolTable.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/debugInfoRec.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/nativeInst.hpp"
  36 #include "code/vtableStubs.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gcLocker.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/barrierSetAssembler.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "logging/log.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/compiledICHolder.hpp"
  47 #include "oops/klass.inline.hpp"
  48 #include "oops/method.inline.hpp"
  49 #include "prims/methodHandles.hpp"
  50 #include "runtime/continuation.hpp"
  51 #include "runtime/continuationEntry.inline.hpp"

 508     case T_SHORT:
 509     case T_INT:
 510       if (int_args < Argument::n_int_register_parameters_j) {
 511         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 512       } else {
 513         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 514         stk_args += 2;
 515       }
 516       break;
 517     case T_VOID:
 518       // halves of T_LONG or T_DOUBLE
 519       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 520       regs[i].set_bad();
 521       break;
 522     case T_LONG:
 523       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 524       // fall through
 525     case T_OBJECT:
 526     case T_ARRAY:
 527     case T_ADDRESS:
 528     case T_PRIMITIVE_OBJECT:
 529       if (int_args < Argument::n_int_register_parameters_j) {
 530         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 531       } else {
 532         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 533         stk_args += 2;
 534       }
 535       break;
 536     case T_FLOAT:
 537       if (fp_args < Argument::n_float_register_parameters_j) {
 538         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 539       } else {
 540         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 541         stk_args += 2;
 542       }
 543       break;
 544     case T_DOUBLE:
 545       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 546       if (fp_args < Argument::n_float_register_parameters_j) {
 547         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 548       } else {
 549         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 550         stk_args += 2;
 551       }
 552       break;
 553     default:
 554       ShouldNotReachHere();
 555       break;
 556     }
 557   }
 558 
 559   return align_up(stk_args, 2);
 560 }
 561 
 562 // Same as java_calling_convention() but for multiple return
 563 // values. There's no way to store them on the stack, so if we don't
 564 // have enough registers, multiple values can't be returned.
 565 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
 566 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 567 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
 568                                           VMRegPair *regs,
 569                                           int total_args_passed) {
 570   // Create the mapping between argument positions and
 571   // registers.
 572   static const Register INT_ArgReg[java_return_convention_max_int] = {
 573     rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 574   };
 575   static const XMMRegister FP_ArgReg[java_return_convention_max_float] = {
 576     j_farg0, j_farg1, j_farg2, j_farg3,
 577     j_farg4, j_farg5, j_farg6, j_farg7
 578   };
 579 
 580 
 581   uint int_args = 0;
 582   uint fp_args = 0;
 583 
 584   for (int i = 0; i < total_args_passed; i++) {
 585     switch (sig_bt[i]) {
 586     case T_BOOLEAN:
 587     case T_CHAR:
 588     case T_BYTE:
 589     case T_SHORT:
 590     case T_INT:
 591       if (int_args < Argument::n_int_register_parameters_j+1) {
 592         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 593         int_args++;
 594       } else {
 595         return -1;
 596       }
 597       break;
 598     case T_VOID:
 599       // halves of T_LONG or T_DOUBLE
 600       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 601       regs[i].set_bad();
 602       break;
 603     case T_LONG:
 604       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 605       // fall through
 606     case T_OBJECT:
 607     case T_PRIMITIVE_OBJECT:
 608     case T_ARRAY:
 609     case T_ADDRESS:
 610     case T_METADATA:
 611       if (int_args < Argument::n_int_register_parameters_j+1) {
 612         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 613         int_args++;
 614       } else {
 615         return -1;
 616       }
 617       break;
 618     case T_FLOAT:
 619       if (fp_args < Argument::n_float_register_parameters_j) {
 620         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 621         fp_args++;
 622       } else {
 623         return -1;
 624       }
 625       break;
 626     case T_DOUBLE:
 627       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 628       if (fp_args < Argument::n_float_register_parameters_j) {
 629         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 630         fp_args++;
 631       } else {
 632         return -1;
 633       }
 634       break;
 635     default:
 636       ShouldNotReachHere();
 637       break;
 638     }
 639   }
 640 
 641   return int_args + fp_args;
 642 }
 643 
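java_return_convention() reports -1 whenever the values don't all fit, because multiple return values can live only in registers. The shape of that contract in miniature (illustrative; sketch_return_convention is a hypothetical name):

    // Assign n return values to fixed register pools; -1 means "doesn't fit".
    static int sketch_return_convention(const bool* uses_fp_reg, int n,
                                        int max_int_regs, int max_fp_regs) {
      int int_args = 0, fp_args = 0;
      for (int i = 0; i < n; i++) {
        if (uses_fp_reg[i]) {
          if (fp_args == max_fp_regs) return -1;
          fp_args++;
        } else {
          if (int_args == max_int_regs) return -1;
          int_args++;
        }
      }
      return int_args + fp_args;   // total registers used, as above
    }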
 644 // Patch the caller's callsite with the entry to compiled code, if it exists.
 645 static void patch_callers_callsite(MacroAssembler *masm) {
 646   Label L;
 647   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 648   __ jcc(Assembler::equal, L);
 649 
 650   // Save the current stack pointer
 651   __ mov(r13, rsp);
 652   // Schedule the branch target address early.
 653   // Call into the VM to patch the caller, then jump to compiled callee
 654   // rax isn't live so capture return address while we easily can
 655   __ movptr(rax, Address(rsp, 0));
 656 
 657   // align stack so push_CPU_state doesn't fault
 658   __ andptr(rsp, -(StackAlignmentInBytes));
 659   __ push_CPU_state();
 660   __ vzeroupper();
 661   // VM needs caller's callsite
 662   // VM needs target method
 663   // This needs to be a long call since we will relocate this adapter to

 666   // Allocate argument register save area
 667   if (frame::arg_reg_save_area_bytes != 0) {
 668     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 669   }
 670   __ mov(c_rarg0, rbx);
 671   __ mov(c_rarg1, rax);
 672   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 673 
 674   // De-allocate argument register save area
 675   if (frame::arg_reg_save_area_bytes != 0) {
 676     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 677   }
 678 
 679   __ vzeroupper();
 680   __ pop_CPU_state();
 681   // restore sp
 682   __ mov(rsp, r13);
 683   __ bind(L);
 684 }
 685 
 686 // For each inline type argument, sig includes the list of fields of
 687 // the inline type. This utility function computes the number of
 688 // arguments for the call if inline types are passed by reference (the
 689 // calling convention the interpreter expects).
 690 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 691   int total_args_passed = 0;
 692   if (InlineTypePassFieldsAsArgs) {
 693     for (int i = 0; i < sig_extended->length(); i++) {
 694       BasicType bt = sig_extended->at(i)._bt;
 695       if (bt == T_PRIMITIVE_OBJECT) {
 696         // In sig_extended, an inline type argument starts with:
 697         // T_PRIMITIVE_OBJECT, followed by the types of the fields of the
 698         // inline type and T_VOID to mark the end of the inline
 699         // type. Inline types are flattened so, for instance, in the
 700         // case of an inline type with an int field and an inline type
 701         // field that itself has 2 fields, an int and a long:
 702         // T_PRIMITIVE_OBJECT T_INT T_PRIMITIVE_OBJECT T_INT T_LONG T_VOID (second
 703         // slot for the T_LONG) T_VOID (inner T_PRIMITIVE_OBJECT) T_VOID
 704         // (outer T_PRIMITIVE_OBJECT)
 705         total_args_passed++;
 706         int vt = 1;
 707         do {
 708           i++;
 709           BasicType bt = sig_extended->at(i)._bt;
 710           BasicType prev_bt = sig_extended->at(i-1)._bt;
 711           if (bt == T_PRIMITIVE_OBJECT) {
 712             vt++;
 713           } else if (bt == T_VOID &&
 714                      prev_bt != T_LONG &&
 715                      prev_bt != T_DOUBLE) {
 716             vt--;
 717           }
 718         } while (vt != 0);
 719       } else {
 720         total_args_passed++;
 721       }
 722     }
 723   } else {
 724     total_args_passed = sig_extended->length();
 725   }
 726   return total_args_passed;
 727 }
 728 
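A worked trace of the loop above, using the example from its comment: for sig_extended = T_PRIMITIVE_OBJECT, T_INT, T_PRIMITIVE_OBJECT, T_INT, T_LONG, T_VOID, T_VOID, T_VOID, the nesting counter vt runs 1 -> 2 (inner T_PRIMITIVE_OBJECT) -> 2 (the T_VOID paired with T_LONG is skipped) -> 1 (inner closing T_VOID) -> 0 (outer closing T_VOID), so the whole flattened sequence contributes exactly one interpreter argument.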
 729 
 730 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 731                                    BasicType bt,
 732                                    BasicType prev_bt,
 733                                    size_t size_in_bytes,
 734                                    const VMRegPair& reg_pair,
 735                                    const Address& to,
 736                                    int extraspace,
 737                                    bool is_oop) {
 738   assert(bt != T_PRIMITIVE_OBJECT || !InlineTypePassFieldsAsArgs, "no inline type here");
 739   if (bt == T_VOID) {
 740     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 741     return;
 742   }
 743 
 744   // Say 4 args:
 745   // i   st_off
 746   // 0   32 T_LONG
 747   // 1   24 T_VOID
 748   // 2   16 T_OBJECT
 749   // 3    8 T_BOOL
 750   // -    0 return address
 751   //
 752   // However, to make things extra confusing: because we can fit a long/double in
 753   // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
 754   // leaves one slot empty and only stores to a single slot. In this case the
 755   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 756 
 757   bool wide = (size_in_bytes == wordSize);
 758   VMReg r_1 = reg_pair.first();
 759   VMReg r_2 = reg_pair.second();
 760   assert(r_2->is_valid() == wide, "invalid size");
 761   if (!r_1->is_valid()) {
 762     assert(!r_2->is_valid(), "must be invalid");
 763     return;
 764   }
 765 
 766   if (!r_1->is_XMMRegister()) {
 767     Register val = rax;
 768     if (r_1->is_stack()) {
 769       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 770       __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 771     } else {
 772       val = r_1->as_Register();
 773     }
 774     assert_different_registers(to.base(), val, rscratch1);
 775     if (is_oop) {
 776       __ push(r13);
 777       __ push(rbx);
 778       __ store_heap_oop(to, val, rscratch1, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 779       __ pop(rbx);
 780       __ pop(r13);
 781     } else {
 782       __ store_sized_value(to, val, size_in_bytes);
 783     }
 784   } else {
 785     if (wide) {
 786       __ movdbl(to, r_1->as_XMMRegister());
 787     } else {
 788       __ movflt(to, r_1->as_XMMRegister());
 789     }
 790   }
 791 }
 792 
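gen_c2i_adapter_helper dispatches on size_in_bytes: wide (8-byte) values fill a whole interpreter slot, narrow (4-byte) values only part of one. A plain C++ stand-in for that store (a sketch only; the real code goes through MacroAssembler and, for oops, GC barriers):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Store a value with the same wide/narrow split as gen_c2i_adapter_helper
    // above (no oop barriers here).
    static void sketch_store_sized(void* to, uint64_t val, std::size_t size_in_bytes) {
      if (size_in_bytes == 8) {
        std::memcpy(to, &val, 8);            // long/double/oop: full word
      } else {
        uint32_t narrow = (uint32_t)val;     // int or smaller: 32 bits
        std::memcpy(to, &narrow, 4);
      }
    }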
 793 static void gen_c2i_adapter(MacroAssembler *masm,
 794                             const GrowableArray<SigEntry>* sig_extended,


 795                             const VMRegPair *regs,
 796                             bool requires_clinit_barrier,
 797                             address& c2i_no_clinit_check_entry,
 798                             Label& skip_fixup,
 799                             address start,
 800                             OopMapSet* oop_maps,
 801                             int& frame_complete,
 802                             int& frame_size_in_words,
 803                             bool alloc_inline_receiver) {
 804   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 805     Label L_skip_barrier;
 806     Register method = rbx;
 807 
 808     { // Bypass the barrier for non-static methods
 809       Register flags = rscratch1;
 810       __ movl(flags, Address(method, Method::access_flags_offset()));
 811       __ testl(flags, JVM_ACC_STATIC);
 812       __ jcc(Assembler::zero, L_skip_barrier); // non-static
 813     }
 814 
 815     Register klass = rscratch1;
 816     __ load_method_holder(klass, method);
 817     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
 818 
 819     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
 820 
 821     __ bind(L_skip_barrier);
 822     c2i_no_clinit_check_entry = __ pc();
 823   }
 824 
 825   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 826   bs->c2i_entry_barrier(masm);
 827 
 828   // Before we get into the guts of the C2I adapter, see if we should be here
 829   // at all.  We've come from compiled code and are attempting to jump to the
 830   // interpreter, which means the caller made a static call to get here
 831   // (vcalls always get a compiled target if there is one).  Check for a
 832   // compiled target.  If there is one, we need to patch the caller's call.
 833   patch_callers_callsite(masm);
 834 
 835   __ bind(skip_fixup);
 836 
 837   if (InlineTypePassFieldsAsArgs) {
 838     // Is there an inline type argument?
 839     bool has_inline_argument = false;
 840     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 841       has_inline_argument = (sig_extended->at(i)._bt == T_PRIMITIVE_OBJECT);
 842     }
 843     if (has_inline_argument) {
 844       // There is at least an inline type argument: we're coming from
 845       // compiled code so we have no buffers to back the inline types.
 846       // Allocate the buffers here with a runtime call.
 847       OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ false);
 848 
 849       frame_complete = __ offset();
 850 
 851       __ set_last_Java_frame(noreg, noreg, NULL, rscratch1);
 852 
 853       __ mov(c_rarg0, r15_thread);
 854       __ mov(c_rarg1, rbx);
 855       __ mov64(c_rarg2, (int64_t)alloc_inline_receiver);
 856       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 857 
 858       oop_maps->add_gc_map((int)(__ pc() - start), map);
 859       __ reset_last_Java_frame(false);
 860 
 861       RegisterSaver::restore_live_registers(masm);
 862 
 863       Label no_exception;
 864       __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
 865       __ jcc(Assembler::equal, no_exception);
 866 
 867       __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
 868       __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
 869       __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 870 
 871       __ bind(no_exception);
 872 
 873       // We get an array of objects from the runtime call
 874       __ get_vm_result(rscratch2, r15_thread); // Use rscratch2 (r11) as temporary because rscratch1 (r10) is trashed by movptr()
 875       __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live?
 876     }
 877   }
 878 
 879   // Since all args are passed on the stack, total_args_passed *
 880   // Interpreter::stackElementSize is the space we need.
 881   int total_args_passed = compute_total_args_passed_int(sig_extended);
 882   assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
 883 
 884   int extraspace = (total_args_passed * Interpreter::stackElementSize);
 885 
 886   // stack is aligned, keep it that way
 887   // This is not currently needed or enforced by the interpreter, but
 888   // we might as well conform to the ABI.
 889   extraspace = align_up(extraspace, 2*wordSize);
 890 
 891   // set senderSP value
 892   __ lea(r13, Address(rsp, wordSize));
 893 
 894 #ifdef ASSERT
 895   __ check_stack_alignment(r13, "sender stack not aligned");
 896 #endif
 897   if (extraspace > 0) {
 898     // Pop the return address
 899     __ pop(rax);
 900 
 901     __ subptr(rsp, extraspace);
 902 
 903     // Push the return address
 904     __ push(rax);
 905 
 906     // Account for the return address location since we store it first rather
 907     // than hold it in a register across all the shuffling
 908     extraspace += wordSize;
 909   }
 910 
 911 #ifdef ASSERT
 912   __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
 913 #endif
 914 
 915   // Now write the args into the outgoing interpreter space
 916 
 917   // next_arg_comp is the next argument from the compiler point of
 918   // view (inline type fields are passed in registers/on the stack). In
 919   // sig_extended, an inline type argument starts with: T_PRIMITIVE_OBJECT,
 920   // followed by the types of the fields of the inline type and T_VOID
 921   // to mark the end of the inline type. ignored counts the number of
 922   // T_PRIMITIVE_OBJECT/T_VOID. next_vt_arg is the next inline type argument:
 923   // used to get the buffer for that argument from the pool of buffers
 924   // we allocated above and want to pass to the
 925   // interpreter. next_arg_int is the next argument from the
 926   // interpreter point of view (inline types are passed by reference).
 927   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 928        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 929     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 930     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 931     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 932     int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
 933     if (!InlineTypePassFieldsAsArgs || bt != T_PRIMITIVE_OBJECT) {
 934       int next_off = st_off - Interpreter::stackElementSize;
 935       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 936       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 937       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 938       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 939                              size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
 940       next_arg_int++;
 941 #ifdef ASSERT
 942       if (bt == T_LONG || bt == T_DOUBLE) {
 943         // Overwrite the unused slot with known junk
 944         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 945         __ movptr(Address(rsp, st_off), rax);



 946       }
 947 #endif /* ASSERT */
 948     } else {
 949       ignored++;
 950       // get the buffer from the just allocated pool of buffers
 951       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_PRIMITIVE_OBJECT);
 952       __ load_heap_oop(r14, Address(rscratch2, index));
 953       next_vt_arg++; next_arg_int++;
 954       int vt = 1;
 955       // write fields we get from compiled code in registers/stack
 956       // slots to the buffer: we know we are done with that inline type
 957       // argument when we hit the T_VOID that acts as an end of inline
 958       // type delimiter for this inline type. Inline types are flattened
 959       // so we might encounter embedded inline types. Each entry in
 960       // sig_extended contains a field offset in the buffer.
 961       Label L_null;
 962       do {
 963         next_arg_comp++;
 964         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 965         BasicType prev_bt = sig_extended->at(next_arg_comp-1)._bt;
 966         if (bt == T_PRIMITIVE_OBJECT) {
 967           vt++;
 968           ignored++;
 969         } else if (bt == T_VOID &&
 970                    prev_bt != T_LONG &&
 971                    prev_bt != T_DOUBLE) {
 972           vt--;
 973           ignored++;
 974         } else {
 975           int off = sig_extended->at(next_arg_comp)._offset;
 976           if (off == -1) {
 977             // Nullable inline type argument, emit null check
 978             VMReg reg = regs[next_arg_comp-ignored].first();
 979             Label L_notNull;
 980             if (reg->is_stack()) {
 981               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 982               __ testb(Address(rsp, ld_off), 1);
 983             } else {
 984               __ testb(reg->as_Register(), 1);
 985             }
 986             __ jcc(Assembler::notZero, L_notNull);
 987             __ movptr(Address(rsp, st_off), 0);
 988             __ jmp(L_null);
 989             __ bind(L_notNull);
 990             continue;
 991           }
 992           assert(off > 0, "offset in object should be positive");
 993           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 994           bool is_oop = is_reference_type(bt);
 995           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 996                                  size_in_bytes, regs[next_arg_comp-ignored], Address(r14, off), extraspace, is_oop);
 997         }
 998       } while (vt != 0);
 999       // pass the buffer to the interpreter
1000       __ movptr(Address(rsp, st_off), r14);
1001       __ bind(L_null);
1002     }
1003   }
1004 
1005   // Schedule the branch target address early.
1006   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
1007   __ jmp(rcx);
1008 }
1009 
1010 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
1011                         address code_start, address code_end,
1012                         Label& L_ok) {
1013   Label L_fail;
1014   __ lea(temp_reg, ExternalAddress(code_start));
1015   __ cmpptr(pc_reg, temp_reg);
1016   __ jcc(Assembler::belowEqual, L_fail);
1017   __ lea(temp_reg, ExternalAddress(code_end));
1018   __ cmpptr(pc_reg, temp_reg);
1019   __ jcc(Assembler::below, L_ok);
1020   __ bind(L_fail);
1021 }
1022 
1023 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,

1024                                     int comp_args_on_stack,
1025                                     const GrowableArray<SigEntry>* sig,
1026                                     const VMRegPair *regs) {
1027 
1028   // Note: r13 contains the senderSP on entry. We must preserve it since
1029   // we may do a i2c -> c2i transition if we lose a race where compiled
1030   // code goes non-entrant while we get args ready.
1031   // In addition we use r13 to locate all the interpreter args as
1032   // we must align the stack to 16 bytes on an i2c entry else we
1033   // lose alignment we expect in all compiled code and register
1034   // save code can segv when fxsave instructions find improperly
1035   // aligned stack pointer.
1036 
1037   // Adapters can be frameless because they do not require the caller
1038   // to perform additional cleanup work, such as correcting the stack pointer.
1039   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1040   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1041   // even if a callee has modified the stack pointer.
1042   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1043   // routinely repairs its caller's stack pointer (from sender_sp, which is set
1044   // up via the senderSP register).
1045   // In other words, if *either* the caller or callee is interpreted, we can

1090   // Convert 4-byte c2 stack slots to words.
1091   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1092 
1093   if (comp_args_on_stack) {
1094     __ subptr(rsp, comp_words_on_stack * wordSize);
1095   }
1096 
1097   // Ensure compiled code always sees stack at proper alignment
1098   __ andptr(rsp, -16);
1099 
1100   // Push the return address and misalign the stack so that the youngest
1101   // frame sees the stack exactly as it would just after a call instruction.
1102   __ push(rax);
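       // [Illustrative sketch, not part of the source] A worked instance of the
       // alignment dance above, assuming rsp == 0x7fffffffe38c on entry:
       //   andptr(rsp, -16):  0x7fffffffe38c & ~0xf == 0x7fffffffe380
       //   push(rax):         rsp == 0x7fffffffe378, i.e. rsp % 16 == 8,
       // which is exactly the residue a real call instruction would leave,
       // so compiled code sees the stack shape it was compiled for.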
1103 
1104   // Put saved SP in another register
1105   const Register saved_sp = rax;
1106   __ movptr(saved_sp, r11);
1107 
1108   // Will jump to the compiled code just as if compiled code was doing it.
1109   // Pre-load the register-jump target early, to schedule it better.
1110   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_inline_offset())));
1111 
1112 #if INCLUDE_JVMCI
1113   if (EnableJVMCI) {
1114     // check if this call should be routed towards a specific entry point
1115     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1116     Label no_alternative_target;
1117     __ jcc(Assembler::equal, no_alternative_target);
1118     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1119     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1120     __ bind(no_alternative_target);
1121   }
1122 #endif // INCLUDE_JVMCI
1123 
1124   int total_args_passed = sig->length();
1125 
1126   // Now generate the shuffle code.  Pick up all register args and move the
1127   // rest through the floating point stack top.
1128   for (int i = 0; i < total_args_passed; i++) {
1129     BasicType bt = sig->at(i)._bt;
1130     assert(bt != T_PRIMITIVE_OBJECT, "i2c adapter doesn't unpack inline type args");
1131     if (bt == T_VOID) {
1132       // Longs and doubles are passed in native word order, but misaligned
1133       // in the 32-bit build.
1134       BasicType prev_bt = (i > 0) ? sig->at(i-1)._bt : T_ILLEGAL;
1135       assert(i > 0 && (prev_bt == T_LONG || prev_bt == T_DOUBLE), "missing half");
1136       continue;
1137     }
1138 
1139     // Pick up 0, 1 or 2 words from SP+offset.
1140 
1141     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
1142             "scrambled load targets?");
1143     // Load in argument order going down.
1144     int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
1145     // Point to interpreter value (vs. tag)
1146     int next_off = ld_off - Interpreter::stackElementSize;
1147     //
1148     //
1149     //
1150     VMReg r_1 = regs[i].first();
1151     VMReg r_2 = regs[i].second();
1152     if (!r_1->is_valid()) {
1153       assert(!r_2->is_valid(), "");
1154       continue;
1155     }
1156     if (r_1->is_stack()) {
1157       // Convert stack slot to an SP offset (+ wordSize to account for the return address)
1158       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
1159 
1160       // We can use r13 as a temp here because compiled code doesn't need r13 as an input
1161       // and if we end up going through a c2i because of a miss, a reasonable value of r13
1162       // will be generated.
1163       if (!r_2->is_valid()) {
1164         // sign extend???
1165         __ movl(r13, Address(saved_sp, ld_off));
1166         __ movptr(Address(rsp, st_off), r13);
1167       } else {
1168         //
1169         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
1170         // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
1171         // so we must adjust where to pick up the data to match the interpreter.
1172         //
1173         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
1174         // are accessed with negative offsets, so the LSW is at the lower address.
1175 
1176         // ld_off is MSW so get LSW
1177         const int offset = (bt==T_LONG||bt==T_DOUBLE)?
1178                            next_off : ld_off;
1179         __ movq(r13, Address(saved_sp, offset));
1180         // st_off is LSW (i.e. reg.first())
1181         __ movq(Address(rsp, st_off), r13);
1182       }
1183     } else if (r_1->is_Register()) {  // Register argument
1184       Register r = r_1->as_Register();
1185       assert(r != rax, "must be different");
1186       if (r_2->is_valid()) {
1187         //
1188         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
1189         // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
1190         // so we must adjust where to pick up the data to match the interpreter.
1191 
1192         const int offset = (bt==T_LONG||bt==T_DOUBLE)?
1193                            next_off : ld_off;
1194 
1195         // this can be a misaligned move
1196         __ movq(r, Address(saved_sp, offset));
1197       } else {
1198         // sign extend and use a full word?
1199         __ movl(r, Address(saved_sp, ld_off));
1200       }
1201     } else {
1202       if (!r_2->is_valid()) {
1203         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
1204       } else {
1205         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
1206       }
1207     }
1208   }
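       // [Illustrative sketch, not part of the source] How the offsets above walk
       // the interpreter stack, assuming total_args_passed == 3 and the 8-byte
       // Interpreter::stackElementSize of x86_64:
       //   i == 0: ld_off == 3*8 == 24   (first arg, deepest slot)
       //   i == 1: ld_off == 2*8 == 16
       //   i == 2: ld_off == 1*8 ==  8   (last arg, nearest saved_sp)
       // For a T_LONG/T_DOUBLE, next_off == ld_off - 8 selects the low word of
       // the interpreter's two-slot representation.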
1209 
1210   __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
1211 
1212   // 6243940 We might end up in handle_wrong_method if
1213   // the callee is deoptimized as we race through here. If that
1214   // happens we don't want to take a safepoint because the
1215   // caller frame will look interpreted and arguments are now
1216   // "compiled" so it is much better to make this transition
1217   // invisible to the stack walking code. Unfortunately, if
1218   // we try to find the callee by normal means a safepoint
1219   // is possible. So we stash the desired callee in the thread
1220   // and the VM will find it there should this case occur.
1221 
1222   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1223 
1224   // Put the Method* where a c2i adapter would expect it, should we end up
1225   // there. This is only needed because C2's resolve stubs return the
1226   // Method* as a result in rax.
1227   __ mov(rax, rbx);
1228   __ jmp(r11);
1229 }
1230 
1231 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
1232   Label ok;
1233 
1234   Register holder = rax;
1235   Register receiver = j_rarg0;
1236   Register temp = rbx;
1237 
1238   __ load_klass(temp, receiver, rscratch1);
1239   __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1240   __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
1241   __ jcc(Assembler::equal, ok);
1242   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1243 
1244   __ bind(ok);
1245   // The method might have been compiled since the call site was patched to
1246   // interpreted; if that is the case, treat it as a miss so we can get
1247   // the call site corrected.
1248   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1249   __ jcc(Assembler::equal, skip_fixup);
1250   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1251 }
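     // [Illustrative sketch, not part of the source] gen_inline_cache_check in
     // C++ terms; check_ic and ic_miss are hypothetical names, the accessors
     // are the real ones used above:
     //
     //   Method* check_ic(CompiledICHolder* holder, oop receiver) {
     //     Method* callee = (Method*)holder->holder_metadata(); // preloaded into rbx
     //     if (receiver->klass() != holder->holder_klass())
     //       ic_miss();                  // wrong receiver type
     //     if (callee->code() != NULL)
     //       ic_miss();                  // compiled meanwhile: repatch the call site
     //     return callee;                // falls through to skip_fixup
     //   }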
1252 
1253 // ---------------------------------------------------------------
1254 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,

1255                                                             int comp_args_on_stack,
1256                                                             const GrowableArray<SigEntry>* sig,
1257                                                             const VMRegPair* regs,
1258                                                             const GrowableArray<SigEntry>* sig_cc,
1259                                                             const VMRegPair* regs_cc,
1260                                                             const GrowableArray<SigEntry>* sig_cc_ro,
1261                                                             const VMRegPair* regs_cc_ro,
1262                                                             AdapterFingerPrint* fingerprint,
1263                                                             AdapterBlob*& new_adapter,
1264                                                             bool allocate_code_blob) {
1265   address i2c_entry = __ pc();
1266   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

1267 
1268   // -------------------------------------------------------------------------
1269   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
1270   // to the interpreter.  The args start out packed in the compiled layout.  They
1271   // need to be unpacked into the interpreter layout.  This will almost always
1272   // require some stack space.  We grow the current (compiled) stack, then repack
1273   // the args.  We  finally end in a jump to the generic interpreter entry point.
1274   // On exit from the interpreter, the interpreter will restore our SP (lest the
1275   // compiled code, which relies solely on SP and not RBP, get sick).
1276 
1277   address c2i_unverified_entry        = __ pc();
1278   address c2i_unverified_inline_entry = __ pc();
1279   Label skip_fixup;





1280 
1281   gen_inline_cache_check(masm, skip_fixup);





1282 
1283   OopMapSet* oop_maps = new OopMapSet();
1284   int frame_complete = CodeOffsets::frame_never_safe;
1285   int frame_size_in_words = 0;







1286 
1287   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1288   address c2i_no_clinit_check_entry = NULL;
1289   address c2i_inline_ro_entry = __ pc();
1290   if (regs_cc != regs_cc_ro) {
1291     // No class init barrier needed because method is guaranteed to be non-static
1292     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1293                     skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1294     skip_fixup.reset();
1295   }



1296 
1297   // Scalarized c2i adapter
1298   address c2i_entry        = __ pc();
1299   address c2i_inline_entry = __ pc();
1300   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1301                   skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1302 
1303   // Non-scalarized c2i adapter
1304   if (regs != regs_cc) {
1305     c2i_unverified_inline_entry = __ pc();
1306     Label inline_entry_skip_fixup;
1307     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1308 
1309     c2i_inline_entry = __ pc();
1310     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1311                     inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1312   }
1313 
1314   __ flush();

1315 
1316   // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
1317   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1318   if (allocate_code_blob) {
1319     bool caller_must_gc_arguments = (regs != regs_cc);
1320     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1321   }
1322 
1323   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);

1324 }
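     // [Illustrative sketch, not part of the source] The blob generated above
     // packs seven entry points, in the order returned to new_entry:
     //   i2c_entry                   interpreted -> compiled, args reshuffled
     //   c2i_entry                   compiled -> interpreted, scalarized (sig_cc)
     //   c2i_inline_entry            non-scalarized layout (sig), if it differs
     //   c2i_inline_ro_entry         scalarized except the receiver (sig_cc_ro)
     //   c2i_unverified_entry        c2i with the inline-cache check first
     //   c2i_unverified_inline_entry unverified entry for the non-scalarized path
     //   c2i_no_clinit_check_entry   variant that skips the class-init barrier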
1325 
1326 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1327                                          VMRegPair *regs,
1328                                          VMRegPair *regs2,
1329                                          int total_args_passed) {
1330   assert(regs2 == NULL, "not needed on x86");
1331 // We return the number of VMRegImpl stack slots we need to reserve for all
1332 // the arguments NOT counting out_preserve_stack_slots.
1333 
1334 // NOTE: These arrays will have to change when c1 is ported
1335 #ifdef _WIN64
1336     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1337       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1338     };
1339     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1340       c_farg0, c_farg1, c_farg2, c_farg3
1341     };
1342 #else
1343     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {

1361       case T_BYTE:
1362       case T_SHORT:
1363       case T_INT:
1364         if (int_args < Argument::n_int_register_parameters_c) {
1365           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1366 #ifdef _WIN64
1367           fp_args++;
1368           // Allocate slots for the callee to stuff register args on the stack.
1369           stk_args += 2;
1370 #endif
1371         } else {
1372           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1373           stk_args += 2;
1374         }
1375         break;
1376       case T_LONG:
1377         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1378         // fall through
1379       case T_OBJECT:
1380       case T_ARRAY:
1381       case T_PRIMITIVE_OBJECT:
1382       case T_ADDRESS:
1383       case T_METADATA:
1384         if (int_args < Argument::n_int_register_parameters_c) {
1385           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1386 #ifdef _WIN64
1387           fp_args++;
1388           stk_args += 2;
1389 #endif
1390         } else {
1391           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1392           stk_args += 2;
1393         }
1394         break;
1395       case T_FLOAT:
1396         if (fp_args < Argument::n_float_register_parameters_c) {
1397           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1398 #ifdef _WIN64
1399           int_args++;
1400           // Allocate slots for the callee to stuff register args on the stack.
1401           stk_args += 2;

2279 
2280   int temploc = -1;
2281   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2282     int i = arg_order.at(ai);
2283     int c_arg = arg_order.at(ai + 1);
2284     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2285 #ifdef ASSERT
2286     if (in_regs[i].first()->is_Register()) {
2287       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2288     } else if (in_regs[i].first()->is_XMMRegister()) {
2289       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2290     }
2291     if (out_regs[c_arg].first()->is_Register()) {
2292       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2293     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2294       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2295     }
2296 #endif /* ASSERT */
2297     switch (in_sig_bt[i]) {
2298       case T_ARRAY:
2299       case T_PRIMITIVE_OBJECT:
2300       case T_OBJECT:
2301         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2302                     ((i == 0) && (!is_static)),
2303                     &receiver_offset);
2304         break;
2305       case T_VOID:
2306         break;
2307 
2308       case T_FLOAT:
2309         __ float_move(in_regs[i], out_regs[c_arg]);
2310         break;
2311 
2312       case T_DOUBLE:
2313         assert( i + 1 < total_in_args &&
2314                 in_sig_bt[i + 1] == T_VOID &&
2315                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2316         __ double_move(in_regs[i], out_regs[c_arg]);
2317         break;
2318 
2319       case T_LONG :

2405 
2406     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2407 
2408     // Get the handle (the 2nd argument)
2409     __ mov(oop_handle_reg, c_rarg1);
2410 
2411     // Get address of the box
2412 
2413     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2414 
2415     // Load the oop from the handle
2416     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2417 
2418     if (!UseHeavyMonitors) {
2419 
2420       // Load immediate 1 into swap_reg %rax
2421       __ movl(swap_reg, 1);
2422 
2423       // Load (object->mark() | 1) into swap_reg %rax
2424       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2425       if (EnableValhalla) {
2426         // Mask inline_type bit such that we go to the slow path if object is an inline type
2427         __ andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
2428       }
2429 
2430       // Save (object->mark() | 1) into BasicLock's displaced header
2431       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2432 
2433       // src -> dest iff dest == rax else rax <- dest
2434       __ lock();
2435       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2436       __ jcc(Assembler::equal, count_mon);
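           // [Illustrative sketch, not part of the source] The sequence above is
           // the classic displaced-header stack-lock fast path, roughly:
           //
           //   markWord displaced = obj->mark() | 1;   // saved into the BasicLock box
           //   // atomic: if (obj->mark == displaced) { obj->mark = lock_box; locked! }
           //   if (cas(&obj->mark, displaced, lock_box) saw displaced) goto count_mon;
           //   // else fall through to the stack-pointer test below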
2437 
2438       // Hmm should this move to the slow path code area???
2439 
2440       // Test if the oopMark is an obvious stack pointer, i.e.,
2441       //  1) (mark & 3) == 0, and
2442       //  2) rsp <= mark < mark + os::pagesize()
2443       // These 3 tests can be done by evaluating the following
2444       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2445       // assuming both stack pointer and pagesize have their
2446       // least significant 2 bits clear.
2447       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
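           // [Illustrative sketch, not part of the source] A worked instance of
           // that expression, assuming a 4096-byte page: 3 - 4096 == -4093, whose
           // two's complement form 0xffffffffffff_f003 masks in the two tag bits
           // and all bits at or above the page size. With mark == rsp + 0x40 and
           // tag bits clear: (0x40 & 0x...f003) == 0, so the mark is recognized
           // as a pointer into our own stack; any heap mark or set tag bit yields
           // a nonzero result.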
2448 

2470   // Now set thread in native
2471   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2472 
2473   __ call(RuntimeAddress(native_func));
2474 
2475   // Verify or restore cpu control state after JNI call
2476   __ restore_cpu_control_state_after_jni(rscratch1);
2477 
2478   // Unpack native results.
2479   switch (ret_type) {
2480   case T_BOOLEAN: __ c2bool(rax);            break;
2481   case T_CHAR   : __ movzwl(rax, rax);       break;
2482   case T_BYTE   : __ sign_extend_byte (rax); break;
2483   case T_SHORT  : __ sign_extend_short(rax); break;
2484   case T_INT    : /* nothing to do */        break;
2485   case T_DOUBLE :
2486   case T_FLOAT  :
2487     // Result is in xmm0 we'll save as needed
2488     break;
2489   case T_ARRAY:                 // Really a handle
2490   case T_PRIMITIVE_OBJECT:      // Really a handle
2491   case T_OBJECT:                // Really a handle
2492       break; // can't de-handlize until after safepoint check
2493   case T_VOID: break;
2494   case T_LONG: break;
2495   default       : ShouldNotReachHere();
2496   }
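       // [Illustrative sketch, not part of the source] c2bool normalizes
       // whatever byte the native method returned, roughly:
       //   rax = ((rax & 0xff) != 0) ? 1 : 0;   // andl + setne in the assembler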
2497 
2498   Label after_transition;
2499 
2500   // Switch thread to "native transition" state before reading the synchronization state.
2501   // This additional state is necessary because reading and testing the synchronization
2502   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2503   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2504   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2505   //     Thread A is resumed to finish this native method, but doesn't block here since it
2506   //     didn't see any synchronization in progress, and escapes.
2507   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
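       // [Illustrative sketch, not part of the source] The shape of this
       // transition in pseudo-C++; the poll lies below this excerpt and the
       // helper names here are descriptive, not real HotSpot API:
       //   thread->state = _thread_in_native_trans;       // the store above
       //   storeload_fence();                             // "force this write out"
       //   if (safepoint_pending(thread)) block(thread);  // the read it orders against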
2508 
2509   // Force this write out before the read below
2510   if (!UseSystemMemoryBarrier) {

3962   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
3963 #endif
3964   // Clear the exception oop so GC no longer processes it as a root.
3965   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
3966 
3967   // rax: exception oop
3968   // r8:  exception handler
3969   // rdx: exception pc
3970   // Jump to handler
3971 
3972   __ jmp(r8);
3973 
3974   // Make sure all code is generated
3975   masm->flush();
3976 
3977   // Set exception blob
3978   _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3979 }
3980 #endif // COMPILER2
3981 
3982 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3983   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3984   CodeBuffer buffer(buf);
3985   short buffer_locs[20];
3986   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3987                                          sizeof(buffer_locs)/sizeof(relocInfo));
3988 
3989   MacroAssembler* masm = new MacroAssembler(&buffer);
3990 
3991   const Array<SigEntry>* sig_vk = vk->extended_sig();
3992   const Array<VMRegPair>* regs = vk->return_regs();
3993 
3994   int pack_fields_jobject_off = __ offset();
3995   // Resolve pre-allocated buffer from JNI handle.
3996   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3997   __ movptr(rax, Address(r13, 0));
3998   __ resolve_jobject(rax /* value */,
3999                      r15_thread /* thread */,
4000                      r12 /* tmp */);
4001   __ movptr(Address(r13, 0), rax);
4002 
4003   int pack_fields_off = __ offset();
4004 
4005   int j = 1;
4006   for (int i = 0; i < sig_vk->length(); i++) {
4007     BasicType bt = sig_vk->at(i)._bt;
4008     if (bt == T_PRIMITIVE_OBJECT) {
4009       continue;
4010     }
4011     if (bt == T_VOID) {
4012       if (sig_vk->at(i-1)._bt == T_LONG ||
4013           sig_vk->at(i-1)._bt == T_DOUBLE) {
4014         j++;
4015       }
4016       continue;
4017     }
4018     int off = sig_vk->at(i)._offset;
4019     assert(off > 0, "offset in object should be positive");
4020     VMRegPair pair = regs->at(j);
4021     VMReg r_1 = pair.first();
4022     VMReg r_2 = pair.second();
4023     Address to(rax, off);
4024     if (bt == T_FLOAT) {
4025       __ movflt(to, r_1->as_XMMRegister());
4026     } else if (bt == T_DOUBLE) {
4027       __ movdbl(to, r_1->as_XMMRegister());
4028     } else {
4029       Register val = r_1->as_Register();
4030       assert_different_registers(to.base(), val, r14, r13, rbx, rscratch1);
4031       if (is_reference_type(bt)) {
4032         __ store_heap_oop(to, val, r14, r13, rbx, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
4033       } else {
4034         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
4035       }
4036     }
4037     j++;
4038   }
4039   assert(j == regs->length(), "missed a field?");
4040 
4041   __ ret(0);
4042 
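       // [Illustrative sketch, not part of the source] Index bookkeeping in the
       // loop above, for a hypothetical inline class with fields (long l; int i;):
       // sig_vk is roughly [T_PRIMITIVE_OBJECT, T_LONG, T_VOID, T_INT]. j starts
       // at 1 because regs->at(0) describes the buffered oop itself; the
       // T_PRIMITIVE_OBJECT marker consumes no pair; T_LONG takes regs->at(1) and
       // its T_VOID half bumps j past the pair's second slot; T_INT takes
       // regs->at(3), leaving j == regs->length() == 4 as the assert demands.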
4043   int unpack_fields_off = __ offset();
4044 
4045   Label skip;
4046   __ testptr(rax, rax);
4047   __ jcc(Assembler::zero, skip);
4048 
4049   j = 1;
4050   for (int i = 0; i < sig_vk->length(); i++) {
4051     BasicType bt = sig_vk->at(i)._bt;
4052     if (bt == T_PRIMITIVE_OBJECT) {
4053       continue;
4054     }
4055     if (bt == T_VOID) {
4056       if (sig_vk->at(i-1)._bt == T_LONG ||
4057           sig_vk->at(i-1)._bt == T_DOUBLE) {
4058         j++;
4059       }
4060       continue;
4061     }
4062     int off = sig_vk->at(i)._offset;
4063     assert(off > 0, "offset in object should be positive");
4064     VMRegPair pair = regs->at(j);
4065     VMReg r_1 = pair.first();
4066     VMReg r_2 = pair.second();
4067     Address from(rax, off);
4068     if (bt == T_FLOAT) {
4069       __ movflt(r_1->as_XMMRegister(), from);
4070     } else if (bt == T_DOUBLE) {
4071       __ movdbl(r_1->as_XMMRegister(), from);
4072     } else if (bt == T_OBJECT || bt == T_ARRAY) {
4073       assert_different_registers(rax, r_1->as_Register());
4074       __ load_heap_oop(r_1->as_Register(), from);
4075     } else {
4076       assert(is_java_primitive(bt), "unexpected basic type");
4077       assert_different_registers(rax, r_1->as_Register());
4078       size_t size_in_bytes = type2aelembytes(bt);
4079       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4080     }
4081     j++;
4082   }
4083   assert(j == regs->length(), "missed a field?");
4084 
4085   __ bind(skip);
4086   __ ret(0);
4087 
4088   __ flush();
4089 
4090   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
4091 }