src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"

  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/barrierSetAssembler.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "logging/log.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "oops/compiledICHolder.hpp"
  45 #include "oops/klass.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "runtime/signature.hpp"

 925         // So we must adjust where to pick up the data to match the interpreter.
 926 
 927         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 928                            next_off : ld_off;
 929 
 930         // this can be a misaligned move
 931         __ movq(r, Address(saved_sp, offset));
 932       } else {
 933         // sign extend and use a full word?
 934         __ movl(r, Address(saved_sp, ld_off));
 935       }
 936     } else {
 937       if (!r_2->is_valid()) {
 938         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 939       } else {
 940         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
 941       }
 942     }
 943   }
 944 


 945   // 6243940 We might end up in handle_wrong_method if
 946   // the callee is deoptimized as we race thru here. If that
 947   // happens we don't want to take a safepoint because the
 948   // caller frame will look interpreted and arguments are now
 949   // "compiled" so it is much better to make this transition
 950   // invisible to the stack walking code. Unfortunately if
 951   // we try and find the callee by normal means a safepoint
 952   // is possible. So we stash the desired callee in the thread
 953  // and the VM will find it there should this case occur.
 954 
 955   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
 956 
 957  // put Method* where a c2i adapter would expect it, should we end up there
 958  // only needed because c2 resolve stubs return Method* as a result in
 959   // rax
 960   __ mov(rax, rbx);
 961   __ jmp(r11);
 962 }
 963 
 964 // ---------------------------------------------------------------

1453                             const methodHandle& method,
1454                             const BasicType* sig_bt,
1455                             const VMRegPair* regs) {
1456   Register temp_reg = rbx;  // not part of any compiled calling sequence
1457   if (VerifyOops) {
1458     for (int i = 0; i < method->size_of_parameters(); i++) {
1459       if (is_reference_type(sig_bt[i])) {
1460         VMReg r = regs[i].first();
1461         assert(r->is_valid(), "bad oop arg");
1462         if (r->is_stack()) {
1463           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1464           __ verify_oop(temp_reg);
1465         } else {
1466           __ verify_oop(r->as_Register());
1467         }
1468       }
1469     }
1470   }
1471 }
1472 

1473 static void gen_special_dispatch(MacroAssembler* masm,
1474                                  const methodHandle& method,
1475                                  const BasicType* sig_bt,
1476                                  const VMRegPair* regs) {
1477   verify_oop_args(masm, method, sig_bt, regs);
1478   vmIntrinsics::ID iid = method->intrinsic_id();
1479 
1480   // Now write the args into the outgoing interpreter space
1481   bool     has_receiver   = false;
1482   Register receiver_reg   = noreg;
1483   int      member_arg_pos = -1;
1484   Register member_reg     = noreg;
1485   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1486   if (ref_kind != 0) {
1487     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1488     member_reg = rbx;  // known to be free at this point
1489     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1490   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1491     has_receiver = true;
1492   } else {

1535 // convention (handlizes oops, etc), transitions to native, makes the call,
1536 // returns to java state (possibly blocking), unhandlizes any result and
1537 // returns.
1538 //
1539 // Critical native functions are a shorthand for the use of
1540 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1541 // functions.  The wrapper is expected to unpack the arguments before
1542 // passing them to the callee. Critical native functions leave the state _in_Java,
1543 // since they cannot stop for GC.
1544 // Some other parts of JNI setup are skipped, like the teardown of the JNI handle
1545 // block and the check for pending exceptions, since it's impossible for them
1546 // to be thrown.
1547 //
1548 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1549                                                 const methodHandle& method,
1550                                                 int compile_id,
1551                                                 BasicType* in_sig_bt,
1552                                                 VMRegPair* in_regs,
1553                                                 BasicType ret_type,
1554                                                 address critical_entry) {

1555   if (method->is_method_handle_intrinsic()) {
1556     vmIntrinsics::ID iid = method->intrinsic_id();
1557     intptr_t start = (intptr_t)__ pc();
1558     int vep_offset = ((intptr_t)__ pc()) - start;
1559     gen_special_dispatch(masm,
1560                          method,
1561                          in_sig_bt,
1562                          in_regs);
1563     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1564     __ flush();
1565     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1566     return nmethod::new_native_nmethod(method,
1567                                        compile_id,
1568                                        masm->code(),
1569                                        vep_offset,
1570                                        frame_complete,
1571                                        stack_slots / VMRegImpl::slots_per_word,
1572                                        in_ByteSize(-1),
1573                                        in_ByteSize(-1),
1574                                        (OopMapSet*)NULL);
1575   }

1576   bool is_critical_native = true;
1577   address native_func = critical_entry;
1578   if (native_func == NULL) {
1579     native_func = method->native_function();
1580     is_critical_native = false;
1581   }
1582   assert(native_func != NULL, "must have function");
1583 
1584   // An OopMap for lock (and class if static)
1585   OopMapSet *oop_maps = new OopMapSet();
1586   intptr_t start = (intptr_t)__ pc();
1587 
1588   // We have received a description of where all the java args are located
1589   // on entry to the wrapper. We need to convert these args to where
1590   // the jni function will expect them. To figure out where they go
1591   // we convert the java signature to a C signature by inserting
1592   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1593 
1594   const int total_in_args = method->size_of_parameters();
1595   int total_c_args = total_in_args;

2091 
2092     // Hmm should this move to the slow path code area???
2093 
2094     // Test if the oopMark is an obvious stack pointer, i.e.,
2095     //  1) (mark & 3) == 0, and
2096     //  2) rsp <= mark < rsp + os::pagesize()
2097     // These 3 tests can be done by evaluating the following
2098     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2099     // assuming both stack pointer and pagesize have their
2100     // least significant 2 bits clear.
2101     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2102 
2103     __ subptr(swap_reg, rsp);
2104     __ andptr(swap_reg, 3 - os::vm_page_size());
2105 
2106     // Save the test result; for the recursive case, the result is zero
2107     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2108     __ jcc(Assembler::notEqual, slow_path_lock);
2109 
2110     // Slow path will re-enter here
2111 
2112     __ bind(lock_done);

2113   }
2114 
2115   // Finally just about ready to make the JNI call
2116 
2117   // get JNIEnv* which is first argument to native
2118   if (!is_critical_native) {
2119     __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2120 
2121     // Now set thread in native
2122     __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2123   }
2124 
2125   __ call(RuntimeAddress(native_func));
2126 
2127   // Verify or restore cpu control state after JNI call
2128   __ restore_cpu_control_state_after_jni();
2129 
2130   // Unpack native results.
2131   switch (ret_type) {
2132   case T_BOOLEAN: __ c2bool(rax);            break;

2236     }
2237 
2238 
2239     // get address of the stack lock
2240     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2241     //  get old displaced header
2242     __ movptr(old_hdr, Address(rax, 0));
2243 
2244     // Atomic swap old header if oop still contains the stack lock
2245     __ lock();
2246     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2247     __ jcc(Assembler::notEqual, slow_path_unlock);
2248 
2249     // slow path re-enters here
2250     __ bind(unlock_done);
2251     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2252       restore_native_result(masm, ret_type, stack_slots);
2253     }
2254 
2255     __ bind(done);
2256 
2257   }
2258   {
2259     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2260     save_native_result(masm, ret_type, stack_slots);
2261     __ mov_metadata(c_rarg1, method());
2262     __ call_VM_leaf(
2263          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2264          r15_thread, c_rarg1);
2265     restore_native_result(masm, ret_type, stack_slots);
2266   }
2267 
2268   __ reset_last_Java_frame(false);
2269 
2270   // Unbox oop result, e.g. JNIHandles::resolve value.
2271   if (is_reference_type(ret_type)) {
2272     __ resolve_jobject(rax /* value */,
2273                        r15_thread /* thread */,
2274                        rcx /* tmp */);
2275   }
2276 

3016 
3017   if (UseRTMLocking) {
3018     // Abort RTM transaction before calling runtime
3019     // because critical section will be large and will be
3020     // aborted anyway. Also nmethod could be deoptimized.
3021     __ xabort(0);
3022   }
3023 
3024   // Make room for return address (or push it again)
3025   if (!cause_return) {
3026     __ push(rbx);
3027   }
3028 
3029   // Save registers, fpu state, and flags
3030   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3031 
3032   // The following is basically a call_VM.  However, we need the precise
3033   // address of the call in order to generate an oopmap. Hence, we do all the
3034   // work ourselves.
3035 
3036   __ set_last_Java_frame(noreg, noreg, NULL);
3037 
3038   // The return address must always be correct so that the frame constructor never
3039   // sees an invalid pc.
3040 
3041   if (!cause_return) {
3042     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3043     // Additionally, rbx is a callee saved register and we can look at it later to determine
3044     // if someone changed the return address for us!
3045     __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3046     __ movptr(Address(rbp, wordSize), rbx);
3047   }
3048 
3049   // Do the call
3050   __ mov(c_rarg0, r15_thread);
3051   __ call(RuntimeAddress(call_ptr));
3052 
3053   // Set an oopmap for the call site.  This oopmap will map all
3054   // oop-registers and debug-info registers as callee-saved.  This
3055   // will allow deoptimization at this safepoint to find all possible
3056   // debug-info recordings, as well as let GC find all oops.

3164 //
3165 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3166   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3167 
3168   // allocate space for the code
3169   ResourceMark rm;
3170 
3171   CodeBuffer buffer(name, 1000, 512);
3172   MacroAssembler* masm                = new MacroAssembler(&buffer);
3173 
3174   int frame_size_in_words;
3175 
3176   OopMapSet *oop_maps = new OopMapSet();
3177   OopMap* map = NULL;
3178 
3179   int start = __ offset();
3180 
3181   // No need to save vector registers since they are caller-saved anyway.
3182   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ false);
3183 


3184   int frame_complete = __ offset();
3185 
3186   __ set_last_Java_frame(noreg, noreg, NULL);
3187 
3188   __ mov(c_rarg0, r15_thread);
3189 
3190   __ call(RuntimeAddress(destination));
3191 
3192 
3193   // Set an oopmap for the call site.
3194   // We need this not only for callee-saved registers, but also for volatile
3195   // registers that the compiler might be keeping live across a safepoint.
3196 
3197   oop_maps->add_gc_map( __ offset() - start, map);
3198 
3199   // rax contains the address we are going to jump to assuming no exception got installed
3200 
3201   // clear last_Java_sp
3202   __ reset_last_Java_frame(false);
3203   // check for pending exceptions

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/nativeInst.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gcLocker.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/barrierSetAssembler.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "logging/log.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/compiledICHolder.hpp"
  46 #include "oops/klass.inline.hpp"
  47 #include "prims/methodHandles.hpp"
  48 #include "runtime/jniHandles.hpp"
  49 #include "runtime/safepointMechanism.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "runtime/signature.hpp"

 926         // So we must adjust where to pick up the data to match the interpreter.
 927 
 928         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 929                            next_off : ld_off;
 930 
 931         // this can be a misaligned move
 932         __ movq(r, Address(saved_sp, offset));
 933       } else {
 934         // sign extend and use a full word?
 935         __ movl(r, Address(saved_sp, ld_off));
 936       }
 937     } else {
 938       if (!r_2->is_valid()) {
 939         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 940       } else {
 941         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
 942       }
 943     }
 944   }
 945 
 946   __ push_cont_fastpath(r15_thread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
 947 
 948   // 6243940 We might end up in handle_wrong_method if
 949   // the callee is deoptimized as we race thru here. If that
 950   // happens we don't want to take a safepoint because the
 951   // caller frame will look interpreted and arguments are now
 952   // "compiled" so it is much better to make this transition
 953   // invisible to the stack walking code. Unfortunately if
 954   // we try and find the callee by normal means a safepoint
 955   // is possible. So we stash the desired callee in the thread
 956  // and the VM will find it there should this case occur.
 957 
 958   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
 959 
 960  // put Method* where a c2i adapter would expect it, should we end up there
 961  // only needed because c2 resolve stubs return Method* as a result in
 962   // rax
 963   __ mov(rax, rbx);
 964   __ jmp(r11);
 965 }
 966 
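The stashed callee above is picked back up on the VM side instead of being re-resolved, so the racy frame is never walked at a safepoint. A minimal sketch of the consuming end, assuming the usual shape of SharedRuntime::handle_wrong_method in sharedRuntime.cpp (the exact body is an assumption, not part of this change):

    JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
      // Recover the callee stashed at JavaThread::callee_target_offset() above.
      Method* callee = current->callee_target();
      guarantee(callee != NULL, "no stashed callee");
      current->set_callee_target(NULL);  // consume it
      // ... re-dispatch through the callee's c2i entry ...
    JRT_END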
 967 // ---------------------------------------------------------------

1456                             const methodHandle& method,
1457                             const BasicType* sig_bt,
1458                             const VMRegPair* regs) {
1459   Register temp_reg = rbx;  // not part of any compiled calling sequence
1460   if (VerifyOops) {
1461     for (int i = 0; i < method->size_of_parameters(); i++) {
1462       if (is_reference_type(sig_bt[i])) {
1463         VMReg r = regs[i].first();
1464         assert(r->is_valid(), "bad oop arg");
1465         if (r->is_stack()) {
1466           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1467           __ verify_oop(temp_reg);
1468         } else {
1469           __ verify_oop(r->as_Register());
1470         }
1471       }
1472     }
1473   }
1474 }
1475 
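In the stack case above, the VMReg names a slot in the outgoing argument area while the return address still sits at [rsp]; the trailing +wordSize steps over it. A one-line sketch of the computation (editor's illustration, not code from the patch):

    // byte offset of a stack-passed oop argument relative to rsp
    int stack_arg_offset = r->reg2stack() * VMRegImpl::stack_slot_size + wordSize;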
1476 // defined in stubGenerator_x86_64.cpp
1477 OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots);
1478 void fill_continuation_entry(MacroAssembler* masm);
1479 void continuation_enter_cleanup(MacroAssembler* masm);
1480 
1481 // enterSpecial(Continuation c, boolean isContinue)
1482 // On entry: c_rarg1 -- the continuation object
1483 //           c_rarg2 -- isContinue
1484 static void gen_continuation_enter(MacroAssembler* masm,
1485                                    const methodHandle& method,
1486                                    const BasicType* sig_bt,
1487                                    const VMRegPair* regs,
1488                                    int& exception_offset,
1489                                    OopMapSet* oop_maps,
1490                                    int& frame_complete,
1491                                    int& stack_slots) {
1492   //verify_oop_args(masm, method, sig_bt, regs);
1493   AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
1494                          relocInfo::static_call_type);
1495 
1496   stack_slots = 2; // will be overwritten
1497   address start = __ pc();
1498 
1499   Label call_thaw, exit;
1500 
1501   __ push(rbp);
1502 
1503   //BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1504   //bs->nmethod_entry_barrier(masm);
1505   OopMap* map = continuation_enter_setup(masm, stack_slots);  // kills rax
1506 
1507   // Frame is now completed as far as size and linkage.
1508   frame_complete = __ pc() - start;
1509   // if isContinue == 0
1510   //   _enterSP = sp
1511   // end
1512  
1513   fill_continuation_entry(masm); // kills rax
1514 
1515   __ cmpl(c_rarg2, 0);
1516   __ jcc(Assembler::notEqual, call_thaw);
1517 
1518   int up = align_up((intptr_t) __ pc() + 1, 4) - (intptr_t) (__ pc() + 1);
1519   if (up > 0) {
1520     __ nop(up);
1521   }
1522 
1523   address mark = __ pc();
1524   __ call(resolve);
1525   oop_maps->add_gc_map(__ pc() - start, map);
1526   __ post_call_nop();
1527 
1528   __ jmp(exit);
1529 
1530   __ bind(call_thaw);
1531 
1532   __ movptr(rbx, (intptr_t) StubRoutines::cont_thaw());
1533   __ call(rbx);
1534   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1535   ContinuationEntry::return_pc_offset = __ pc() - start;
1536   __ post_call_nop();
1537 
1538   __ bind(exit);
1539   continuation_enter_cleanup(masm);
1540   __ pop(rbp);
1541   __ ret(0);
1542 
1543   /// exception handling
1544 
1545   exception_offset = __ pc() - start;
1546 
1547   continuation_enter_cleanup(masm);
1548   __ addptr(rsp, 1*wordSize);
1549 
1550   __ movptr(rbx, rax); // save the exception
1551   __ movptr(c_rarg0, Address(rsp, 0));
1552 
1553   __ call_VM_leaf(CAST_FROM_FN_PTR(address,
1554         SharedRuntime::exception_handler_for_return_address),
1555       r15_thread, c_rarg0);
1556   __ mov(rdi, rax);
1557   __ movptr(rax, rbx);
1558   __ mov(rbx, rdi);
1559   __ pop(rdx);
1560 
1561   // continue at exception handler (return address removed)
1562   // rax: exception
1563   // rbx: exception handler
1564   // rdx: throwing pc
1565   __ verify_oop(rax);
1566   __ jmp(rbx);
1567 
1568   CodeBuffer* cbuf = masm->code_section()->outer();
1569   address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, mark);
1570 }
1571 
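The nop padding in gen_continuation_enter appears intended to 4-byte-align the displacement of the patchable call that follows (on x86 the displacement begins one byte past the call opcode). A worked instance under that assumption:

    // pad = align_up(pc + 1, 4) - (pc + 1)
    // pc = 0x1001: align_up(0x1002, 4) - 0x1002 = 0x1004 - 0x1002 = 2 nop bytes
    // pc = 0x1003: align_up(0x1004, 4) - 0x1004 = 0x1004 - 0x1004 = 0 nop bytes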
1572 static void gen_special_dispatch(MacroAssembler* masm,
1573                                  const methodHandle& method,
1574                                  const BasicType* sig_bt,
1575                                  const VMRegPair* regs) {
1576   verify_oop_args(masm, method, sig_bt, regs);
1577   vmIntrinsics::ID iid = method->intrinsic_id();
1578 
1579   // Now write the args into the outgoing interpreter space
1580   bool     has_receiver   = false;
1581   Register receiver_reg   = noreg;
1582   int      member_arg_pos = -1;
1583   Register member_reg     = noreg;
1584   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1585   if (ref_kind != 0) {
1586     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1587     member_reg = rbx;  // known to be free at this point
1588     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1589   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1590     has_receiver = true;
1591   } else {

1634 // convention (handlizes oops, etc), transitions to native, makes the call,
1635 // returns to java state (possibly blocking), unhandlizes any result and
1636 // returns.
1637 //
1638 // Critical native functions are a shorthand for the use of
1639 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1640 // functions.  The wrapper is expected to unpack the arguments before
1641 // passing them to the callee. Critical native functions leave the state _in_Java,
1642 // since they cannot stop for GC.
1643 // Some other parts of JNI setup are skipped, like the teardown of the JNI handle
1644 // block and the check for pending exceptions, since it's impossible for them
1645 // to be thrown.
1646 //
1647 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1648                                                 const methodHandle& method,
1649                                                 int compile_id,
1650                                                 BasicType* in_sig_bt,
1651                                                 VMRegPair* in_regs,
1652                                                 BasicType ret_type,
1653                                                 address critical_entry) {
1654   if (method->is_continuation_enter_intrinsic()) {
1655     vmIntrinsics::ID iid = method->intrinsic_id();
1656     intptr_t start = (intptr_t)__ pc();
1657     int vep_offset = ((intptr_t)__ pc()) - start;
1658     int exception_offset = 0;
1659     int frame_complete = 0;
1660     int stack_slots = 0;
1661     OopMapSet* oop_maps = new OopMapSet();
1662     gen_continuation_enter(masm,
1663                            method,
1664                            in_sig_bt,
1665                            in_regs,
1666                            exception_offset,
1667                            oop_maps,
1668                            frame_complete,
1669                            stack_slots);
1670     __ flush();
1671     nmethod* nm = nmethod::new_native_nmethod(method,
1672                                               compile_id,
1673                                               masm->code(),
1674                                               vep_offset,
1675                                               frame_complete,
1676                                               stack_slots,
1677                                               in_ByteSize(-1),
1678                                               in_ByteSize(-1),
1679                                               oop_maps,
1680                                               exception_offset);
1681     ContinuationEntry::set_enter_nmethod(nm);
1682     return nm;
1683   }
1684 
1685   if (method->is_method_handle_intrinsic()) {
1686     vmIntrinsics::ID iid = method->intrinsic_id();
1687     intptr_t start = (intptr_t)__ pc();
1688     int vep_offset = ((intptr_t)__ pc()) - start;
1689     gen_special_dispatch(masm,
1690                          method,
1691                          in_sig_bt,
1692                          in_regs);
1693     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1694     __ flush();
1695     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1696     return nmethod::new_native_nmethod(method,
1697                                        compile_id,
1698                                        masm->code(),
1699                                        vep_offset,
1700                                        frame_complete,
1701                                        stack_slots / VMRegImpl::slots_per_word,
1702                                        in_ByteSize(-1),
1703                                        in_ByteSize(-1),
1704                                        (OopMapSet*)NULL);
1705   }
1706 
1707   bool is_critical_native = true;
1708   address native_func = critical_entry;
1709   if (native_func == NULL) {
1710     native_func = method->native_function();
1711     is_critical_native = false;
1712   }
1713   assert(native_func != NULL, "must have function");
1714 
1715   // An OopMap for lock (and class if static)
1716   OopMapSet *oop_maps = new OopMapSet();
1717   intptr_t start = (intptr_t)__ pc();
1718 
1719   // We have received a description of where all the java args are located
1720   // on entry to the wrapper. We need to convert these args to where
1721   // the jni function will expect them. To figure out where they go
1722   // we convert the java signature to a C signature by inserting
1723   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1724 
1725   const int total_in_args = method->size_of_parameters();
1726   int total_c_args = total_in_args;
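For context on the critical_entry path above: a critical native exposes a second, raw entry point beside the regular JNI one, with arrays passed as (length, pointer) pairs. A hedged sketch of such a pair at the C level (illustrative names, not taken from this patch):

    // Regular JNI entry: receives JNIEnv* and handles, may block.
    JNIEXPORT jint JNICALL Java_Foo_sum(JNIEnv* env, jclass cls, jintArray arr);
    // Critical entry: raw data, stays _in_Java, must not block or call JNI.
    JNIEXPORT jint JNICALL JavaCritical_Foo_sum(jint len, jint* data);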

2222 
2223     // Hmm should this move to the slow path code area???
2224 
2225     // Test if the oopMark is an obvious stack pointer, i.e.,
2226     //  1) (mark & 3) == 0, and
2227     //  2) rsp <= mark < rsp + os::pagesize()
2228     // These 3 tests can be done by evaluating the following
2229     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2230     // assuming both stack pointer and pagesize have their
2231     // least significant 2 bits clear.
2232     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2233 
2234     __ subptr(swap_reg, rsp);
2235     __ andptr(swap_reg, 3 - os::vm_page_size());
2236 
2237     // Save the test result; for the recursive case, the result is zero
2238     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2239     __ jcc(Assembler::notEqual, slow_path_lock);
2240 
2241     // Slow path will re-enter here

2242     __ bind(lock_done);
2243     // __ inc_held_monitor_count(r15_thread);
2244   }
2245 
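A worked instance of the mask trick above, assuming a 4096-byte page: 3 - 4096 is -4093, i.e. 0x...F003 in two's complement, so the andptr keeps bits 0-1 and bits 12 and up. The result is zero exactly when the displaced mark is 4-byte aligned and lies less than one page at or above rsp:

    // scalar equivalent of the subptr/andptr pair (editor's sketch)
    static bool plausible_stack_lock(uintptr_t mark, uintptr_t rsp) {
      return ((mark - rsp) & (3 - 4096)) == 0;  // mask = 0xFF...F003
    }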
2246   // Finally just about ready to make the JNI call
2247 
2248   // get JNIEnv* which is first argument to native
2249   if (!is_critical_native) {
2250     __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2251 
2252     // Now set thread in native
2253     __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2254   }
2255 
2256   __ call(RuntimeAddress(native_func));
2257 
2258   // Verify or restore cpu control state after JNI call
2259   __ restore_cpu_control_state_after_jni();
2260 
2261   // Unpack native results.
2262   switch (ret_type) {
2263   case T_BOOLEAN: __ c2bool(rax);            break;

2367     }
2368 
2369 
2370     // get address of the stack lock
2371     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2372     //  get old displaced header
2373     __ movptr(old_hdr, Address(rax, 0));
2374 
2375     // Atomic swap old header if oop still contains the stack lock
2376     __ lock();
2377     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2378     __ jcc(Assembler::notEqual, slow_path_unlock);
2379 
2380     // slow path re-enters here
2381     __ bind(unlock_done);
2382     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2383       restore_native_result(masm, ret_type, stack_slots);
2384     }
2385 
2386     __ bind(done);
2387     // __ dec_held_monitor_count(r15_thread);
2388   }
2389   {
2390     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2391     save_native_result(masm, ret_type, stack_slots);
2392     __ mov_metadata(c_rarg1, method());
2393     __ call_VM_leaf(
2394          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2395          r15_thread, c_rarg1);
2396     restore_native_result(masm, ret_type, stack_slots);
2397   }
2398 
2399   __ reset_last_Java_frame(false);
2400 
2401   // Unbox oop result, e.g. JNIHandles::resolve value.
2402   if (is_reference_type(ret_type)) {
2403     __ resolve_jobject(rax /* value */,
2404                        r15_thread /* thread */,
2405                        rcx /* tmp */);
2406   }
2407 
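resolve_jobject is the assembly counterpart of JNIHandles::resolve: the value returned by the native method is a handle, not the oop itself. A simplified sketch of the unboxing, ignoring the weak-handle tag bit (editor's assumption):

    // NULL handles resolve to NULL; otherwise dereference the handle slot.
    oop result = (handle == NULL) ? (oop)NULL : *reinterpret_cast<oop*>(handle);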

3147 
3148   if (UseRTMLocking) {
3149     // Abort RTM transaction before calling runtime
3150     // because critical section will be large and will be
3151     // aborted anyway. Also nmethod could be deoptimized.
3152     __ xabort(0);
3153   }
3154 
3155   // Make room for return address (or push it again)
3156   if (!cause_return) {
3157     __ push(rbx);
3158   }
3159 
3160   // Save registers, fpu state, and flags
3161   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3162 
3163   // The following is basically a call_VM.  However, we need the precise
3164   // address of the call in order to generate an oopmap. Hence, we do all the
3165   // work ourselves.
3166 
3167   __ set_last_Java_frame(noreg, noreg, NULL);  // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next:
3168 
3169   // The return address must always be correct so that the frame constructor never
3170   // sees an invalid pc.
3171 
3172   if (!cause_return) {
3173     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3174     // Additionally, rbx is a callee saved register and we can look at it later to determine
3175     // if someone changed the return address for us!
3176     __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3177     __ movptr(Address(rbp, wordSize), rbx);
3178   }
3179 
3180   // Do the call
3181   __ mov(c_rarg0, r15_thread);
3182   __ call(RuntimeAddress(call_ptr));
3183 
3184   // Set an oopmap for the call site.  This oopmap will map all
3185   // oop-registers and debug-info registers as callee-saved.  This
3186   // will allow deoptimization at this safepoint to find all possible
3187   // debug-info recordings, as well as let GC find all oops.

3295 //
3296 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3297   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3298 
3299   // allocate space for the code
3300   ResourceMark rm;
3301 
3302   CodeBuffer buffer(name, 1000, 512);
3303   MacroAssembler* masm                = new MacroAssembler(&buffer);
3304 
3305   int frame_size_in_words;
3306 
3307   OopMapSet *oop_maps = new OopMapSet();
3308   OopMap* map = NULL;
3309 
3310   int start = __ offset();
3311 
3312   // No need to save vector registers since they are caller-saved anyway.
3313   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ false);
3314 
3315   // __ stop_if_in_cont(r10, "CONT 3");
3316 
3317   int frame_complete = __ offset();
3318 
3319   __ set_last_Java_frame(noreg, noreg, NULL);
3320 
3321   __ mov(c_rarg0, r15_thread);
3322 
3323   __ call(RuntimeAddress(destination));
3324 
3325 
3326   // Set an oopmap for the call site.
3327   // We need this not only for callee-saved registers, but also for volatile
3328   // registers that the compiler might be keeping live across a safepoint.
3329 
3330   oop_maps->add_gc_map( __ offset() - start, map);
3331 
3332   // rax contains the address we are going to jump to assuming no exception got installed
3333 
3334   // clear last_Java_sp
3335   __ reset_last_Java_frame(false);
3336   // check for pending exceptions
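For reference, blobs like this one are built once at startup; a sketch of a typical call site in SharedRuntime::generate_stubs (call shape assumed from context, not shown in this webrev):

    _resolve_static_call_blob =
        generate_resolve_blob(CAST_FROM_FN_PTR(address,
            SharedRuntime::resolve_static_call_C), "resolve_static_call");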