src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/barrierSetAssembler.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "logging/log.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "oops/compiledICHolder.hpp"
  45 #include "oops/klass.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "runtime/signature.hpp"

 925         // So we must adjust where to pick up the data to match the interpreter.
 926 
 927         const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
 928                            next_off : ld_off;
 929 
 930         // this can be a misaligned move
 931         __ movq(r, Address(saved_sp, offset));
 932       } else {
 933         // sign extend and use a full word?
 934         __ movl(r, Address(saved_sp, ld_off));
 935       }
 936     } else {
 937       if (!r_2->is_valid()) {
 938         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 939       } else {
 940         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
 941       }
 942     }
 943   }
 944 
 945   // 6243940 We might end up in handle_wrong_method if
 946   // the callee is deoptimized as we race thru here. If that
 947   // happens we don't want to take a safepoint because the
 948   // caller frame will look interpreted and arguments are now
 949   // "compiled" so it is much better to make this transition
 950   // invisible to the stack walking code. Unfortunately if
 951   // we try and find the callee by normal means a safepoint
 952   // is possible. So we stash the desired callee in the thread
 953   // and the VM will find it there should this case occur.
 954 
 955   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
 956 
 957   // put Method* where a c2i adapter would expect it, should we end up there.
 958   // Only needed because c2's resolve stubs return Method* as a result in
 959   // rax.
 960   __ mov(rax, rbx);
 961   __ jmp(r11);
 962 }
 963 
 964 // ---------------------------------------------------------------

1413                             const methodHandle& method,
1414                             const BasicType* sig_bt,
1415                             const VMRegPair* regs) {
1416   Register temp_reg = rbx;  // not part of any compiled calling seq
1417   if (VerifyOops) {
1418     for (int i = 0; i < method->size_of_parameters(); i++) {
1419       if (is_reference_type(sig_bt[i])) {
1420         VMReg r = regs[i].first();
1421         assert(r->is_valid(), "bad oop arg");
1422         if (r->is_stack()) {
1423           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1424           __ verify_oop(temp_reg);
1425         } else {
1426           __ verify_oop(r->as_Register());
1427         }
1428       }
1429     }
1430   }
1431 }
1432 
1433 static void gen_special_dispatch(MacroAssembler* masm,
1434                                  const methodHandle& method,
1435                                  const BasicType* sig_bt,
1436                                  const VMRegPair* regs) {
1437   verify_oop_args(masm, method, sig_bt, regs);
1438   vmIntrinsics::ID iid = method->intrinsic_id();
1439 
1440   // Now write the args into the outgoing interpreter space
1441   bool     has_receiver   = false;
1442   Register receiver_reg   = noreg;
1443   int      member_arg_pos = -1;
1444   Register member_reg     = noreg;
1445   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1446   if (ref_kind != 0) {
1447     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1448     member_reg = rbx;  // known to be free at this point
1449     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1450   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1451     has_receiver = true;
1452   } else {

1494 // in the Java compiled code convention, marshals them to the native
1495 // convention (handlizes oops, etc), transitions to native, makes the call,
1496 // returns to java state (possibly blocking), unhandlizes any result and
1497 // returns.
1498 //
1499 // Critical native functions are a shorthand for the use of
1500 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1501 // functions.  The wrapper is expected to unpack the arguments before
1502 // passing them to the callee. Critical native functions leave the state _in_Java,
1503 // since they cannot stop for GC.
1504 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1505 // block and the check for pending exceptions, since it's impossible for them
1506 // to be thrown.
1507 //
1508 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1509                                                 const methodHandle& method,
1510                                                 int compile_id,
1511                                                 BasicType* in_sig_bt,
1512                                                 VMRegPair* in_regs,
1513                                                 BasicType ret_type) {
1514   if (method->is_method_handle_intrinsic()) {
1515     vmIntrinsics::ID iid = method->intrinsic_id();
1516     intptr_t start = (intptr_t)__ pc();
1517     int vep_offset = ((intptr_t)__ pc()) - start;
1518     gen_special_dispatch(masm,
1519                          method,
1520                          in_sig_bt,
1521                          in_regs);
1522     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1523     __ flush();
1524     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1525     return nmethod::new_native_nmethod(method,
1526                                        compile_id,
1527                                        masm->code(),
1528                                        vep_offset,
1529                                        frame_complete,
1530                                        stack_slots / VMRegImpl::slots_per_word,
1531                                        in_ByteSize(-1),
1532                                        in_ByteSize(-1),
1533                                        (OopMapSet*)NULL);

1938       // Test if the oopMark is an obvious stack pointer, i.e.,
1939       //  1) (mark & 3) == 0, and
1940       //  2) rsp <= mark < rsp + os::vm_page_size()
1941       // These 3 tests can be done by evaluating the following
1942       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1943       // assuming both stack pointer and pagesize have their
1944       // least significant 2 bits clear.
1945       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
1946 
1947       __ subptr(swap_reg, rsp);
1948       __ andptr(swap_reg, 3 - os::vm_page_size());
1949 
1950       // Save the test result; for the recursive case, the result is zero
1951       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1952       __ jcc(Assembler::notEqual, slow_path_lock);
1953     } else {
1954       __ jmp(slow_path_lock);
1955     }
1956 
1957     // Slow path will re-enter here
1958 
1959     __ bind(lock_done);
1960   }
1961 
1962   // Finally just about ready to make the JNI call
1963 
1964   // get JNIEnv* which is first argument to native
1965   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
1966 
1967   // Now set thread in native
1968   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
1969 
1970   __ call(RuntimeAddress(native_func));
1971 
1972   // Verify or restore cpu control state after JNI call
1973   __ restore_cpu_control_state_after_jni();
1974 
1975   // Unpack native results.
1976   switch (ret_type) {
1977   case T_BOOLEAN: __ c2bool(rax);            break;
1978   case T_CHAR   : __ movzwl(rax, rax);       break;
1979   case T_BYTE   : __ sign_extend_byte(rax);  break;

2076       // get address of the stack lock
2077       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2078       //  get old displaced header
2079       __ movptr(old_hdr, Address(rax, 0));
2080 
2081       // Atomically swap in the old header if the oop still contains the stack lock
2082       __ lock();
2083       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2084       __ jcc(Assembler::notEqual, slow_path_unlock);
2085     } else {
2086       __ jmp(slow_path_unlock);
2087     }
2088 
2089     // slow path re-enters here
2090     __ bind(unlock_done);
2091     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2092       restore_native_result(masm, ret_type, stack_slots);
2093     }
2094 
2095     __ bind(done);
2096 
2097   }
2098   {
2099     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2100     save_native_result(masm, ret_type, stack_slots);
2101     __ mov_metadata(c_rarg1, method());
2102     __ call_VM_leaf(
2103          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2104          r15_thread, c_rarg1);
2105     restore_native_result(masm, ret_type, stack_slots);
2106   }
2107 
2108   __ reset_last_Java_frame(false);
2109 
2110   // Unbox oop result, e.g. JNIHandles::resolve value.
2111   if (is_reference_type(ret_type)) {
2112     __ resolve_jobject(rax /* value */,
2113                        r15_thread /* thread */,
2114                        rcx /* tmp */);
2115   }
2116 

2850 
2851   if (UseRTMLocking) {
2852     // Abort RTM transaction before calling runtime
2853     // because critical section will be large and will be
2854     // aborted anyway. Also nmethod could be deoptimized.
2855     __ xabort(0);
2856   }
2857 
2858   // Make room for return address (or push it again)
2859   if (!cause_return) {
2860     __ push(rbx);
2861   }
2862 
2863   // Save registers, fpu state, and flags
2864   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
2865 
2866   // The following is basically a call_VM.  However, we need the precise
2867   // address of the call in order to generate an oopmap. Hence, we do all the
2868  // work ourselves.
2869 
2870   __ set_last_Java_frame(noreg, noreg, NULL);
2871 
2872   // The return address must always be correct so that the frame constructor
2873   // never sees an invalid pc.
2874 
2875   if (!cause_return) {
2876     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
2877     // Additionally, rbx is a callee saved register and we can look at it later to determine
2878     // if someone changed the return address for us!
2879     __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
2880     __ movptr(Address(rbp, wordSize), rbx);
2881   }
2882 
2883   // Do the call
2884   __ mov(c_rarg0, r15_thread);
2885   __ call(RuntimeAddress(call_ptr));
2886 
2887   // Set an oopmap for the call site.  This oopmap will map all
2888   // oop-registers and debug-info registers as callee-saved.  This
2889   // will allow deoptimization at this safepoint to find all possible
2890   // debug-info recordings, as well as let GC find all oops.

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/nativeInst.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gcLocker.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/barrierSetAssembler.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "logging/log.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/compiledICHolder.hpp"
  46 #include "oops/klass.inline.hpp"
  47 #include "prims/methodHandles.hpp"
  48 #include "runtime/jniHandles.hpp"
  49 #include "runtime/safepointMechanism.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "runtime/signature.hpp"

 926         // So we must adjust where to pick up the data to match the interpreter.
 927 
 928         const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
 929                            next_off : ld_off;
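             // (Editorial gloss, not in the original: in the interpreter's
             // layout a T_LONG/T_DOUBLE occupies two stack slots with the
             // 64-bit value in the second one, which is why next_off rather
             // than ld_off is used for those types.)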
 930 
 931         // this can be a misaligned move
 932         __ movq(r, Address(saved_sp, offset));
 933       } else {
 934         // sign extend and use a full word?
 935         __ movl(r, Address(saved_sp, ld_off));
 936       }
 937     } else {
 938       if (!r_2->is_valid()) {
 939         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 940       } else {
 941         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
 942       }
 943     }
 944   }
 945 
 946   __ push_cont_fastpath(r15_thread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
 947 
 948   // 6243940 We might end up in handle_wrong_method if
 949   // the callee is deoptimized as we race thru here. If that
 950   // happens we don't want to take a safepoint because the
 951   // caller frame will look interpreted and arguments are now
 952   // "compiled" so it is much better to make this transition
 953   // invisible to the stack walking code. Unfortunately if
 954   // we try and find the callee by normal means a safepoint
 955   // is possible. So we stash the desired callee in the thread
 956   // and the VM will find it there should this case occur.
 957 
 958   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
 959 
 960   // put Method* where a c2i adapter would expect it, should we end up there.
 961   // Only needed because c2's resolve stubs return Method* as a result in
 962   // rax.
 963   __ mov(rax, rbx);
 964   __ jmp(r11);
 965 }
 966 
 967 // ---------------------------------------------------------------

1416                             const methodHandle& method,
1417                             const BasicType* sig_bt,
1418                             const VMRegPair* regs) {
1419   Register temp_reg = rbx;  // not part of any compiled calling seq
1420   if (VerifyOops) {
1421     for (int i = 0; i < method->size_of_parameters(); i++) {
1422       if (is_reference_type(sig_bt[i])) {
1423         VMReg r = regs[i].first();
1424         assert(r->is_valid(), "bad oop arg");
1425         if (r->is_stack()) {
1426           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1427           __ verify_oop(temp_reg);
1428         } else {
1429           __ verify_oop(r->as_Register());
1430         }
1431       }
1432     }
1433   }
1434 }
1435 
1436 // defined in stubGenerator_x86_64.cpp
1437 OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots);
1438 void fill_continuation_entry(MacroAssembler* masm);
1439 void continuation_enter_cleanup(MacroAssembler* masm);
1440 
1441 // enterSpecial(Continuation c, boolean isContinue)
1442 // On entry: c_rarg1 -- the continuation object
1443 //           c_rarg2 -- isContinue
1444 static void gen_continuation_enter(MacroAssembler* masm,
1445                                  const methodHandle& method,
1446                                  const BasicType* sig_bt,
1447                                  const VMRegPair* regs,
1448                                  int& exception_offset,
1449                                  OopMapSet* oop_maps,
1450                                  int& frame_complete,
1451                                  int& stack_slots) {
1452   //verify_oop_args(masm, method, sig_bt, regs);
1453   AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
1454                          relocInfo::static_call_type);
1455 
1456   stack_slots = 2; // will be overwritten
1457   address start = __ pc();
1458 
1459   Label call_thaw, exit;
1460 
1461   __ push(rbp);
1462 
1463   //BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1464   //bs->nmethod_entry_barrier(masm);
1465   OopMap* map = continuation_enter_setup(masm, stack_slots);  // kills rax
1466 
1467   // Frame is now completed as far as size and linkage.
1468   frame_complete = __ pc() - start;
1469   // if isContinue == 0
1470   //   _enterSP = sp
1471   // end
1472  
1473   fill_continuation_entry(masm); // kills rax
1474 
1475   __ cmpl(c_rarg2, 0);
1476   __ jcc(Assembler::notEqual, call_thaw);
1477 
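       // (Editorial note: the nops below align __ pc() + 1 -- the 32-bit
       // displacement of the call emitted at 'mark', just past the 0xE8
       // opcode byte -- to a 4-byte boundary, presumably so the resolve
       // stub can later patch the displacement atomically.)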
1478   int up = align_up((intptr_t) __ pc() + 1, 4) - (intptr_t) (__ pc() + 1);
1479   if (up > 0) {
1480     __ nop(up);
1481   }
1482 
1483   address mark = __ pc();
1484   __ call(resolve);
1485   oop_maps->add_gc_map(__ pc() - start, map);
1486   __ post_call_nop();
1487 
1488   __ jmp(exit);
1489 
1490   __ bind(call_thaw);
1491 
1492   __ movptr(rbx, (intptr_t) StubRoutines::cont_thaw());
1493   __ call(rbx);
1494   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1495   ContinuationEntry::return_pc_offset = __ pc() - start;
1496   __ post_call_nop();
1497 
1498   __ bind(exit);
1499   continuation_enter_cleanup(masm);
1500   __ pop(rbp);
1501   __ ret(0);
1502 
1503   /// exception handling
1504 
1505   exception_offset = __ pc() - start;
1506 
1507   continuation_enter_cleanup(masm);
1508   __ addptr(rsp, 1*wordSize);
1509 
1510   __ movptr(rbx, rax); // save the exception
1511   __ movptr(c_rarg0, Address(rsp, 0));
1512 
1513   __ call_VM_leaf(CAST_FROM_FN_PTR(address,
1514         SharedRuntime::exception_handler_for_return_address),
1515       r15_thread, c_rarg0);
1516   __ mov(rdi, rax);
1517   __ movptr(rax, rbx);
1518   __ mov(rbx, rdi);
1519   __ pop(rdx);
1520 
1521   // continue at exception handler (return address removed)
1522   // rax: exception
1523   // rbx: exception handler
1524   // rdx: throwing pc
1525   __ verify_oop(rax);
1526   __ jmp(rbx);
1527 
1528   CodeBuffer* cbuf = masm->code_section()->outer();
1529   address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, mark);
1530 }
1531 
1532 static void gen_special_dispatch(MacroAssembler* masm,
1533                                  const methodHandle& method,
1534                                  const BasicType* sig_bt,
1535                                  const VMRegPair* regs) {
1536   verify_oop_args(masm, method, sig_bt, regs);
1537   vmIntrinsics::ID iid = method->intrinsic_id();
1538 
1539   // Now write the args into the outgoing interpreter space
1540   bool     has_receiver   = false;
1541   Register receiver_reg   = noreg;
1542   int      member_arg_pos = -1;
1543   Register member_reg     = noreg;
1544   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1545   if (ref_kind != 0) {
1546     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1547     member_reg = rbx;  // known to be free at this point
1548     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1549   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1550     has_receiver = true;
1551   } else {

1593 // in the Java compiled code convention, marshals them to the native
1594 // convention (handlizes oops, etc), transitions to native, makes the call,
1595 // returns to java state (possibly blocking), unhandlizes any result and
1596 // returns.
1597 //
1598 // Critical native functions are a shorthand for the use of
1599 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1600 // functions.  The wrapper is expected to unpack the arguments before
1601 // passing them to the callee. Critical native functions leave the state _in_Java,
1602 // since they cannot stop for GC.
1603 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1604 // block and the check for pending exceptions, since it's impossible for them
1605 // to be thrown.
1606 //
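// A minimal editorial sketch of the sequence the wrapper performs for, say, a
// static native method returning jint; helper names here are hypothetical,
// not HotSpot or JNI API:
//
//   jint wrapper(jint x, JavaThread* thread) {
//     JNIEnv* env = thread->jni_environment();     // first native argument
//     set_state(thread, _thread_in_native);        // transition to native
//     jint r = native_func(env, klass_handle, x);  // the actual JNI call
//     transition_back_to_java(thread);             // possibly blocking
//     return r;                                    // primitive: nothing to unhandlize
//   }
//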
1607 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1608                                                 const methodHandle& method,
1609                                                 int compile_id,
1610                                                 BasicType* in_sig_bt,
1611                                                 VMRegPair* in_regs,
1612                                                 BasicType ret_type) {
1613   if (method->is_continuation_enter_intrinsic()) {
1614     vmIntrinsics::ID iid = method->intrinsic_id();
1615     intptr_t start = (intptr_t)__ pc();
1616     int vep_offset = ((intptr_t)__ pc()) - start;
1617     int exception_offset = 0;
1618     int frame_complete = 0;
1619     int stack_slots = 0;
1620     OopMapSet* oop_maps =  new OopMapSet();
1621     gen_continuation_enter(masm,
1622                          method,
1623                          in_sig_bt,
1624                          in_regs,
1625                          exception_offset,
1626                          oop_maps,
1627                          frame_complete,
1628                          stack_slots);
1629     __ flush();
1630     nmethod* nm = nmethod::new_native_nmethod(method,
1631                                               compile_id,
1632                                               masm->code(),
1633                                               vep_offset,
1634                                               frame_complete,
1635                                               stack_slots,
1636                                               in_ByteSize(-1),
1637                                               in_ByteSize(-1),
1638                                               oop_maps,
1639                                               exception_offset);
1640     ContinuationEntry::set_enter_nmethod(nm);
1641     return nm;
1642   }
1643 
1644   if (method->is_method_handle_intrinsic()) {
1645     vmIntrinsics::ID iid = method->intrinsic_id();
1646     intptr_t start = (intptr_t)__ pc();
1647     int vep_offset = ((intptr_t)__ pc()) - start;
1648     gen_special_dispatch(masm,
1649                          method,
1650                          in_sig_bt,
1651                          in_regs);
1652     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1653     __ flush();
1654     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1655     return nmethod::new_native_nmethod(method,
1656                                        compile_id,
1657                                        masm->code(),
1658                                        vep_offset,
1659                                        frame_complete,
1660                                        stack_slots / VMRegImpl::slots_per_word,
1661                                        in_ByteSize(-1),
1662                                        in_ByteSize(-1),
1663                                        (OopMapSet*)NULL);

2068       // Test if the oopMark is an obvious stack pointer, i.e.,
2069       //  1) (mark & 3) == 0, and
2070       //  2) rsp <= mark < rsp + os::vm_page_size()
2071       // These 3 tests can be done by evaluating the following
2072       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2073       // assuming both stack pointer and pagesize have their
2074       // least significant 2 bits clear.
2075       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2076 
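       // (Editorial sketch, assuming a 4 KiB page: 3 - os::vm_page_size() is
       // 3 - 4096 == 0x...f003, a mask that keeps the two alignment bits and
       // every bit at or above the page bit.  In plain C++:
       //
       //   bool looks_like_stack_lock(uintptr_t mark, uintptr_t sp) {
       //     return ((mark - sp) & (uintptr_t)(3 - 4096)) == 0;
       //   }
       //
       //   looks_like_stack_lock(sp + 64, sp)    -> true   aligned, within a page
       //   looks_like_stack_lock(sp + 65, sp)    -> false  low bits set
       //   looks_like_stack_lock(sp + 8192, sp)  -> false  a page or more away)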
2077       __ subptr(swap_reg, rsp);
2078       __ andptr(swap_reg, 3 - os::vm_page_size());
2079 
2080       // Save the test result; for the recursive case, the result is zero
2081       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2082       __ jcc(Assembler::notEqual, slow_path_lock);
2083     } else {
2084       __ jmp(slow_path_lock);
2085     }
2086 
2087     // Slow path will re-enter here
2088     __ bind(lock_done);
2089     // __ inc_held_monitor_count(r15_thread);
2090   }
2091 
2092   // Finally just about ready to make the JNI call
2093 
2094   // get JNIEnv* which is first argument to native
2095   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2096 
2097   // Now set thread in native
2098   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2099 
2100   __ call(RuntimeAddress(native_func));
2101 
2102   // Verify or restore cpu control state after JNI call
2103   __ restore_cpu_control_state_after_jni();
2104 
2105   // Unpack native results.
2106   switch (ret_type) {
2107   case T_BOOLEAN: __ c2bool(rax);            break;
2108   case T_CHAR   : __ movzwl(rax, rax);       break;
2109   case T_BYTE   : __ sign_extend_byte(rax);  break;

2206       // get address of the stack lock
2207       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2208       //  get old displaced header
2209       __ movptr(old_hdr, Address(rax, 0));
2210 
2211       // Atomically swap in the old header if the oop still contains the stack lock
2212       __ lock();
2213       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2214       __ jcc(Assembler::notEqual, slow_path_unlock);
2215     } else {
2216       __ jmp(slow_path_unlock);
2217     }
2218 
2219     // slow path re-enters here
2220     __ bind(unlock_done);
2221     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2222       restore_native_result(masm, ret_type, stack_slots);
2223     }
2224 
2225     __ bind(done);
2226     // __ dec_held_monitor_count(r15_thread);
2227   }
2228   {
2229     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
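     // (Editorial gloss: SkipIfEqual's constructor compares the flag byte
     // against 'false' and jumps past the scope when they match, so the
     // probe below runs only when DTraceMethodProbes is true; the
     // destructor binds the jump target.)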
2230     save_native_result(masm, ret_type, stack_slots);
2231     __ mov_metadata(c_rarg1, method());
2232     __ call_VM_leaf(
2233          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2234          r15_thread, c_rarg1);
2235     restore_native_result(masm, ret_type, stack_slots);
2236   }
2237 
2238   __ reset_last_Java_frame(false);
2239 
2240   // Unbox oop result, e.g. JNIHandles::resolve value.
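   // (Editorial gloss: JNIHandles::resolve is essentially a null-checked
   // dereference of the jobject handle -- NULL stays NULL -- with a low
   // tag bit typically distinguishing weak from strong handles.)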
2241   if (is_reference_type(ret_type)) {
2242     __ resolve_jobject(rax /* value */,
2243                        r15_thread /* thread */,
2244                        rcx /* tmp */);
2245   }
2246 

2980 
2981   if (UseRTMLocking) {
2982     // Abort RTM transaction before calling runtime
2983     // because critical section will be large and will be
2984     // aborted anyway. Also nmethod could be deoptimized.
2985     __ xabort(0);
2986   }
2987 
2988   // Make room for return address (or push it again)
2989   if (!cause_return) {
2990     __ push(rbx);
2991   }
2992 
2993   // Save registers, fpu state, and flags
2994   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
2995 
2996   // The following is basically a call_VM.  However, we need the precise
2997   // address of the call in order to generate an oopmap. Hence, we do all the
2998   // work ourselves.
2999 
3000   __ set_last_Java_frame(noreg, noreg, NULL);  // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next:
3001 
3002   // The return address must always be correct so that the frame constructor
3003   // never sees an invalid pc.
3004 
3005   if (!cause_return) {
3006     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3007     // Additionally, rbx is a callee saved register and we can look at it later to determine
3008     // if someone changed the return address for us!
3009     __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3010     __ movptr(Address(rbp, wordSize), rbx);
3011   }
3012 
3013   // Do the call
3014   __ mov(c_rarg0, r15_thread);
3015   __ call(RuntimeAddress(call_ptr));
3016 
3017   // Set an oopmap for the call site.  This oopmap will map all
3018   // oop-registers and debug-info registers as callee-saved.  This
3019   // will allow deoptimization at this safepoint to find all possible
3020   // debug-info recordings, as well as let GC find all oops.