src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

1295     if (method->is_continuation_enter_intrinsic()) {
1296       assert(interpreted_entry_offset != -1, "Must be set");
1297       assert(exception_offset != -1,         "Must be set");
1298     } else {
1299       assert(interpreted_entry_offset == -1, "Must be unset");
1300       assert(exception_offset == -1,         "Must be unset");
1301     }
1302     assert(frame_complete != -1,    "Must be set");
1303     assert(stack_slots != -1,       "Must be set");
1304     assert(vep_offset != -1,        "Must be set");
1305 #endif
1306 
1307     __ flush();
1308     nmethod* nm = nmethod::new_native_nmethod(method,
1309                                               compile_id,
1310                                               masm->code(),
1311                                               vep_offset,
1312                                               frame_complete,
1313                                               stack_slots,
1314                                               in_ByteSize(-1),
1315                                               in_ByteSize(-1),
1316                                               oop_maps,
1317                                               exception_offset);
1318     if (method->is_continuation_enter_intrinsic()) {
1319       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1320     } else if (method->is_continuation_yield_intrinsic()) {
1321       _cont_doYield_stub = nm;
1322     } else {
1323       guarantee(false, "Unknown Continuation native intrinsic");
1324     }
1325     return nm;
1326   }
1327 
1328   if (method->is_method_handle_intrinsic()) {
1329     vmIntrinsics::ID iid = method->intrinsic_id();
1330     intptr_t start = (intptr_t)__ pc();
1331     int vep_offset = ((intptr_t)__ pc()) - start;
1332 
1333     // First instruction must be a nop as it may need to be patched on deoptimization
1334     __ nop();
1335     gen_special_dispatch(masm,
1336                          method,
1337                          in_sig_bt,
1338                          in_regs);
1339     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1340     __ flush();
1341     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1342     return nmethod::new_native_nmethod(method,
1343                                        compile_id,
1344                                        masm->code(),
1345                                        vep_offset,
1346                                        frame_complete,
1347                                        stack_slots / VMRegImpl::slots_per_word,
1348                                        in_ByteSize(-1),
1349                                        in_ByteSize(-1),
1350                                        (OopMapSet*)NULL);
1351   }
1352   address native_func = method->native_function();
1353   assert(native_func != NULL, "must have function");
1354 
1355   // An OopMap for lock (and class if static)
1356   OopMapSet *oop_maps = new OopMapSet();
1357   intptr_t start = (intptr_t)__ pc();
1358 
1359   // We have received a description of where all the Java args are located
1360   // on entry to the wrapper. We need to convert these args to where
1361   // the JNI function will expect them. To figure out where they go
1362   // we convert the Java signature to a C signature by inserting
1363   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1364 
1365   const int total_in_args = method->size_of_parameters();
1366   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1367 
1368   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1369   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);

1389   }
1390 
1391   // Compute framesize for the wrapper.  We need to handlize all oops in
1392   // incoming registers
1393 
1394   // Calculate the total number of stack slots we will need.
1395 
1396   // First count the abi requirement plus all of the outgoing args
1397   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1398 
1399   // Now the space for the inbound oop handle area
1400   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1401 
1402   int oop_handle_offset = stack_slots;
1403   stack_slots += total_save_slots;
1404 
1405   // Now any space we need for handlizing a klass if this is a static method
1406 
1407   int klass_slot_offset = 0;
1408   int klass_offset = -1;
1409   int lock_slot_offset = 0;
1410   bool is_static = false;
1411 
1412   if (method->is_static()) {
1413     klass_slot_offset = stack_slots;
1414     stack_slots += VMRegImpl::slots_per_word;
1415     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1416     is_static = true;
1417   }
1418 
1419   // Plus a lock if needed
1420 
1421   if (method->is_synchronized()) {
1422     lock_slot_offset = stack_slots;
1423     stack_slots += VMRegImpl::slots_per_word;
1424   }
1425 
1426   // Now a place (+2) to save return values or temp during shuffling
1427   // + 4 for return address (which we own) and saved rfp
1428   stack_slots += 6;
1429 
1430   // OK, the space we have allocated will look like:
1431   //
1432   //
1433   // FP-> |                     |
1434   //      |---------------------|
1435   //      | 2 slots for moves   |
1436   //      |---------------------|
1437   //      | lock box (if sync)  |
1438   //      |---------------------| <- lock_slot_offset
1439   //      | klass (if static)   |
1440   //      |---------------------| <- klass_slot_offset
1441   //      | oopHandle area      |
1442   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1443   //      | outbound memory     |
1444   //      | based arguments     |
1445   //      |                     |
1446   //      |---------------------|
1447   //      |                     |
1448   // SP-> | out_preserved_slots |
1449   //
1450   //
1451 
1452 
1453   // Now compute the actual number of stack words we need, rounding to keep the
1454   // stack properly aligned.
1455   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1456 
1457   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1458 
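To make the slot arithmetic concrete: VMRegImpl::stack_slot_size is 4 bytes and StackAlignmentInBytes is 16 on aarch64, so StackAlignmentInSlots is 4. A worked sketch of the rounding with a hypothetical slot count (align_up_slots mirrors HotSpot's align_up for power-of-two alignments):

    static int align_up_slots(int slots, int alignment) {
      // valid for power-of-two alignments only
      return (slots + alignment - 1) & ~(alignment - 1);
    }
    // If the pieces above summed to, say, 42 slots:
    //   align_up_slots(42, 4) == 44 slots
    //   stack_size == 44 * 4  == 176 bytes, which is 16-byte aligned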

1675       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1676       rthread, c_rarg1);
1677     restore_args(masm, total_c_args, c_arg, out_regs);
1678   }
1679 
1680   // Lock a synchronized method
1681 
1682   // Register definitions used by locking and unlocking
1683 
1684   const Register swap_reg = r0;
1685   const Register obj_reg  = r19;  // Will contain the oop
1686   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1687   const Register old_hdr  = r13;  // value of old header at unlock time
1688   const Register tmp = lr;
1689 
1690   Label slow_path_lock;
1691   Label lock_done;
1692 
1693   if (method->is_synchronized()) {
1694     Label count;
1695     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1696 
1697     // Get the handle (the 2nd argument)
1698     __ mov(oop_handle_reg, c_rarg1);
1699 
1700     // Get address of the box
1701 
1702     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1703 
1704     // Load the oop from the handle
1705     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1706 
1707     if (!UseHeavyMonitors) {
1708       // Load (object->mark() | 1) into swap_reg %r0
1709       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1710       __ orr(swap_reg, rscratch1, 1);
1711 
1712       // Save (object->mark() | 1) into BasicLock's displaced header
1713       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1714 
1715       // src -> dest iff dest == r0 else r0 <- dest
1716       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
1717 
1718       // Hmm should this move to the slow path code area???
1719 
1720       // Test if the oopMark is an obvious stack pointer, i.e.,
1721       //  1) (mark & 3) == 0, and
1722       //  2) sp <= mark < mark + os::vm_page_size()
1723       // These 3 tests can be done by evaluating the following
1724       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1725       // assuming both the stack pointer and the page size have their
1726       // least significant 2 bits clear.
1727       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1728 
1729       __ sub(swap_reg, sp, swap_reg);
1730       __ neg(swap_reg, swap_reg);
1731       __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1732 
1733       // Save the test result; for the recursive case, the result is zero
1734       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1735       __ br(Assembler::NE, slow_path_lock);
1736     } else {
1737       __ b(slow_path_lock);
1738     }
1739     __ bind(count);
1740     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1741 
1742     // Slow path will re-enter here
1743     __ bind(lock_done);
1744   }
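The branch-free test in the fast path above folds the alignment check and the page-range check into a single AND. A C-level sketch of the same predicate (looks_like_stack_lock is an illustrative name, assuming a 4 KiB page and 64-bit two's-complement wraparound):

    #include <cstdint>

    bool looks_like_stack_lock(uintptr_t mark, uintptr_t sp) {
      const uintptr_t page_size = 4096;   // stand-in for os::vm_page_size()
      // 3 - page_size sets bits 0-1 and every bit >= 12, so the AND is zero
      // iff (mark - sp) has its low 2 bits clear and lies in [0, page_size).
      // If mark < sp, the subtraction wraps and the high bits make it non-zero.
      return ((mark - sp) & (3 - page_size)) == 0;
    }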
1745 
1746 
1747   // Finally just about ready to make the JNI call
1748 
1749   // get JNIEnv*, which is the first argument to the native method
1750   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1751 
1752   // Now set thread in native
1753   __ mov(rscratch1, _thread_in_native);
1754   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1755   __ stlrw(rscratch1, rscratch2);
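stlrw is AArch64's store-release of a 32-bit word: the transition to _thread_in_native is published with release ordering, so any thread that observes the new state also observes the writes that preceded it. A rough C++ memory-model analogy, not the VM's code (the value 4 is assumed here from HotSpot's JavaThreadState enum):

    #include <atomic>

    std::atomic<int> thread_state;

    void transition_to_native() {
      // mov rscratch1, #_thread_in_native ; stlrw rscratch1, [rscratch2]
      thread_state.store(4 /* _thread_in_native */, std::memory_order_release);
    }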

1824   __ stlrw(rscratch1, rscratch2);
1825   __ bind(after_transition);
1826 
1827   Label reguard;
1828   Label reguard_done;
1829   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1830   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1831   __ br(Assembler::EQ, reguard);
1832   __ bind(reguard_done);
1833 
1834   // The native result, if any, is live
1835 
1836   // Unlock
1837   Label unlock_done;
1838   Label slow_path_unlock;
1839   if (method->is_synchronized()) {
1840 
1841     // Get locked oop from the handle we passed to jni
1842     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1843 
1844     Label done, not_recursive;
1845 
1846     if (!UseHeavyMonitors) {
1847       // Simple recursive lock?
1848       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1849       __ cbnz(rscratch1, not_recursive);
1850       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1851       __ b(done);
1852     }
1853 
1854     __ bind(not_recursive);
1855 
1856     // Must save r0 if it is live now because cmpxchg must use it
1857     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1858       save_native_result(masm, ret_type, stack_slots);
1859     }
1860 
1861     if (!UseHeavyMonitors) {
1862       // get address of the stack lock
1863       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1864       //  get old displaced header
1865       __ ldr(old_hdr, Address(r0, 0));
1866 
1867       // Atomic swap old header if oop still contains the stack lock
1868       Label count;
1869       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1870       __ bind(count);
1871       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1872     } else {
1873       __ b(slow_path_unlock);
1874     }
1875 
1876     // slow path re-enters here
1877     __ bind(unlock_done);
1878     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1879       restore_native_result(masm, ret_type, stack_slots);
1880     }
1881 
1882     __ bind(done);
1883   }
1884 
1885   Label dtrace_method_exit, dtrace_method_exit_done;
1886   {
1887     uint64_t offset;
1888     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1889     __ ldrb(rscratch1, Address(rscratch1, offset));
1890     __ cbnzw(rscratch1, dtrace_method_exit);

1920 
1921   // forward the exception
1922   __ bind(exception_pending);
1923 
1924 
1925   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1926 
1927   // Slow path locking & unlocking
1928   if (method->is_synchronized()) {
1929 
1930     __ block_comment("Slow path lock {");
1931     __ bind(slow_path_lock);
1932 
1933     // We have last_Java_frame set up. No exceptions are expected, so make a vanilla call, not call_VM
1934     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1935 
1936     // protect the args we've loaded
1937     save_args(masm, total_c_args, c_arg, out_regs);
1938 
1939     __ mov(c_rarg0, obj_reg);
1940     __ mov(c_rarg1, lock_reg);
1941     __ mov(c_rarg2, rthread);
1942 
1943     // Not a leaf but we have last_Java_frame setup as we want
1944     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1945     restore_args(masm, total_c_args, c_arg, out_regs);
1946 
1947 #ifdef ASSERT
1948     { Label L;
1949       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1950       __ cbz(rscratch1, L);
1951       __ stop("no pending exception allowed on exit from monitorenter");
1952       __ bind(L);
1953     }
1954 #endif
1955     __ b(lock_done);
1956 
1957     __ block_comment("} Slow path lock");
1958 
1959     __ block_comment("Slow path unlock {");
1960     __ bind(slow_path_unlock);
1961 
1962     // If we haven't already saved the native result, we must save it now as the
1963     // floating-point registers are still exposed.
1964 
1965     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1966       save_native_result(masm, ret_type, stack_slots);
1967     }
1968 
1969     __ mov(c_rarg2, rthread);
1970     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1971     __ mov(c_rarg0, obj_reg);
1972 
1973     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1974     // NOTE that obj_reg == r19 currently
1975     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1976     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1977 
1978     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1979 
1980 #ifdef ASSERT
1981     {
1982       Label L;
1983       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1984       __ cbz(rscratch1, L);
1985       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
1986       __ bind(L);
1987     }
1988 #endif /* ASSERT */
1989 
1990     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
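The ldr/str pair bracketing the rt_call implements a save/clear/restore protocol: complete_monitor_unlocking_C runs under an EXCEPTION_MARK, which requires that no exception be pending on entry, so the wrapper parks any pending exception in r19 and reinstates it afterwards. Schematically (ThreadShim and the function name are hypothetical stand-ins, not HotSpot types):

    struct ThreadShim { void* pending_exception; };

    void call_with_pending_exception_preserved(ThreadShim* thread, void (*vm_call)()) {
      void* saved = thread->pending_exception;  // ldr r19, [rthread, #offset]
      thread->pending_exception = nullptr;      // str zr,  [rthread, #offset]
      vm_call();                                // rt_call(complete_monitor_unlocking_C)
      thread->pending_exception = saved;        // str r19, [rthread, #offset]
    }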

2054     save_native_result(masm, ret_type, stack_slots);
2055     __ mov_metadata(c_rarg1, method());
2056     __ call_VM_leaf(
2057          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2058          rthread, c_rarg1);
2059     restore_native_result(masm, ret_type, stack_slots);
2060     __ b(dtrace_method_exit_done);
2061     __ block_comment("} dtrace exit");
2062   }
2063 
2064 
2065   __ flush();
2066 
2067   nmethod *nm = nmethod::new_native_nmethod(method,
2068                                             compile_id,
2069                                             masm->code(),
2070                                             vep_offset,
2071                                             frame_complete,
2072                                             stack_slots / VMRegImpl::slots_per_word,
2073                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2074                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2075                                             oop_maps);
2076 
2077   return nm;
2078 }
2079 
2080 // This function returns the adjustment size (in number of words) to a c2i adapter
2081 // activation for use during deoptimization
2082 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2083   assert(callee_locals >= callee_parameters,
2084           "test and remove; got more parms than locals");
2085   if (callee_locals < callee_parameters)
2086     return 0;                   // No adjustment for negative locals
2087   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2088   // diff is counted in stack words
2089   return align_up(diff, 2);
2090 }
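A quick worked example of the adjustment, assuming Interpreter::stackElementWords == 1 as on 64-bit platforms:

    // last_frame_adjust(/* callee_parameters = */ 2, /* callee_locals = */ 5)
    //   diff = (5 - 2) * 1 = 3 stack words
    //   align_up(3, 2)     = 4 words, keeping the adjustment 2-word aligned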
2091 
2092 
2093 //------------------------------generate_deopt_blob----------------------------
2094 void SharedRuntime::generate_deopt_blob() {

--- New version of the same region (stack-locking replaced with fast_lock / fast_unlock) ---

1295     if (method->is_continuation_enter_intrinsic()) {
1296       assert(interpreted_entry_offset != -1, "Must be set");
1297       assert(exception_offset != -1,         "Must be set");
1298     } else {
1299       assert(interpreted_entry_offset == -1, "Must be unset");
1300       assert(exception_offset == -1,         "Must be unset");
1301     }
1302     assert(frame_complete != -1,    "Must be set");
1303     assert(stack_slots != -1,       "Must be set");
1304     assert(vep_offset != -1,        "Must be set");
1305 #endif
1306 
1307     __ flush();
1308     nmethod* nm = nmethod::new_native_nmethod(method,
1309                                               compile_id,
1310                                               masm->code(),
1311                                               vep_offset,
1312                                               frame_complete,
1313                                               stack_slots,
1314                                               in_ByteSize(-1),

1315                                               oop_maps,
1316                                               exception_offset);
1317     if (method->is_continuation_enter_intrinsic()) {
1318       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1319     } else if (method->is_continuation_yield_intrinsic()) {
1320       _cont_doYield_stub = nm;
1321     } else {
1322       guarantee(false, "Unknown Continuation native intrinsic");
1323     }
1324     return nm;
1325   }
1326 
1327   if (method->is_method_handle_intrinsic()) {
1328     vmIntrinsics::ID iid = method->intrinsic_id();
1329     intptr_t start = (intptr_t)__ pc();
1330     int vep_offset = ((intptr_t)__ pc()) - start;
1331 
1332     // First instruction must be a nop as it may need to be patched on deoptimization
1333     __ nop();
1334     gen_special_dispatch(masm,
1335                          method,
1336                          in_sig_bt,
1337                          in_regs);
1338     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1339     __ flush();
1340     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1341     return nmethod::new_native_nmethod(method,
1342                                        compile_id,
1343                                        masm->code(),
1344                                        vep_offset,
1345                                        frame_complete,
1346                                        stack_slots / VMRegImpl::slots_per_word,
1347                                        in_ByteSize(-1),
1348                                        (OopMapSet*)NULL);
1349   }
1350   address native_func = method->native_function();
1351   assert(native_func != NULL, "must have function");
1352 
1353   // An OopMap for lock (and class if static)
1354   OopMapSet *oop_maps = new OopMapSet();
1355   intptr_t start = (intptr_t)__ pc();
1356 
1357   // We have received a description of where all the Java args are located
1358   // on entry to the wrapper. We need to convert these args to where
1359   // the JNI function will expect them. To figure out where they go
1360   // we convert the Java signature to a C signature by inserting
1361   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1362 
1363   const int total_in_args = method->size_of_parameters();
1364   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1365 
1366   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1367   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);

1387   }
1388 
1389   // Compute framesize for the wrapper.  We need to handlize all oops in
1390   // incoming registers
1391 
1392   // Calculate the total number of stack slots we will need.
1393 
1394   // First count the abi requirement plus all of the outgoing args
1395   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1396 
1397   // Now the space for the inbound oop handle area
1398   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1399 
1400   int oop_handle_offset = stack_slots;
1401   stack_slots += total_save_slots;
1402 
1403   // Now any space we need for handlizing a klass if this is a static method
1404 
1405   int klass_slot_offset = 0;
1406   int klass_offset = -1;
1407   bool is_static = false;
1408 
1409   if (method->is_static()) {
1410     klass_slot_offset = stack_slots;
1411     stack_slots += VMRegImpl::slots_per_word;
1412     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1413     is_static = true;
1414   }
1415 
1416   // Plus a lock if needed
1417 
1418   if (method->is_synchronized()) {
1419     stack_slots += VMRegImpl::slots_per_word;
1420   }
1421 
1422   // Now a place (+2) to save return values or temp during shuffling
1423   // + 4 for return address (which we own) and saved rfp
1424   stack_slots += 6;
1425 
1426   // OK, the space we have allocated will look like:
1427   //
1428   //
1429   // FP-> |                     |
1430   //      |---------------------|
1431   //      | 2 slots for moves   |
1432   //      |---------------------|

1433   //      | klass (if static)   |
1434   //      |---------------------| <- klass_slot_offset
1435   //      | oopHandle area      |
1436   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1437   //      | outbound memory     |
1438   //      | based arguments     |
1439   //      |                     |
1440   //      |---------------------|
1441   //      |                     |
1442   // SP-> | out_preserved_slots |
1443   //
1444   //
1445 
1446 
1447   // Now compute the actual number of stack words we need, rounding to keep the
1448   // stack properly aligned.
1449   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1450 
1451   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1452 

1669       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1670       rthread, c_rarg1);
1671     restore_args(masm, total_c_args, c_arg, out_regs);
1672   }
1673 
1674   // Lock a synchronized method
1675 
1676   // Register definitions used by locking and unlocking
1677 
1678   const Register swap_reg = r0;
1679   const Register obj_reg  = r19;  // Will contain the oop
1680   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1681   const Register old_hdr  = r13;  // value of old header at unlock time
1682   const Register tmp = lr;
1683 
1684   Label slow_path_lock;
1685   Label lock_done;
1686 
1687   if (method->is_synchronized()) {
1688     Label count;
1689 
1690     // Get the handle (the 2nd argument)
1691     __ mov(oop_handle_reg, c_rarg1);
1692 
1693     // Load the oop from the handle
1694     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1695 
1696     if (!UseHeavyMonitors) {
1697       __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1698       __ fast_lock(obj_reg, old_hdr, swap_reg, tmp, rscratch1, slow_path_lock);
1699     } else {
1700       __ b(slow_path_lock);
1701     }
1702     __ bind(count);
1703     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1704 
1705     // Slow path will re-enter here
1706     __ bind(lock_done);
1707   }
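This hunk is the heart of the change: the displaced-header store and cmpxchg_obj_header sequence of the old version is gone, and the fast path goes through MacroAssembler::fast_lock, which operates on the object's mark word directly. A simplified C++ sketch of the shape of such a fast path (try_fast_lock and the lock-bit values 0b01 unlocked / 0b00 locked are illustrative, not the exact mark-word encoding):

    #include <atomic>
    #include <cstdint>

    bool try_fast_lock(std::atomic<uintptr_t>& mark_word) {
      uintptr_t mark = mark_word.load(std::memory_order_relaxed);
      if ((mark & 0x3) != 0x1) {
        return false;                      // already locked or inflated: slow path
      }
      uintptr_t locked = mark & ~(uintptr_t)0x3;
      // One CAS attempt; on failure the caller branches to slow_path_lock.
      return mark_word.compare_exchange_strong(mark, locked,
                                               std::memory_order_acquire,
                                               std::memory_order_relaxed);
    }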
1708 
1709 
1710   // Finally just about ready to make the JNI call
1711 
1712   // get JNIEnv*, which is the first argument to the native method
1713   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1714 
1715   // Now set thread in native
1716   __ mov(rscratch1, _thread_in_native);
1717   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1718   __ stlrw(rscratch1, rscratch2);

1787   __ stlrw(rscratch1, rscratch2);
1788   __ bind(after_transition);
1789 
1790   Label reguard;
1791   Label reguard_done;
1792   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1793   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1794   __ br(Assembler::EQ, reguard);
1795   __ bind(reguard_done);
1796 
1797   // The native result, if any, is live
1798 
1799   // Unlock
1800   Label unlock_done;
1801   Label slow_path_unlock;
1802   if (method->is_synchronized()) {
1803 
1804     // Get locked oop from the handle we passed to jni
1805     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1806 
1807     Label done;
1808 
1809     // Must save r0 if it is live now because the unlock path uses it
1810     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1811       save_native_result(masm, ret_type, stack_slots);
1812     }
1813 
1814     if (!UseHeavyMonitors) {
1815       __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1816       __ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
1817       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1818     } else {
1819       __ b(slow_path_unlock);
1820     }
1821 
1822     // slow path re-enters here
1823     __ bind(unlock_done);
1824     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1825       restore_native_result(masm, ret_type, stack_slots);
1826     }
1827 
1828     __ bind(done);
1829   }
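Unlocking mirrors the locking fast path: fast_unlock attempts one CAS that flips the mark word back from the locked to the unlocked pattern, branching to slow_path_unlock if the monitor was inflated in the meantime. A sketch under the same simplified encoding as the locking example above:

    #include <atomic>
    #include <cstdint>

    bool try_fast_unlock(std::atomic<uintptr_t>& mark_word) {
      uintptr_t mark = mark_word.load(std::memory_order_relaxed);
      if ((mark & 0x3) != 0x0) {
        return false;                      // no longer fast-locked: slow path
      }
      uintptr_t unlocked = mark | 0x1;     // restore the unlocked bit pattern
      return mark_word.compare_exchange_strong(mark, unlocked,
                                               std::memory_order_release,
                                               std::memory_order_relaxed);
    }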
1830 
1831   Label dtrace_method_exit, dtrace_method_exit_done;
1832   {
1833     uint64_t offset;
1834     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1835     __ ldrb(rscratch1, Address(rscratch1, offset));
1836     __ cbnzw(rscratch1, dtrace_method_exit);

1866 
1867   // forward the exception
1868   __ bind(exception_pending);
1869 
1870 
1871   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1872 
1873   // Slow path locking & unlocking
1874   if (method->is_synchronized()) {
1875 
1876     __ block_comment("Slow path lock {");
1877     __ bind(slow_path_lock);
1878 
1879     // We have last_Java_frame set up. No exceptions are expected, so make a vanilla call, not call_VM
1880     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1881 
1882     // protect the args we've loaded
1883     save_args(masm, total_c_args, c_arg, out_regs);
1884 
1885     __ mov(c_rarg0, obj_reg);
1886     __ mov(c_rarg1, rthread);
1887 
1888     // Not a leaf but we have last_Java_frame setup as we want
1889     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 2);
1890     restore_args(masm, total_c_args, c_arg, out_regs);
1891 
1892 #ifdef ASSERT
1893     { Label L;
1894       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1895       __ cbz(rscratch1, L);
1896       __ stop("no pending exception allowed on exit from monitorenter");
1897       __ bind(L);
1898     }
1899 #endif
1900     __ b(lock_done);
1901 
1902     __ block_comment("} Slow path lock");
1903 
1904     __ block_comment("Slow path unlock {");
1905     __ bind(slow_path_unlock);
1906 
1907     // If we haven't already saved the native result, we must save it now as the
1908     // floating-point registers are still exposed.
1909 
1910     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1911       save_native_result(masm, ret_type, stack_slots);
1912     }
1913 
1914     __ mov(c_rarg1, rthread);
1915     __ mov(c_rarg0, obj_reg);
1916 
1917     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1918     // NOTE that obj_reg == r19 currently
1919     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1920     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1921 
1922     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1923 
1924 #ifdef ASSERT
1925     {
1926       Label L;
1927       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1928       __ cbz(rscratch1, L);
1929       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
1930       __ bind(L);
1931     }
1932 #endif /* ASSERT */
1933 
1934     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));

1998     save_native_result(masm, ret_type, stack_slots);
1999     __ mov_metadata(c_rarg1, method());
2000     __ call_VM_leaf(
2001          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2002          rthread, c_rarg1);
2003     restore_native_result(masm, ret_type, stack_slots);
2004     __ b(dtrace_method_exit_done);
2005     __ block_comment("} dtrace exit");
2006   }
2007 
2008 
2009   __ flush();
2010 
2011   nmethod *nm = nmethod::new_native_nmethod(method,
2012                                             compile_id,
2013                                             masm->code(),
2014                                             vep_offset,
2015                                             frame_complete,
2016                                             stack_slots / VMRegImpl::slots_per_word,
2017                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2018                                             oop_maps);
2019 
2020   return nm;
2021 }
2022 
2023 // This function returns the adjustment size (in number of words) to a c2i adapter
2024 // activation for use during deoptimization
2025 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2026   assert(callee_locals >= callee_parameters,
2027           "test and remove; got more parms than locals");
2028   if (callee_locals < callee_parameters)
2029     return 0;                   // No adjustment for negative locals
2030   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2031   // diff is counted in stack words
2032   return align_up(diff, 2);
2033 }
2034 
2035 
2036 //------------------------------generate_deopt_blob----------------------------
2037 void SharedRuntime::generate_deopt_blob() {