src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp

 937     intptr_t start = (intptr_t)__ pc();
 938     int vep_offset = ((intptr_t)__ pc()) - start;
 939 
 940     // First instruction must be a nop as it may need to be patched on deoptimization
 941     MacroAssembler::assert_alignment(__ pc());
 942     __ nop();
 943     gen_special_dispatch(masm,
 944                          method,
 945                          in_sig_bt,
 946                          in_regs);
 947     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
 948     __ flush();
 949     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
 950     return nmethod::new_native_nmethod(method,
 951                                        compile_id,
 952                                        masm->code(),
 953                                        vep_offset,
 954                                        frame_complete,
 955                                        stack_slots / VMRegImpl::slots_per_word,
 956                                        in_ByteSize(-1),
 957                                        in_ByteSize(-1),
 958                                        (OopMapSet*)NULL);
 959   }
 960   address native_func = method->native_function();
 961   assert(native_func != NULL, "must have function");
 962 
 963   // An OopMap for lock (and class if static)
 964   OopMapSet *oop_maps = new OopMapSet();
 965   assert_cond(oop_maps != NULL);
 966   intptr_t start = (intptr_t)__ pc();
 967 
 968   // We have received a description of where all the Java args are located
 969   // on entry to the wrapper. We need to convert these args to where
 970   // the JNI function will expect them. To figure out where they go
 971   // we convert the Java signature to a C signature by inserting
 972   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 973 
 974   const int total_in_args = method->size_of_parameters();
 975   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
 976 
 977   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
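
As a concrete illustration of the signature rewrite described in the comment above (a sketch with hypothetical names, not code from this file):

    // Java:   static native int foo(Object a, int b)
    //   total_in_args = 2                      // a, b
    //   total_c_args  = 2 + 2 = 4              // JNIEnv* and jclass inserted
    // C:      jint foo(JNIEnv* env, jclass cls, jobject a, jint b)
    //
    // For an instance method only JNIEnv* is inserted (the receiver is
    // already counted by size_of_parameters()), hence the "? 2 : 1" above.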

 993   int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 994 
 995   // Compute framesize for the wrapper.  We need to handlize all oops in
 996   // incoming registers
 997 
 998   // Calculate the total number of stack slots we will need.
 999 
1000   // First count the ABI requirement plus all of the outgoing args
1001   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1002 
1003   // Now the space for the inbound oop handle area
1004   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1005 
1006   int oop_handle_offset = stack_slots;
1007   stack_slots += total_save_slots;
1008 
1009   // Now any space we need for handlizing a klass if static method
1010 
1011   int klass_slot_offset = 0;
1012   int klass_offset = -1;
1013   int lock_slot_offset = 0;
1014   bool is_static = false;
1015 
1016   if (method->is_static()) {
1017     klass_slot_offset = stack_slots;
1018     stack_slots += VMRegImpl::slots_per_word;
1019     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1020     is_static = true;
1021   }
1022 
1023   // Plus a lock if needed
1024 
1025   if (method->is_synchronized()) {
1026     lock_slot_offset = stack_slots;
1027     stack_slots += VMRegImpl::slots_per_word;
1028   }
1029 
1030   // Now a place (+2) to save return values or temps during shuffling
1031   // + 4 for return address (which we own) and saved fp
1032   stack_slots += 6;
1033 
1034   // OK. The space we have allocated will look like:
1035   //
1036   //
1037   // FP-> |                     |
1038   //      | 2 slots (ra)        |
1039   //      | 2 slots (fp)        |
1040   //      |---------------------|
1041   //      | 2 slots for moves   |
1042   //      |---------------------|
1043   //      | lock box (if sync)  |
1044   //      |---------------------| <- lock_slot_offset
1045   //      | klass (if static)   |
1046   //      |---------------------| <- klass_slot_offset
1047   //      | oopHandle area      |
1048   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1049   //      | outbound memory     |
1050   //      | based arguments     |
1051   //      |                     |
1052   //      |---------------------|
1053   //      |                     |
1054   // SP-> | out_preserved_slots |
1055   //
1056   //
1057 
1058 
1059   // Now compute the actual number of stack words we need, rounding to make
1060   // the stack properly aligned.
1061   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1062 
1063   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1064 
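
A worked pass through the accounting above, assuming no stack-passed C arguments (out_preserve_stack_slots() == 0 and out_arg_slots == 0), a static synchronized method, 4-byte stack slots with slots_per_word == 2 on RV64, and 16-byte stack alignment (StackAlignmentInSlots == 4); the 8 in total_save_slots comes from the eight integer argument registers a0-a7:

    // stack_slots        = 0;                 // ABI preserve + outgoing args
    // oop_handle_offset  = 0;  stack_slots += 8 * 2;  // = 16, oopHandle area
    // klass_slot_offset  = 16; stack_slots += 2;      // = 18, handlized klass
    // lock_slot_offset   = 18; stack_slots += 2;      // = 20, lock box
    // stack_slots       += 6;                         // = 26, moves + ra + fp
    // stack_slots        = align_up(26, 4);           // = 28
    // stack_size         = 28 * VMRegImpl::stack_slot_size;  // = 112 bytes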

1280       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1281       xthread, c_rarg1);
1282     restore_args(masm, total_c_args, c_arg, out_regs);
1283   }
1284 
1285   // Lock a synchronized method
1286 
1287   // Register definitions used by locking and unlocking
1288 
1289   const Register swap_reg = x10;
1290   const Register obj_reg  = x9;  // Will contain the oop
1291   const Register lock_reg = x30;  // Address of compiler lock object (BasicLock)
1292   const Register old_hdr  = x30;  // value of old header at unlock time
1293   const Register tmp      = ra;
1294 
1295   Label slow_path_lock;
1296   Label lock_done;
1297 
1298   if (method->is_synchronized()) {
1299 
1300     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1301 
1302     // Get the handle (the 2nd argument)
1303     __ mv(oop_handle_reg, c_rarg1);
1304 
1305     // Get address of the box
1306 
1307     __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1308 
1309     // Load the oop from the handle
1310     __ ld(obj_reg, Address(oop_handle_reg, 0));
1311 
1312     if (!UseHeavyMonitors) {
1313       // Load (object->mark() | 1) into swap_reg (x10)
1314       __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1315       __ ori(swap_reg, t0, 1);
1316 
1317       // Save (object->mark() | 1) into BasicLock's displaced header
1318       __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1319 
1320       // src -> dest if dest == x10 else x10 <- dest
1321       {
1322         Label here;
1323         __ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, lock_done, /*fallthrough*/NULL);
1324       }
1325 
1326       // Test if the oopMark is an obvious stack pointer, i.e.,
1327       //  1) (mark & 3) == 0, and
1328       //  2) sp <= mark < sp + os::pagesize()
1329       // These 3 tests (test 1 plus the two comparisons in 2) can be done
1330       // by evaluating the expression ((mark - sp) & (3 - os::vm_page_size())),
1331       // assuming both the stack pointer and the page size have their least
1332       // significant 2 bits clear (see the C sketch after this block).
1333       // NOTE: the oopMark is in swap_reg (x10) as the result of cmpxchg
1334 
1335       __ sub(swap_reg, swap_reg, sp);
1336       __ andi(swap_reg, swap_reg, 3 - os::vm_page_size());
1337 
1338       // Save the test result; for the recursive case the result is zero
1339       __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1340       __ bnez(swap_reg, slow_path_lock);
1341     } else {
1342       __ j(slow_path_lock);
1343     }
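
The displaced-header test above, rewritten as a self-contained C sketch (illustrative only; assumes a 4 KiB page, so 3 - os::vm_page_size() is the two's-complement mask 0x...F003, which keeps only bits [1:0] and the bits above the page offset):

    #include <stdint.h>

    // Returns nonzero iff (mark & 3) == 0 and sp <= mark < sp + 4096,
    // i.e. the mark word is a pointer into the locking thread's own
    // stack page, which is the recursive-lock case.
    static int is_recursive_stack_lock(uintptr_t mark, uintptr_t sp) {
      return ((mark - sp) & (uintptr_t)(3 - 4096)) == 0;
    }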
1344 
1345     // Slow path will re-enter here
1346     __ bind(lock_done);
1347   }
1348 
1349 
1350   // Finally just about ready to make the JNI call
1351 
1352   // get JNIEnv* which is first argument to native
1353   __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1354 
1355   // Now set thread in native
1356   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1357   __ mv(t0, _thread_in_native);
1358   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1359   __ sw(t0, Address(t1));
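
The membar before the sw gives the store release semantics: no Java-visible load or store above it may be reordered past the state change. In C++ terms this is roughly (a sketch, not HotSpot's actual declaration):

    // thread->_thread_state.store(_thread_in_native, std::memory_order_release);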
1360 

1412 
1413   Label reguard;
1414   Label reguard_done;
1415   __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1416   __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1417   __ beq(t0, t1, reguard);
1418   __ bind(reguard_done);
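
The guard check above in outline (illustrative pseudo-C, mirroring the lbu/beq pair):

    // if (thread->stack_guard_state() == StackOverflow::stack_guard_yellow_reserved_disabled)
    //   goto reguard;   // re-arm the guard pages before returning to Java
    // reguard_done: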
1419 
1420   // The native result, if any, is live now
1421 
1422   // Unlock
1423   Label unlock_done;
1424   Label slow_path_unlock;
1425   if (method->is_synchronized()) {
1426 
1427     // Get locked oop from the handle we passed to jni
1428     __ ld(obj_reg, Address(oop_handle_reg, 0));
1429 
1430     Label done;
1431 
1432     if (!UseHeavyMonitors) {
1433       // Simple recursive lock?
1434       __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1435       __ beqz(t0, done);
1436     }
1437 
1438 
1439     // Must save x10 if it is live now because cmpxchg must use it
1440     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1441       save_native_result(masm, ret_type, stack_slots);
1442     }
1443 
1444     if (!UseHeavyMonitors) {
1445       // get address of the stack lock
1446       __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1447       //  get old displaced header
1448       __ ld(old_hdr, Address(x10, 0));
1449 
1450       // Atomically swap the old header back if the oop still contains the stack lock
1451       Label succeed;
1452       __ cmpxchg_obj_header(x10, old_hdr, obj_reg, t0, succeed, &slow_path_unlock);
1453       __ bind(succeed);
1454     } else {
1455       __ j(slow_path_unlock);
1456     }
1457 
1458     // slow path re-enters here
1459     __ bind(unlock_done);
1460     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1461       restore_native_result(masm, ret_type, stack_slots);
1462     }
1463 
1464     __ bind(done);
1465   }
1466 
1467   Label dtrace_method_exit, dtrace_method_exit_done;
1468   {
1469     int32_t offset = 0;
1470     __ la_patchable(t0, ExternalAddress((address)&DTraceMethodProbes), offset);
1471     __ lbu(t0, Address(t0, offset));
1472     __ bnez(t0, dtrace_method_exit);
1473     __ bind(dtrace_method_exit_done);

1502 
1503   // forward the exception
1504   __ bind(exception_pending);
1505 
1506   // and forward the exception
1507   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1508 
1509   // Slow path locking & unlocking
1510   if (method->is_synchronized()) {
1511 
1512     __ block_comment("Slow path lock {");
1513     __ bind(slow_path_lock);
1514 
1515     // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM
1516     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1517 
1518     // protect the args we've loaded
1519     save_args(masm, total_c_args, c_arg, out_regs);
1520 
1521     __ mv(c_rarg0, obj_reg);
1522     __ mv(c_rarg1, lock_reg);
1523     __ mv(c_rarg2, xthread);
1524 
1525     // Not a leaf but we have last_Java_frame setup as we want
1526     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1527     restore_args(masm, total_c_args, c_arg, out_regs);
1528 
1529 #ifdef ASSERT
1530     { Label L;
1531       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1532       __ beqz(t0, L);
1533       __ stop("no pending exception allowed on exit from monitorenter");
1534       __ bind(L);
1535     }
1536 #endif
1537     __ j(lock_done);
1538 
1539     __ block_comment("} Slow path lock");
1540 
1541     __ block_comment("Slow path unlock {");
1542     __ bind(slow_path_unlock);
1543 
1544     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1545       save_native_result(masm, ret_type, stack_slots);
1546     }
1547 
1548     __ mv(c_rarg2, xthread);
1549     __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1550     __ mv(c_rarg0, obj_reg);
1551 
1552     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1553     // NOTE that obj_reg == x9 currently
1554     __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1555     __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1556 
1557     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1558 
1559 #ifdef ASSERT
1560     {
1561       Label L;
1562       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1563       __ beqz(t0, L);
1564       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
1565       __ bind(L);
1566     }
1567 #endif /* ASSERT */
1568 
1569     __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
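
The save/clear/restore dance around the runtime call, in outline (illustrative pseudo-C; complete_monitor_unlocking_C contains an EXCEPTION_MARK, which requires that no exception be pending on entry):

    // saved = thread->pending_exception;     // held in x9 across the call
    // thread->pending_exception = NULL;
    // SharedRuntime::complete_monitor_unlocking_C(obj, lock, thread);
    // thread->pending_exception = saved;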

1633     __ bind(dtrace_method_exit);
1634     save_native_result(masm, ret_type, stack_slots);
1635     __ mov_metadata(c_rarg1, method());
1636     __ call_VM_leaf(
1637          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1638          xthread, c_rarg1);
1639     restore_native_result(masm, ret_type, stack_slots);
1640     __ j(dtrace_method_exit_done);
1641     __ block_comment("} dtrace exit");
1642   }
1643 
1644   __ flush();
1645 
1646   nmethod *nm = nmethod::new_native_nmethod(method,
1647                                             compile_id,
1648                                             masm->code(),
1649                                             vep_offset,
1650                                             frame_complete,
1651                                             stack_slots / VMRegImpl::slots_per_word,
1652                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
1653                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
1654                                             oop_maps);
1655   assert(nm != NULL, "failed to create native nmethod!");
1656   return nm;
1657 }
1658 
1659 // This function returns the adjusted size (in number of words) of a c2i adapter
1660 // activation, for use during deoptimization
1661 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
1662   assert(callee_locals >= callee_parameters,
1663          "test and remove; got more parms than locals");
1664   if (callee_locals < callee_parameters) {
1665     return 0;                   // No adjustment for negative locals
1666   }
1667   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
1668   // diff is counted in stack words
1669   return align_up(diff, 2);
1670 }
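
For example, with callee_parameters = 2, callee_locals = 5, and assuming Interpreter::stackElementWords == 1 (one machine word per interpreter stack element on a 64-bit VM):

    // diff = (5 - 2) * 1 = 3 stack words
    // return align_up(3, 2);        // = 4 words of adjustment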
1671 
1672 //------------------------------generate_deopt_blob----------------------------
1673 void SharedRuntime::generate_deopt_blob() {

src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp (patched version)

 937     intptr_t start = (intptr_t)__ pc();
 938     int vep_offset = ((intptr_t)__ pc()) - start;
 939 
 940     // First instruction must be a nop as it may need to be patched on deoptimization
 941     MacroAssembler::assert_alignment(__ pc());
 942     __ nop();
 943     gen_special_dispatch(masm,
 944                          method,
 945                          in_sig_bt,
 946                          in_regs);
 947     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
 948     __ flush();
 949     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
 950     return nmethod::new_native_nmethod(method,
 951                                        compile_id,
 952                                        masm->code(),
 953                                        vep_offset,
 954                                        frame_complete,
 955                                        stack_slots / VMRegImpl::slots_per_word,
 956                                        in_ByteSize(-1),
 957                                        (OopMapSet*)NULL);
 958   }
 959   address native_func = method->native_function();
 960   assert(native_func != NULL, "must have function");
 961 
 962   // An OopMap for lock (and class if static)
 963   OopMapSet *oop_maps = new OopMapSet();
 964   assert_cond(oop_maps != NULL);
 965   intptr_t start = (intptr_t)__ pc();
 966 
 967   // We have received a description of where all the Java args are located
 968   // on entry to the wrapper. We need to convert these args to where
 969   // the JNI function will expect them. To figure out where they go
 970   // we convert the Java signature to a C signature by inserting
 971   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 972 
 973   const int total_in_args = method->size_of_parameters();
 974   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
 975 
 976   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);

 992   int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 993 
 994   // Compute framesize for the wrapper.  We need to handlize all oops in
 995   // incoming registers
 996 
 997   // Calculate the total number of stack slots we will need.
 998 
 999   // First count the ABI requirement plus all of the outgoing args
1000   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1001 
1002   // Now the space for the inbound oop handle area
1003   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1004 
1005   int oop_handle_offset = stack_slots;
1006   stack_slots += total_save_slots;
1007 
1008   // Now any space we need for handlizing a klass if static method
1009 
1010   int klass_slot_offset = 0;
1011   int klass_offset = -1;
1012   bool is_static = false;
1013 
1014   if (method->is_static()) {
1015     klass_slot_offset = stack_slots;
1016     stack_slots += VMRegImpl::slots_per_word;
1017     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1018     is_static = true;
1019   }
1020 
1021   // Plus a lock if needed
1022 
1023   if (method->is_synchronized()) {
1024     stack_slots += VMRegImpl::slots_per_word;
1025   }
1026 
1027   // Now a place (+2) to save return values or temps during shuffling
1028   // + 4 for return address (which we own) and saved fp
1029   stack_slots += 6;
1030 
1031   // OK. The space we have allocated will look like:
1032   //
1033   //
1034   // FP-> |                     |
1035   //      | 2 slots (ra)        |
1036   //      | 2 slots (fp)        |
1037   //      |---------------------|
1038   //      | 2 slots for moves   |
1039   //      |---------------------|
1040   //      | klass (if static)   |
1041   //      |---------------------| <- klass_slot_offset
1042   //      | oopHandle area      |
1043   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1044   //      | outbound memory     |
1045   //      | based arguments     |
1046   //      |                     |
1047   //      |---------------------|
1048   //      |                     |
1049   // SP-> | out_preserved_slots |
1050   //
1051   //
1052 
1053 
1054   // Now compute the actual number of stack words we need, rounding to make
1055   // the stack properly aligned.
1056   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1057 
1058   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1059 

1275       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1276       xthread, c_rarg1);
1277     restore_args(masm, total_c_args, c_arg, out_regs);
1278   }
1279 
1280   // Lock a synchronized method
1281 
1282   // Register definitions used by locking and unlocking
1283 
1284   const Register swap_reg = x10;
1285   const Register obj_reg  = x9;  // Will contain the oop
1286   const Register lock_reg = x30;  // Address of compiler lock object (BasicLock)
1287   const Register old_hdr  = x30;  // value of old header at unlock time
1288   const Register tmp      = ra;
1289 
1290   Label slow_path_lock;
1291   Label lock_done;
1292 
1293   if (method->is_synchronized()) {
1294 
1295     // Get the handle (the 2nd argument)
1296     __ mv(oop_handle_reg, c_rarg1);
1297 
1298     // Load the oop from the handle
1299     __ ld(obj_reg, Address(oop_handle_reg, 0));
1300 
1301     if (!UseHeavyMonitors) {
1302       __ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1303       __ fast_lock(obj_reg, old_hdr, swap_reg, tmp, t0, slow_path_lock);
1304     } else {
1305       __ j(slow_path_lock);
1306     }
1307 
1308     // Slow path will re-enter here
1309     __ bind(lock_done);
1310   }
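
In this version the open-coded displaced-header sequence (visible in the old listing above) is folded into MacroAssembler::fast_lock, which takes the object, its current header, two temporaries, and the slow-path label. A minimal sketch of the control flow it is expected to provide, assuming a CAS on the mark word (the mark encodings are omitted; this is not the actual macro body):

    #include <stdint.h>

    // Hypothetical illustration only.
    static inline int fast_lock_attempt(uintptr_t* mark_addr,
                                        uintptr_t expected_unlocked,
                                        uintptr_t locked_value) {
      uintptr_t e = expected_unlocked;
      // Success: the caller falls through to lock_done.
      // Failure: the generated code branches to slow_path_lock.
      return __atomic_compare_exchange_n(mark_addr, &e, locked_value,
                                         /*weak=*/0, __ATOMIC_ACQUIRE,
                                         __ATOMIC_RELAXED);
    }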
1311 
1312 
1313   // Finally just about ready to make the JNI call
1314 
1315   // get JNIEnv* which is first argument to native
1316   __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1317 
1318   // Now set thread in native
1319   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1320   __ mv(t0, _thread_in_native);
1321   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1322   __ sw(t0, Address(t1));
1323 

1375 
1376   Label reguard;
1377   Label reguard_done;
1378   __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1379   __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1380   __ beq(t0, t1, reguard);
1381   __ bind(reguard_done);
1382 
1383   // The native result, if any, is live now
1384 
1385   // Unlock
1386   Label unlock_done;
1387   Label slow_path_unlock;
1388   if (method->is_synchronized()) {
1389 
1390     // Get locked oop from the handle we passed to jni
1391     __ ld(obj_reg, Address(oop_handle_reg, 0));
1392 
1393     Label done;
1394 
1395     // Must save x10 if it is live now because the unlock code uses it
1396     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1397       save_native_result(masm, ret_type, stack_slots);
1398     }
1399 
1400     if (!UseHeavyMonitors) {
1401       __ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1402       __ fast_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
1403     } else {
1404       __ j(slow_path_unlock);
1405     }
1406 
1407     // slow path re-enters here
1408     __ bind(unlock_done);
1409     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1410       restore_native_result(masm, ret_type, stack_slots);
1411     }
1412 
1413     __ bind(done);
1414   }
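
fast_unlock mirrors fast_lock on the exit path (again a conceptual outline, not the macro's actual body):

    // reload the header from the mark word      -> old_hdr
    // CAS the mark back to its unlocked form
    //   success: fall through to unlock_done
    //   failure: branch to slow_path_unlock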
1415 
1416   Label dtrace_method_exit, dtrace_method_exit_done;
1417   {
1418     int32_t offset = 0;
1419     __ la_patchable(t0, ExternalAddress((address)&DTraceMethodProbes), offset);
1420     __ lbu(t0, Address(t0, offset));
1421     __ bnez(t0, dtrace_method_exit);
1422     __ bind(dtrace_method_exit_done);

1451 
1452   // forward the exception
1453   __ bind(exception_pending);
1454 
1455   // and forward the exception
1456   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1457 
1458   // Slow path locking & unlocking
1459   if (method->is_synchronized()) {
1460 
1461     __ block_comment("Slow path lock {");
1462     __ bind(slow_path_lock);
1463 
1464     // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM
1465     // args are (oop obj, JavaThread* thread)
1466 
1467     // protect the args we've loaded
1468     save_args(masm, total_c_args, c_arg, out_regs);
1469 
1470     __ mv(c_rarg0, obj_reg);
1471     __ mv(c_rarg1, xthread);
1472 
1473     // Not a leaf but we have last_Java_frame setup as we want
1474     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 2);
1475     restore_args(masm, total_c_args, c_arg, out_regs);
1476 
1477 #ifdef ASSERT
1478     { Label L;
1479       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1480       __ beqz(t0, L);
1481       __ stop("no pending exception allowed on exit from monitorenter");
1482       __ bind(L);
1483     }
1484 #endif
1485     __ j(lock_done);
1486 
1487     __ block_comment("} Slow path lock");
1488 
1489     __ block_comment("Slow path unlock {");
1490     __ bind(slow_path_unlock);
1491 
1492     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1493       save_native_result(masm, ret_type, stack_slots);
1494     }
1495 
1496     __ mv(c_rarg1, xthread);
1497     __ mv(c_rarg0, obj_reg);
1498 
1499     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1500     // NOTE that obj_reg == x9 currently
1501     __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1502     __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1503 
1504     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1505 
1506 #ifdef ASSERT
1507     {
1508       Label L;
1509       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1510       __ beqz(t0, L);
1510       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
1512       __ bind(L);
1513     }
1514 #endif /* ASSERT */
1515 
1516     __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));

1580     __ bind(dtrace_method_exit);
1581     save_native_result(masm, ret_type, stack_slots);
1582     __ mov_metadata(c_rarg1, method());
1583     __ call_VM_leaf(
1584          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1585          xthread, c_rarg1);
1586     restore_native_result(masm, ret_type, stack_slots);
1587     __ j(dtrace_method_exit_done);
1588     __ block_comment("} dtrace exit");
1589   }
1590 
1591   __ flush();
1592 
1593   nmethod *nm = nmethod::new_native_nmethod(method,
1594                                             compile_id,
1595                                             masm->code(),
1596                                             vep_offset,
1597                                             frame_complete,
1598                                             stack_slots / VMRegImpl::slots_per_word,
1599                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
1600                                             oop_maps);
1601   assert(nm != NULL, "failed to create native nmethod!");
1602   return nm;
1603 }
1604 
1605 // This function returns the adjusted size (in number of words) of a c2i adapter
1606 // activation, for use during deoptimization
1607 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
1608   assert(callee_locals >= callee_parameters,
1609          "test and remove; got more parms than locals");
1610   if (callee_locals < callee_parameters) {
1611     return 0;                   // No adjustment for negative locals
1612   }
1613   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
1614   // diff is counted in stack words
1615   return align_up(diff, 2);
1616 }
1617 
1618 //------------------------------generate_deopt_blob----------------------------
1619 void SharedRuntime::generate_deopt_blob() {