
src/hotspot/cpu/x86/interp_masm_x86.cpp

--- old/src/hotspot/cpu/x86/interp_masm_x86.cpp

   1 /*
   2  * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *

  50 }
  51 
  52 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  53   Label update, next, none;
  54 
  55 #ifdef _LP64
  56   assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  57 #else
  58   assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
  59 #endif
  60 
  61   interp_verify_oop(obj, atos);
  62 
  63   testptr(obj, obj);
  64   jccb(Assembler::notZero, update);
  65   testptr(mdo_addr, TypeEntries::null_seen);
  66   jccb(Assembler::notZero, next); // null already seen. Nothing to do anymore.
  67   // atomic update to prevent overwriting Klass* with 0
  68   lock();
  69   orptr(mdo_addr, TypeEntries::null_seen);
  70   jmpb(next);
  71 
  72   bind(update);
  73   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  74   load_klass(obj, obj, tmp_load_klass);
  75 #ifdef _LP64
  76   mov(rscratch1, obj);
  77 #endif
  78 
  79   xorptr(obj, mdo_addr);
  80   testptr(obj, TypeEntries::type_klass_mask);
  81   jccb(Assembler::zero, next); // klass seen before, nothing to
  82                                // do. The unknown bit may have been
  83                                // set already but no need to check.
  84 
  85   testptr(obj, TypeEntries::type_unknown);
  86   jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
  87 
  88   cmpptr(mdo_addr, 0);
  89   jccb(Assembler::equal, none);
  90   cmpptr(mdo_addr, TypeEntries::null_seen);
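
A TypeEntries cell packs the Klass* and two low status bits into a single word; that is why the code above can set null_seen with an atomic lock()/orptr() and probe a previously recorded klass with xorptr()/testptr(). A minimal sketch of the encoding, using the bit values from methodData.hpp (the concrete constants and pointer values here are illustrative assumptions):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t null_seen       = 1;  // assumed, as in methodData.hpp
      const intptr_t type_unknown    = 2;  // assumed, as in methodData.hpp
      const intptr_t type_klass_mask = ~(null_seen | type_unknown);

      intptr_t klass = 0x200040;           // hypothetical aligned Klass*
      intptr_t cell  = klass | null_seen;  // klass recorded, null seen as well

      // Same klass probed again: XOR cancels the klass bits, leaving at most
      // the status bits -> the jccb(zero, next) above is taken.
      assert(((klass ^ cell) & type_klass_mask) == 0);

      // A different klass leaves high bits set -> the profile must be updated.
      intptr_t other = 0x300040;
      assert(((other ^ cell) & type_klass_mask) != 0);
      return 0;
    }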

1200   call_VM(noreg, CAST_FROM_FN_PTR(address,
1201           InterpreterRuntime::build_method_counters), method);
1202   movptr(mcs, Address(method, Method::method_counters_offset()));
1203   testptr(mcs, mcs);
1204   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1205   bind(has_counters);
1206 }
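
The hunk above is the tail of lazy MethodCounters allocation: after the runtime call the pointer is reloaded and tested, and a null result (allocation failed) branches to skip, so the method simply runs without counters instead of failing. A rough model of that policy (types and names are stand-ins, not the HotSpot ones):

    #include <new>

    struct MethodCountersModel { int invocation_count = 0; };
    struct MethodModel { MethodCountersModel* _method_counters = nullptr; };

    // Allocate counters on first use; on OOM return null and let the caller
    // skip counting rather than propagate an error.
    MethodCountersModel* build_method_counters_model(MethodModel* m) {
      if (m->_method_counters == nullptr) {
        m->_method_counters = new (std::nothrow) MethodCountersModel();
      }
      return m->_method_counters;  // caller: if null, jcc(zero, skip)
    }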
1207 
1208 
1209 // Lock object
1210 //
1211 // Args:
1212 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1213 //
1214 // Kills:
1215 //      rax, rbx
1216 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1217   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1218          "The argument is only for looks. It must be c_rarg1");
1219 
1220   if (UseHeavyMonitors) {
1221     call_VM(noreg,
1222             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1223             lock_reg);
1224   } else {
1225     Label done;
1226 
1227     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1228     const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a
1229                                   // problematic case where tmp_reg = no_reg.
1230     const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1231     const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1232 
1233     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1234     const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
1235     const int mark_offset = lock_offset +
1236                             BasicLock::displaced_header_offset_in_bytes();
1237 
1238     Label slow_case;
1239 
1240     // Load object pointer into obj_reg
1241     movptr(obj_reg, Address(lock_reg, obj_offset));
1242 
1243     if (DiagnoseSyncOnValueBasedClasses != 0) {
1244       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1245       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1246       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1247       jcc(Assembler::notZero, slow_case);
1248     }
1249 
1250     if (UseBiasedLocking) {
1251       biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
1252     }
1253 
1254     // Load immediate 1 into swap_reg %rax
1255     movl(swap_reg, (int32_t)1);
1256 
1257     // Load (object->mark() | 1) into swap_reg %rax
1258     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1259 
1260     // Save (object->mark() | 1) into BasicLock's displaced header
1261     movptr(Address(lock_reg, mark_offset), swap_reg);
1262 
1263     assert(lock_offset == 0,
1264            "displaced header must be first word in BasicObjectLock");
1265 
1266     lock();
1267     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1268     if (PrintBiasedLockingStatistics) {
1269       cond_inc32(Assembler::zero,
1270                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1271     }
1272     jcc(Assembler::zero, done);
1273 
1274     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1275 
1276     // Fast check for recursive lock.
1277     //
1278     // Can apply the optimization only if this is a stack lock
1279     // allocated in this thread. For efficiency, we can focus on
1280     // recently allocated stack locks (instead of reading the stack
1281     // base and checking whether 'mark' points inside the current
1282     // thread stack):
1283     //  1) (mark & zero_bits) == 0, and
1284     //  2) rsp <= mark < rsp + os::pagesize()
1285     //
1286     // Warning: rsp + os::pagesize can overflow the stack base. We must
1287     // neither apply the optimization for an inflated lock allocated
1288     // just above the thread stack (this is why condition 1 matters)
1289     // nor apply the optimization if the stack lock is inside the stack
1290     // of another thread. The latter is avoided even in case of overflow
1291     // because we have guard pages at the end of all stacks. Hence, if
1292     // we go over the stack base and hit the stack of another thread,
1293     // this should not be in a writeable area that could contain a
1294     // stack lock allocated by that thread. As a consequence, a stack
1295     // lock less than page size away from rsp is guaranteed to be
1296     // owned by the current thread.
1297     //
1298     // These 3 tests can be done by evaluating the following
1299     // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1300     // assuming both stack pointer and pagesize have their
1301     // least significant bits clear.
1302     // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1303     subptr(swap_reg, rsp);
1304     andptr(swap_reg, zero_bits - os::vm_page_size());
1305 
1306     // Save the test result; for the recursive case the result is zero
1307     movptr(Address(lock_reg, mark_offset), swap_reg);
1308 
1309     if (PrintBiasedLockingStatistics) {
1310       cond_inc32(Assembler::zero,
1311                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1312     }
1313     jcc(Assembler::zero, done);
1314 
1315     bind(slow_case);
1316 
1317     // Call the runtime routine for slow case
1318     call_VM(noreg,
1319             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1320             lock_reg);
1321 
1322     bind(done);
1323   }
1324 }
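
The "3 tests in one expression" trick in the recursive-lock fast path above is easiest to verify with concrete numbers. A self-contained check, assuming the LP64 zero_bits value of 7 and a 4096-byte page (the rsp value is hypothetical):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t zero_bits = 7;     // LP64 value from lock_object()
      const intptr_t page      = 4096;  // stand-in for os::vm_page_size()
      // zero_bits - page == 0x...fffff007: the mask keeps the low 3 bits and
      // every bit from the page bit upward, so ((mark - rsp) & mask) == 0
      // iff (mark - rsp) is 8-byte aligned, non-negative, and below one page.
      const intptr_t mask = zero_bits - page;

      intptr_t rsp       = 0x7ffc0000;     // hypothetical aligned stack pointer
      intptr_t near_mark = rsp + 0x40;     // stack lock in the current frame
      intptr_t below     = rsp - 0x40;     // below rsp: not a live stack lock
      intptr_t far_mark  = rsp + 2 * page; // beyond one page from rsp
      intptr_t odd_mark  = rsp + 0x41;     // low bits set: inflated/other lock

      assert(((near_mark - rsp) & mask) == 0);  // fast recursive path taken
      assert(((below     - rsp) & mask) != 0);  // negative diff: high bits set
      assert(((far_mark  - rsp) & mask) != 0);  // page bit set
      assert(((odd_mark  - rsp) & mask) != 0);  // alignment bits set
      return 0;
    }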
1325 
1326 
1327 // Unlocks an object. Used in monitorexit bytecode and
1328 // remove_activation.  Throws an IllegalMonitorStateException if the
1329 // object is not locked by the current thread.
1330 //
1331 // Args:
1332 //      rdx, c_rarg1: BasicObjectLock for lock
1333 //
1334 // Kills:
1335 //      rax
1336 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1337 //      rscratch1 (scratch reg)
1338 //      rax, rbx, rcx, rdx
1339 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1340   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1341          "The argument is only for looks. It must be c_rarg1");
1342 
1343   if (UseHeavyMonitors) {
1344     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1345   } else {
1346     Label done;
1347 
1348     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
1349     const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
1350     const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop
1351 
1352     save_bcp(); // Save in case of exception
1353 
1354     // Convert from BasicObjectLock structure to object and BasicLock
1355     // structure. Store the BasicLock address into %rax
1356     lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
1357 
1358     // Load oop into obj_reg(%c_rarg3)
1359     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
1360 
1361     // Free entry
1362     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
1363 
1364     if (UseBiasedLocking) {
1365       biased_locking_exit(obj_reg, header_reg, done);
1366     }
1367 
1368     // Load the old header from BasicLock structure
1369     movptr(header_reg, Address(swap_reg,
1370                                BasicLock::displaced_header_offset_in_bytes()));
1371 
1372     // Test for recursion
1373     testptr(header_reg, header_reg);
1374 
1375     // zero for recursive case
1376     jcc(Assembler::zero, done);
1377 
1378     // Atomic swap back the old header
1379     lock();
1380     cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1381 
1382     // zero for simple unlock of a stack-lock case
1383     jcc(Assembler::zero, done);
1384 
1385 
1386     // Call the runtime routine for slow case.
1387     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
1388     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1389 
1390     bind(done);
1391 
1392     restore_bcp();
1393   }
1394 }
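
lock_object() and unlock_object() both lean on the BasicObjectLock layout: the assert in lock_object() requires the displaced header to be the first word, and a zero displaced header is exactly what the recursion test above reads back. A compile-time sketch of that layout (simplified; the real types, with a markWord field, live in runtime/basicLock.hpp):

    #include <cstddef>
    #include <cstdint>

    struct BasicLockModel {                  // real field is a markWord
      volatile intptr_t _displaced_header;   // 0 here marks a recursive lock
    };

    struct BasicObjectLockModel {            // one interpreter monitor slot
      BasicLockModel _lock;                  // first word: lock_offset == 0
      void*          _obj;                   // the locked oop
    };

    static_assert(offsetof(BasicObjectLockModel, _lock) == 0,
                  "displaced header must be first word in BasicObjectLock");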
1395 
1396 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1397                                                          Label& zero_continue) {
1398   assert(ProfileInterpreter, "must be profiling interpreter");
1399   movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1400   testptr(mdp, mdp);
1401   jcc(Assembler::zero, zero_continue);
1402 }
1403 
1404 
1405 // Set the method data pointer for the current bcp.

+++ new/src/hotspot/cpu/x86/interp_masm_x86.cpp

   1 /*
   2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *

  50 }
  51 
  52 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  53   Label update, next, none;
  54 
  55 #ifdef _LP64
  56   assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  57 #else
  58   assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
  59 #endif
  60 
  61   interp_verify_oop(obj, atos);
  62 
  63   testptr(obj, obj);
  64   jccb(Assembler::notZero, update);
  65   testptr(mdo_addr, TypeEntries::null_seen);
  66   jccb(Assembler::notZero, next); // null already seen. Nothing to do anymore.
  67   // atomic update to prevent overwriting Klass* with 0
  68   lock();
  69   orptr(mdo_addr, TypeEntries::null_seen);
  70   jmp(next);
  71 
  72   bind(update);
  73   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  74   load_klass(obj, obj, tmp_load_klass);
  75 #ifdef _LP64
  76   mov(rscratch1, obj);
  77 #endif
  78 
  79   xorptr(obj, mdo_addr);
  80   testptr(obj, TypeEntries::type_klass_mask);
  81   jccb(Assembler::zero, next); // klass seen before, nothing to
  82                                // do. The unknown bit may have been
  83                                // set already but no need to check.
  84 
  85   testptr(obj, TypeEntries::type_unknown);
  86   jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
  87 
  88   cmpptr(mdo_addr, 0);
  89   jccb(Assembler::equal, none);
  90   cmpptr(mdo_addr, TypeEntries::null_seen);

1200   call_VM(noreg, CAST_FROM_FN_PTR(address,
1201           InterpreterRuntime::build_method_counters), method);
1202   movptr(mcs, Address(method, Method::method_counters_offset()));
1203   testptr(mcs, mcs);
1204   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1205   bind(has_counters);
1206 }
1207 
1208 
1209 // Lock object
1210 //
1211 // Args:
1212 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1213 //
1214 // Kills:
1215 //      rax, rbx
1216 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1217   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1218          "The argument is only for looks. It must be c_rarg1");
1219 
1220   if (LockingMode == LM_MONITOR) {
1221     call_VM(noreg,
1222             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1223             lock_reg);
1224   } else {
1225     Label done;
1226 
1227     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1228     const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a
1229                                   // problematic case where tmp_reg = no_reg.
1230     const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1231     const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1232 
1233     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1234     const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
1235     const int mark_offset = lock_offset +
1236                             BasicLock::displaced_header_offset_in_bytes();
1237 
1238     Label slow_case;
1239 
1240     // Load object pointer into obj_reg
1241     movptr(obj_reg, Address(lock_reg, obj_offset));
1242 
1243     if (DiagnoseSyncOnValueBasedClasses != 0) {
1244       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1245       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1246       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1247       jcc(Assembler::notZero, slow_case);
1248     }
1249 
1250     if (UseBiasedLocking) {
1251       biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
1252     }
1253 
1254     if (LockingMode == LM_LIGHTWEIGHT) {
1255 #ifdef _LP64
1256       const Register thread = r15_thread;
1257 #else
1258       const Register thread = lock_reg;
1259       get_thread(thread);
1260 #endif
1261       lightweight_lock(obj_reg, swap_reg, thread, tmp_reg, slow_case);
1262       jmp(done);
1263     } else {
1264       // Load immediate 1 into swap_reg %rax
1265       movl(swap_reg, (int32_t)1);
1266 
1267       // Load (object->mark() | 1) into swap_reg %rax
1268       orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1269 
1270       // Save (object->mark() | 1) into BasicLock's displaced header
1271       movptr(Address(lock_reg, mark_offset), swap_reg);
1272 
1273       assert(lock_offset == 0,
1274              "displaced header must be first word in BasicObjectLock");
1275 
1276       lock();
1277       cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1278       if (PrintBiasedLockingStatistics) {
1279         cond_inc32(Assembler::zero,
1280                    ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1281       }
1282       jcc(Assembler::zero, done);
1283 
1284       const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1285 
1286       // Fast check for recursive lock.
1287       //
1288       // Can apply the optimization only if this is a stack lock
1289       // allocated in this thread. For efficiency, we can focus on
1290       // recently allocated stack locks (instead of reading the stack
1291       // base and checking whether 'mark' points inside the current
1292       // thread stack):
1293       //  1) (mark & zero_bits) == 0, and
1294       //  2) rsp <= mark < rsp + os::pagesize()
1295       //
1296       // Warning: rsp + os::pagesize can overflow the stack base. We must
1297       // neither apply the optimization for an inflated lock allocated
1298       // just above the thread stack (this is why condition 1 matters)
1299       // nor apply the optimization if the stack lock is inside the stack
1300       // of another thread. The latter is avoided even in case of overflow
1301       // because we have guard pages at the end of all stacks. Hence, if
1302       // we go over the stack base and hit the stack of another thread,
1303       // this should not be in a writeable area that could contain a
1304       // stack lock allocated by that thread. As a consequence, a stack
1305       // lock less than page size away from rsp is guaranteed to be
1306       // owned by the current thread.
1307       //
1308       // These 3 tests can be done by evaluating the following
1309       // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1310       // assuming both stack pointer and pagesize have their
1311       // least significant bits clear.
1312       // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1313       subptr(swap_reg, rsp);
1314       andptr(swap_reg, zero_bits - os::vm_page_size());
1315 
1316       // Save the test result; for the recursive case the result is zero
1317       movptr(Address(lock_reg, mark_offset), swap_reg);
1318 
1319       if (PrintBiasedLockingStatistics) {
1320         cond_inc32(Assembler::zero,
1321                    ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1322       }
1323       jcc(Assembler::zero, done);
1324     }
1325     bind(slow_case);
1326 
1327     // Call the runtime routine for slow case
1328     if (LockingMode == LM_LIGHTWEIGHT) {
1329       call_VM(noreg,
1330               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
1331               obj_reg);
1332     } else {
1333       call_VM(noreg,
1334               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1335               lock_reg);
1336     }
1337     bind(done);
1338   }
1339 }
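
The LM_LIGHTWEIGHT branch is the substantive change here: instead of writing a displaced header into the frame, the owning thread records the oop on a small per-thread lock stack and flips the mark-word lock bits. A deliberately simplified, single-threaded model of that fast path (the real lightweight_lock() uses a CAS; the mark-bit values 01 = unlocked, 00 = locked are the usual markWord encoding, assumed here):

    #include <cstdint>

    const uintptr_t lock_mask = 3, unlocked_value = 1;  // mark word tag bits

    struct ThreadModel {          // stand-in for the per-thread lock stack
      uintptr_t lock_stack[8];
      int       top = 0;
    };

    // Returns true on fast-path success; false means take slow_case.
    bool lightweight_lock_model(ThreadModel* t, uintptr_t* mark, uintptr_t obj) {
      uintptr_t old_mark = *mark;
      if ((old_mark & lock_mask) != unlocked_value || t->top == 8) {
        return false;                  // already locked, or lock stack full
      }
      *mark = old_mark & ~lock_mask;   // 01 (unlocked) -> 00 (locked); CAS in real code
      t->lock_stack[t->top++] = obj;   // ownership = presence on the lock stack
      return true;
    }

Note also that the slow path now differs by mode: the lightweight case passes the object itself to monitorenter_obj rather than the BasicObjectLock.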
1340 
1341 
1342 // Unlocks an object. Used in monitorexit bytecode and
1343 // remove_activation.  Throws an IllegalMonitorStateException if the
1344 // object is not locked by the current thread.
1345 //
1346 // Args:
1347 //      rdx, c_rarg1: BasicObjectLock for lock
1348 //
1349 // Kills:
1350 //      rax
1351 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1352 //      rscratch1 (scratch reg)
1353 //      rax, rbx, rcx, rdx
1354 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1355   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1356          "The argument is only for looks. It must be c_rarg1");
1357 
1358   if (LockingMode == LM_MONITOR) {
1359     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1360   } else {
1361     Label done, slow_case;
1362 
1363     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
1364     const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
1365     const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop
1366 
1367     save_bcp(); // Save in case of exception
1368 
1369     if (LockingMode != LM_LIGHTWEIGHT) {
1370       // Convert from BasicObjectLock structure to object and BasicLock
1371       // structure. Store the BasicLock address into %rax
1372       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
1373     }
1374 
1375     // Load oop into obj_reg(%c_rarg3)
1376     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
1377 
1378     // Free entry
1379     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
1380 
1381     if (LockingMode == LM_LIGHTWEIGHT) {
1382 #ifdef _LP64
1383       lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
1384 #else
1385       // This relies on the implementation of lightweight_unlock being able to
1386       // handle the case where the reg_rax and thread Register parameters alias.
1387       get_thread(swap_reg);
1388       lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case);
1389 #endif
1390       jmp(done);
1391     } else {
1392       if (UseBiasedLocking) {
1393         biased_locking_exit(obj_reg, header_reg, done);
1394       }
1395 
1396       // Load the old header from BasicLock structure
1397       movptr(header_reg, Address(swap_reg,
1398                                  BasicLock::displaced_header_offset_in_bytes()));
1399 
1400       // Test for recursion
1401       testptr(header_reg, header_reg);
1402 
1403       // zero for recursive case
1404       jcc(Assembler::zero, done);
1405 
1406       // Atomic swap back the old header
1407       lock();
1408       cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1409 
1410       // zero for simple unlock of a stack-lock case
1411       jcc(Assembler::zero, done);
1412     }
1413 
1414     bind(slow_case);
1415     // Call the runtime routine for slow case.
1416     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
1417     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1418 
1419     bind(done);
1420 
1421     restore_bcp();
1422   }
1423 }
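
For the legacy branch, the unlock decision above reduces to: a zero displaced header marks a recursive stack lock, so there is nothing to restore; otherwise try to swap the saved mark back, and fall through to the runtime if the object was inflated in the meantime. A minimal sketch under those assumptions:

    #include <cstdint>

    // Returns true if the fast path completed; false -> call monitorexit.
    bool legacy_unlock_model(uintptr_t displaced_header,
                             uintptr_t* mark, uintptr_t basic_lock_addr) {
      if (displaced_header == 0) {
        return true;                  // recursive case: nothing to restore
      }
      if (*mark == basic_lock_addr) { // object still stack-locked by us?
        *mark = displaced_header;     // restore original mark (CAS in real code)
        return true;
      }
      return false;                   // header changed (inflated): slow case
    }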
1424 
1425 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1426                                                          Label& zero_continue) {
1427   assert(ProfileInterpreter, "must be profiling interpreter");
1428   movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1429   testptr(mdp, mdp);
1430   jcc(Assembler::zero, zero_continue);
1431 }
1432 
1433 
1434 // Set the method data pointer for the current bcp.