1137 InterpreterRuntime::build_method_counters), method);
1138 movptr(mcs, Address(method,Method::method_counters_offset()));
1139 testptr(mcs, mcs);
1140 jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1141 bind(has_counters);
1142 }
1143
1144
1145 // Lock object
1146 //
1147 // Args:
1148 // rdx, c_rarg1: BasicObjectLock to be used for locking
1149 //
1150 // Kills:
1151 // rax, rbx
// Emits monitorenter code for the BasicObjectLock slot that lock_reg points at.
// With LM_MONITOR every lock goes straight to the runtime; otherwise an inlined
// stack-lock fast path is emitted with a fallback to the runtime at slow_case.
// NOTE(review): original lines 1180-1222 are not visible in this chunk, so the
// code between the value-based-class check and the stack-distance test
// (presumably the mark-word cmpxchg fast path) is not documented here — confirm
// against the full file.
1152 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// The register is fixed by the interpreter's calling convention; the assert
// only documents/enforces that convention.
1153 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1154 "The argument is only for looks. It must be c_rarg1");
1155
1156 if (LockingMode == LM_MONITOR) {
// Heavyweight-only mode: no inlined fast path, call the runtime directly.
1157 call_VM(noreg,
1158 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1159 lock_reg);
1160 } else {
1161 Label count_locking, done, slow_case;
1162
1163 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1164 const Register tmp_reg = rbx;
1165 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1166 const Register rklass_decode_tmp = rscratch1;
1167
// Byte offsets into the BasicObjectLock/BasicLock structures on the stack.
1168 const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
1169 const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
1170 const int mark_offset = lock_offset +
1171 BasicLock::displaced_header_offset_in_bytes();
1172
1173 // Load object pointer into obj_reg
1174 movptr(obj_reg, Address(lock_reg, obj_offset));
1175
// Diagnostic mode: detect synchronization on value-based classes by testing
// the klass access flags. (The branch consuming this testl is in the elided
// lines not visible in this chunk.)
1176 if (DiagnoseSyncOnValueBasedClasses != 0) {
1177 load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1178 movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1179 testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1223 // nor apply the optimization if the stack lock is inside the stack
1224 // of another thread. The latter is avoided even in case of overflow
1225 // because we have guard pages at the end of all stacks. Hence, if
1226 // we go over the stack base and hit the stack of another thread,
1227 // this should not be in a writeable area that could contain a
1228 // stack lock allocated by that thread. As a consequence, a stack
1229 // lock less than page size away from rsp is guaranteed to be
1230 // owned by the current thread.
1231 //
1232 // These 3 tests can be done by evaluating the following
1233 // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1234 // assuming both stack pointer and pagesize have their
1235 // least significant bits clear.
1236 // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1237 subptr(swap_reg, rsp);
1238 andptr(swap_reg, zero_bits - (int)os::vm_page_size());
1239
1240 // Save the test result, for recursive case, the result is zero
1241 movptr(Address(lock_reg, mark_offset), swap_reg);
// Non-zero means the mark is not a stack address owned by this thread:
// take the runtime slow path.
1242 jcc(Assembler::notZero, slow_case);
1243
1244 bind(count_locking);
1245 }
// Fast path succeeded (or recursive): bump the held-monitor count and finish.
1246 inc_held_monitor_count();
1247 jmp(done);
1248
1249 bind(slow_case);
1250
1251 // Call the runtime routine for slow case
1252 if (LockingMode == LM_LIGHTWEIGHT) {
// Lightweight locking passes the object itself to monitorenter_obj ...
1253 call_VM(noreg,
1254 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
1255 obj_reg);
1256 } else {
// ... while legacy locking passes the BasicObjectLock slot.
1257 call_VM(noreg,
1258 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1259 lock_reg);
1260 }
1261 bind(done);
1262 }
1263 }
1264
1265
1266 // Unlocks an object. Used in monitorexit bytecode and
1267 // remove_activation. Throws an IllegalMonitorStateException if object is
1268 // not locked by current thread.
1269 //
1270 // Args:
1271 // rdx, c_rarg1: BasicObjectLock for lock
1272 //
1273 // Kills:
1274 // rax
1275 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1276 // rscratch1 (scratch reg)
1277 // rax, rbx, rcx, rdx
// Emits monitorexit code for the BasicObjectLock slot that lock_reg points at.
// LM_MONITOR goes straight to the runtime; otherwise an inlined unlock fast
// path (per LockingMode) is emitted with a runtime fallback at slow_case.
1278 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Fixed by the interpreter's calling convention; assert only documents it.
1279 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1280 "The argument is only for looks. It must be c_rarg1");
1281
1282 if (LockingMode == LM_MONITOR) {
1283 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1284 } else {
1285 Label count_locking, done, slow_case;
1286
1287 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1288 const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark
1289 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1290
1291 save_bcp(); // Save in case of exception
1292
1293 if (LockingMode != LM_LIGHTWEIGHT) {
1294 // Convert from BasicObjectLock structure to object and BasicLock
1295 // structure Store the BasicLock address into %rax
1296 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
1297 }
1298
1299 // Load oop into obj_reg(%c_rarg3)
1300 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
1301
// Clear the obj slot now; it is restored below if the slow path is taken.
1302 // Free entry
1303 movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
1304
1305 if (LockingMode == LM_LIGHTWEIGHT) {
1306 #ifdef _LP64
1307 lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
1308 #else
1309 // This relies on the implementation of lightweight_unlock being able to handle
1310 // that the reg_rax and thread Register parameters may alias each other.
1311 get_thread(swap_reg);
1312 lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case);
1313 #endif
1314 } else if (LockingMode == LM_LEGACY) {
1315 // Load the old header from BasicLock structure
1316 movptr(header_reg, Address(swap_reg,
1317 BasicLock::displaced_header_offset_in_bytes()));
1318
1319 // Test for recursion
1320 testptr(header_reg, header_reg);
1321
// A zero displaced header marks a recursive stack lock: nothing to swap back.
1322 // zero for recursive case
1323 jcc(Assembler::zero, count_locking);
1324
// Try to restore the displaced header into the object's mark word; only
// succeeds if the mark still points at our BasicLock (in swap_reg/%rax).
1325 // Atomic swap back the old header
1326 lock();
1327 cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1328
1329 // zero for simple unlock of a stack-lock case
1330 jcc(Assembler::notZero, slow_case);
1331
1332 bind(count_locking);
1333 }
// Fast path succeeded: drop the held-monitor count and finish.
1334 dec_held_monitor_count();
1335 jmp(done);
1336
1337 bind(slow_case);
1338 // Call the runtime routine for slow case.
// The runtime expects a populated BasicObjectLock, so put the oop back first.
1339 movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
1340 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1341
1342 bind(done);
1343
1344 restore_bcp();
1345 }
1346 }
1347
// Loads the method-data pointer (MDP) from the current interpreter frame into
// mdp and branches to zero_continue if it is null (no MDO allocated yet).
// Only meaningful when ProfileInterpreter is enabled.
1348 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1349 Label& zero_continue) {
1350 assert(ProfileInterpreter, "must be profiling interpreter");
// MDP lives at a fixed slot in the interpreter frame, addressed off rbp.
1351 movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1352 testptr(mdp, mdp);
1353 jcc(Assembler::zero, zero_continue);
1354 }
|
1137 InterpreterRuntime::build_method_counters), method);
1138 movptr(mcs, Address(method,Method::method_counters_offset()));
1139 testptr(mcs, mcs);
1140 jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1141 bind(has_counters);
1142 }
1143
1144
1145 // Lock object
1146 //
1147 // Args:
1148 // rdx, c_rarg1: BasicObjectLock to be used for locking
1149 //
1150 // Kills:
1151 // rax, rbx
// Emits monitorenter code for the BasicObjectLock slot that lock_reg points at.
// With LM_MONITOR every lock goes straight to the runtime; otherwise an inlined
// stack-lock fast path is emitted with a fallback to the runtime at slow_case.
// NOTE(review): push_cont_fastpath/pop_cont_fastpath bracket each runtime
// call here — presumably continuation (Loom) related; confirm their contract
// in the MacroAssembler before relying on it.
// NOTE(review): original lines 1182-1224 are not visible in this chunk, so the
// code between the value-based-class check and the stack-distance test is not
// documented here — confirm against the full file.
1152 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// The register is fixed by the interpreter's calling convention; the assert
// only documents/enforces that convention.
1153 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1154 "The argument is only for looks. It must be c_rarg1");
1155
1156 if (LockingMode == LM_MONITOR) {
1157 push_cont_fastpath();
1158 call_VM(noreg,
1159 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1160 lock_reg);
1161 pop_cont_fastpath();
1162 } else {
1163 Label count_locking, done, slow_case;
1164
1165 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1166 const Register tmp_reg = rbx;
1167 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1168 const Register rklass_decode_tmp = rscratch1;
1169
// Byte offsets into the BasicObjectLock/BasicLock structures on the stack.
1170 const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
1171 const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
1172 const int mark_offset = lock_offset +
1173 BasicLock::displaced_header_offset_in_bytes();
1174
1175 // Load object pointer into obj_reg
1176 movptr(obj_reg, Address(lock_reg, obj_offset));
1177
// Diagnostic mode: detect synchronization on value-based classes by testing
// the klass access flags. (The branch consuming this testl is in the elided
// lines not visible in this chunk.)
1178 if (DiagnoseSyncOnValueBasedClasses != 0) {
1179 load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1180 movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1181 testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1225 // nor apply the optimization if the stack lock is inside the stack
1226 // of another thread. The latter is avoided even in case of overflow
1227 // because we have guard pages at the end of all stacks. Hence, if
1228 // we go over the stack base and hit the stack of another thread,
1229 // this should not be in a writeable area that could contain a
1230 // stack lock allocated by that thread. As a consequence, a stack
1231 // lock less than page size away from rsp is guaranteed to be
1232 // owned by the current thread.
1233 //
1234 // These 3 tests can be done by evaluating the following
1235 // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1236 // assuming both stack pointer and pagesize have their
1237 // least significant bits clear.
1238 // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1239 subptr(swap_reg, rsp);
1240 andptr(swap_reg, zero_bits - (int)os::vm_page_size());
1241
1242 // Save the test result, for recursive case, the result is zero
1243 movptr(Address(lock_reg, mark_offset), swap_reg);
// Non-zero means the mark is not a stack address owned by this thread:
// take the runtime slow path.
1244 jcc(Assembler::notZero, slow_case);
1245 }
// Fall-through success path skips the held-monitor count bump below; only
// code that jumps to count_locking (in the elided region) increments it.
1246 jmp(done);
1247
1248 bind(count_locking);
1249 inc_held_monitor_count();
1250 jmp(done);
1251
1252 bind(slow_case);
1253
1254 push_cont_fastpath();
1255 // Call the runtime routine for slow case
1256 if (LockingMode == LM_LIGHTWEIGHT) {
// Lightweight locking passes the object itself to monitorenter_obj ...
1257 call_VM(noreg,
1258 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
1259 obj_reg);
1260 } else {
// ... while legacy locking passes the BasicObjectLock slot.
1261 call_VM(noreg,
1262 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1263 lock_reg);
1264 }
1265 pop_cont_fastpath();
1266 bind(done);
1267 }
1268 }
1269
1270
1271 // Unlocks an object. Used in monitorexit bytecode and
1272 // remove_activation. Throws an IllegalMonitorStateException if object is
1273 // not locked by current thread.
1274 //
1275 // Args:
1276 // rdx, c_rarg1: BasicObjectLock for lock
1277 //
1278 // Kills:
1279 // rax
1280 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1281 // rscratch1 (scratch reg)
1282 // rax, rbx, rcx, rdx
// Emits monitorexit code for the BasicObjectLock slot that lock_reg points at.
// LM_MONITOR goes straight to the runtime; otherwise an inlined unlock fast
// path (per LockingMode) is emitted with a runtime fallback at slow_case.
// In this version dec_held_monitor_count() is emitted only on the LM_LEGACY
// non-recursive unlock path (after a successful cmpxchg), not unconditionally.
1283 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Fixed by the interpreter's calling convention; assert only documents it.
1284 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1285 "The argument is only for looks. It must be c_rarg1");
1286
1287 if (LockingMode == LM_MONITOR) {
1288 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1289 } else {
1290 Label done, slow_case;
1291
1292 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1293 const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark
1294 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1295
1296 save_bcp(); // Save in case of exception
1297
1298 if (LockingMode != LM_LIGHTWEIGHT) {
1299 // Convert from BasicObjectLock structure to object and BasicLock
1300 // structure Store the BasicLock address into %rax
1301 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
1302 }
1303
1304 // Load oop into obj_reg(%c_rarg3)
1305 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
1306
// Clear the obj slot now; it is restored below if the slow path is taken.
1307 // Free entry
1308 movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
1309
1310 if (LockingMode == LM_LIGHTWEIGHT) {
1311 #ifdef _LP64
1312 lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
1313 #else
1314 // This relies on the implementation of lightweight_unlock being able to handle
1315 // that the reg_rax and thread Register parameters may alias each other.
1316 get_thread(swap_reg);
1317 lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case);
1318 #endif
1319 } else if (LockingMode == LM_LEGACY) {
1320 // Load the old header from BasicLock structure
1321 movptr(header_reg, Address(swap_reg,
1322 BasicLock::displaced_header_offset_in_bytes()));
1323
1324 // Test for recursion
1325 testptr(header_reg, header_reg);
1326
// A zero displaced header marks a recursive stack lock: nothing to swap back
// and the held-monitor count is not decremented for the inner unlock.
1327 // zero for recursive case
1328 jcc(Assembler::zero, done);
1329
// Try to restore the displaced header into the object's mark word; only
// succeeds if the mark still points at our BasicLock (in swap_reg/%rax).
1330 // Atomic swap back the old header
1331 lock();
1332 cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1333
1334 // zero for simple unlock of a stack-lock case
1335 jcc(Assembler::notZero, slow_case);
1336 dec_held_monitor_count();
1337 }
1338 jmp(done);
1339
1340 bind(slow_case);
1341 // Call the runtime routine for slow case.
// The runtime expects a populated BasicObjectLock, so put the oop back first.
1342 movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
1343 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1344
1345 bind(done);
1346
1347 restore_bcp();
1348 }
1349 }
1350
// Loads the method-data pointer (MDP) from the current interpreter frame into
// mdp and branches to zero_continue if it is null (no MDO allocated yet).
// Only meaningful when ProfileInterpreter is enabled.
1351 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1352 Label& zero_continue) {
1353 assert(ProfileInterpreter, "must be profiling interpreter");
// MDP lives at a fixed slot in the interpreter frame, addressed off rbp.
1354 movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1355 testptr(mdp, mdp);
1356 jcc(Assembler::zero, zero_continue);
1357 }
|