< prev index next >

src/hotspot/cpu/x86/interp_masm_x86.cpp

Print this page

1142           InterpreterRuntime::build_method_counters), method);
1143   movptr(mcs, Address(method,Method::method_counters_offset()));
1144   testptr(mcs, mcs);
1145   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1146   bind(has_counters);
1147 }
1148 
1149 
1150 // Lock object
1151 //
1152 // Args:
1153 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1154 //
1155 // Kills:
1156 //      rax, rbx
1157 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1158   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1159          "The argument is only for looks. It must be c_rarg1");
1160 
1161   if (LockingMode == LM_MONITOR) {
       // LM_MONITOR: no fast path is emitted; always enter through the runtime.

1162     call_VM(noreg,
1163             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1164             lock_reg);

1165   } else {
1166     Label count_locking, done, slow_case;
1167 
1168     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1169     const Register tmp_reg = rbx;
1170     const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1171     const Register rklass_decode_tmp = rscratch1;
1172 
1173     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
1174     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
1175     const int mark_offset = lock_offset +
1176                             BasicLock::displaced_header_offset_in_bytes();
1177 
1178     // Load object pointer into obj_reg
1179     movptr(obj_reg, Address(lock_reg, obj_offset));
1180 
1181     if (DiagnoseSyncOnValueBasedClasses != 0) {
1182       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1183       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1184       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
       // NOTE(review): original lines 1185-1227 are collapsed by the diff viewer
       // and are NOT visible in this page; do not infer their contents from here.

1228       // nor apply the optimization if the stack lock is inside the stack
1229       // of another thread. The latter is avoided even in case of overflow
1230       // because we have guard pages at the end of all stacks. Hence, if
1231       // we go over the stack base and hit the stack of another thread,
1232       // this should not be in a writeable area that could contain a
1233       // stack lock allocated by that thread. As a consequence, a stack
1234       // lock less than page size away from rsp is guaranteed to be
1235       // owned by the current thread.
1236       //
1237       // These 3 tests can be done by evaluating the following
1238       // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1239       // assuming both stack pointer and pagesize have their
1240       // least significant bits clear.
1241       // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1242       subptr(swap_reg, rsp);
1243       andptr(swap_reg, zero_bits - (int)os::vm_page_size());
1244 
1245       // Save the test result, for recursive case, the result is zero
1246       movptr(Address(lock_reg, mark_offset), swap_reg);
1247       jcc(Assembler::notZero, slow_case);
1248 
1249       bind(count_locking);
1250     }



       // In this (old) version every successful fast-path lock, including the
       // recursive stack-lock case, falls through to bump the held-monitor count.
1251     inc_held_monitor_count();
1252     jmp(done);
1253 
1254     bind(slow_case);
1255 

1256     // Call the runtime routine for slow case
1257     if (LockingMode == LM_LIGHTWEIGHT) {
       // Lightweight slow path passes the object itself; legacy passes the
       // BasicObjectLock.
1258       call_VM(noreg,
1259               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
1260               obj_reg);
1261     } else {
1262       call_VM(noreg,
1263               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1264               lock_reg);
1265     }

1266     bind(done);
1267   }
1268 }
1269 
1270 
1271 // Unlocks an object. Used in monitorexit bytecode and
1272 // remove_activation.  Throws an IllegalMonitorStateException if object is
1273 // not locked by current thread.
1274 //
1275 // Args:
1276 //      rdx, c_rarg1: BasicObjectLock for lock
1277 //
1278 // Kills:
1279 //      rax
1280 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1281 //      rscratch1 (scratch reg)
1282 // rax, rbx, rcx, rdx
1283 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1284   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1285          "The argument is only for looks. It must be c_rarg1");
1286 
1287   if (LockingMode == LM_MONITOR) {
       // Heavy-monitor mode: always exit through the runtime.
1288     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1289   } else {
1290     Label count_locking, done, slow_case;
1291 
1292     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
1293     const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
1294     const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop
1295 
1296     save_bcp(); // Save in case of exception
1297 
1298     if (LockingMode != LM_LIGHTWEIGHT) {
1299       // Convert from BasicObjectLock structure to object and BasicLock
1300       // structure Store the BasicLock address into %rax
1301       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
1302     }
1303 
1304     // Load oop into obj_reg(%c_rarg3)
1305     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
1306 
1307     // Free entry
       // Clear the BasicObjectLock's obj slot eagerly; the slow path below
       // restores it before calling into the runtime.
1308     movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
1309 
1310     if (LockingMode == LM_LIGHTWEIGHT) {
1311 #ifdef _LP64
1312       lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
1313 #else
1314       // This relies on the implementation of lightweight_unlock being able to handle
1315       // that the reg_rax and thread Register parameters may alias each other.
1316       get_thread(swap_reg);
1317       lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case);
1318 #endif
1319     } else if (LockingMode == LM_LEGACY) {
1320       // Load the old header from BasicLock structure
1321       movptr(header_reg, Address(swap_reg,
1322                                  BasicLock::displaced_header_offset_in_bytes()));
1323 
1324       // Test for recursion
1325       testptr(header_reg, header_reg);
1326 
1327       // zero for recursive case
1328       jcc(Assembler::zero, count_locking);
1329 
1330       // Atomic swap back the old header
1331       lock();
1332       cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1333 
1334       // zero for simple unlock of a stack-lock case
1335       jcc(Assembler::notZero, slow_case);
1336 
1337       bind(count_locking);
1338     }
       // In this (old) version both the recursive and the simple stack-unlock
       // fast paths decrement the held-monitor count.
1339     dec_held_monitor_count();
1340     jmp(done);
1341 
1342     bind(slow_case);
1343     // Call the runtime routine for slow case.
1344     movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
1345     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1346 
1347     bind(done);
1348 
1349     restore_bcp();
1350   }
1351 }
1352 
// Load the method-data pointer (MDP) of the current interpreter frame into
// 'mdp' and branch to 'zero_continue' if the frame slot holds null, i.e. no
// MethodData has been set up for this method.
1353 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1354                                                          Label& zero_continue) {
1355   assert(ProfileInterpreter, "must be profiling interpreter");
1356   movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1357   testptr(mdp, mdp);
1358   jcc(Assembler::zero, zero_continue);
1359 }

1142           InterpreterRuntime::build_method_counters), method);
1143   movptr(mcs, Address(method,Method::method_counters_offset()));
1144   testptr(mcs, mcs);
1145   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1146   bind(has_counters);
1147 }
1148 
1149 
1150 // Lock object
1151 //
1152 // Args:
1153 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1154 //
1155 // Kills:
1156 //      rax, rbx
1157 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1158   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1159          "The argument is only for looks. It must be c_rarg1");
1160 
1161   if (LockingMode == LM_MONITOR) {
       // NOTE(review): push/pop_cont_fastpath bracket the runtime call --
       // presumably continuation bookkeeping around the blocking monitorenter;
       // confirm against push_cont_fastpath's definition.
1162     push_cont_fastpath();
1163     call_VM(noreg,
1164             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1165             lock_reg);
1166     pop_cont_fastpath();
1167   } else {
1168     Label count_locking, done, slow_case;
1169 
1170     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1171     const Register tmp_reg = rbx;
1172     const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1173     const Register rklass_decode_tmp = rscratch1;
1174 
1175     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
1176     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
1177     const int mark_offset = lock_offset +
1178                             BasicLock::displaced_header_offset_in_bytes();
1179 
1180     // Load object pointer into obj_reg
1181     movptr(obj_reg, Address(lock_reg, obj_offset));
1182 
1183     if (DiagnoseSyncOnValueBasedClasses != 0) {
1184       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1185       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1186       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
       // NOTE(review): original lines 1187-1229 are collapsed by the diff viewer
       // and are NOT visible in this page; do not infer their contents from here.

1230       // nor apply the optimization if the stack lock is inside the stack
1231       // of another thread. The latter is avoided even in case of overflow
1232       // because we have guard pages at the end of all stacks. Hence, if
1233       // we go over the stack base and hit the stack of another thread,
1234       // this should not be in a writeable area that could contain a
1235       // stack lock allocated by that thread. As a consequence, a stack
1236       // lock less than page size away from rsp is guaranteed to be
1237       // owned by the current thread.
1238       //
1239       // These 3 tests can be done by evaluating the following
1240       // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1241       // assuming both stack pointer and pagesize have their
1242       // least significant bits clear.
1243       // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1244       subptr(swap_reg, rsp);
1245       andptr(swap_reg, zero_bits - (int)os::vm_page_size());
1246 
1247       // Save the test result, for recursive case, the result is zero
1248       movptr(Address(lock_reg, mark_offset), swap_reg);
1249       jcc(Assembler::notZero, slow_case);
       // Recursive stack-lock case (result zero) falls through to jmp(done)
       // below, i.e. it no longer executes inc_held_monitor_count().


1250     }
1251     jmp(done);
1252 
       // count_locking is reached only by jumps from the collapsed region above;
       // only that path bumps the held-monitor count in this version.
1253     bind(count_locking);
1254     inc_held_monitor_count();
1255     jmp(done);
1256 
1257     bind(slow_case);
1258 
1259     push_cont_fastpath();
1260     // Call the runtime routine for slow case
1261     if (LockingMode == LM_LIGHTWEIGHT) {
       // Lightweight slow path passes the object itself; legacy passes the
       // BasicObjectLock.
1262       call_VM(noreg,
1263               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
1264               obj_reg);
1265     } else {
1266       call_VM(noreg,
1267               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1268               lock_reg);
1269     }
1270     pop_cont_fastpath();
1271     bind(done);
1272   }
1273 }
1274 
1275 
1276 // Unlocks an object. Used in monitorexit bytecode and
1277 // remove_activation.  Throws an IllegalMonitorStateException if object is
1278 // not locked by current thread.
1279 //
1280 // Args:
1281 //      rdx, c_rarg1: BasicObjectLock for lock
1282 //
1283 // Kills:
1284 //      rax
1285 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1286 //      rscratch1 (scratch reg)
1287 // rax, rbx, rcx, rdx
1288 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1289   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1290          "The argument is only for looks. It must be c_rarg1");
1291 
1292   if (LockingMode == LM_MONITOR) {
       // Heavy-monitor mode: always exit through the runtime.
1293     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1294   } else {
       // Note: the count_locking label of the previous version is gone; the
       // recursive case now branches directly to 'done'.
1295     Label done, slow_case;
1296 
1297     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
1298     const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
1299     const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop
1300 
1301     save_bcp(); // Save in case of exception
1302 
1303     if (LockingMode != LM_LIGHTWEIGHT) {
1304       // Convert from BasicObjectLock structure to object and BasicLock
1305       // structure Store the BasicLock address into %rax
1306       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
1307     }
1308 
1309     // Load oop into obj_reg(%c_rarg3)
1310     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
1311 
1312     // Free entry
       // Clear the BasicObjectLock's obj slot eagerly; the slow path below
       // restores it before calling into the runtime.
1313     movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
1314 
1315     if (LockingMode == LM_LIGHTWEIGHT) {
1316 #ifdef _LP64
1317       lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
1318 #else
1319       // This relies on the implementation of lightweight_unlock being able to handle
1320       // that the reg_rax and thread Register parameters may alias each other.
1321       get_thread(swap_reg);
1322       lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case);
1323 #endif
1324     } else if (LockingMode == LM_LEGACY) {
1325       // Load the old header from BasicLock structure
1326       movptr(header_reg, Address(swap_reg,
1327                                  BasicLock::displaced_header_offset_in_bytes()));
1328 
1329       // Test for recursion
1330       testptr(header_reg, header_reg);
1331 
1332       // zero for recursive case
       // Recursive unlock jumps straight to 'done' and does NOT run
       // dec_held_monitor_count() in this version.
1333       jcc(Assembler::zero, done);
1334 
1335       // Atomic swap back the old header
1336       lock();
1337       cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1338 
1339       // zero for simple unlock of a stack-lock case
1340       jcc(Assembler::notZero, slow_case);
       // Counter update is now confined to the successful LM_LEGACY
       // non-recursive unlock; the LM_LIGHTWEIGHT fast path above does not
       // decrement it here.
1341       dec_held_monitor_count();

1342     }

1343     jmp(done);
1344 
1345     bind(slow_case);
1346     // Call the runtime routine for slow case.
1347     movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
1348     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1349 
1350     bind(done);
1351 
1352     restore_bcp();
1353   }
1354 }
1355 
// Load the method-data pointer (MDP) of the current interpreter frame into
// 'mdp' and branch to 'zero_continue' if the frame slot holds null, i.e. no
// MethodData has been set up for this method.
1356 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1357                                                          Label& zero_continue) {
1358   assert(ProfileInterpreter, "must be profiling interpreter");
1359   movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1360   testptr(mdp, mdp);
1361   jcc(Assembler::zero, zero_continue);
1362 }
< prev index next >