// ========== Original version ==========
319 save_bcp();
320 #ifdef ASSERT
321 {
322 Label L;
323 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
324 jcc(Assembler::equal, L);
325 stop("InterpreterMacroAssembler::call_VM_base:"
326 " last_sp isn't null");
327 bind(L);
328 }
329 #endif /* ASSERT */
330 // super call
331 MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
332 entry_point, number_of_arguments,
333 check_exceptions);
334 // interpreter specific
335 restore_bcp();
336 restore_locals();
337 }
338
339 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
340 if (JvmtiExport::can_pop_frame()) {
341 Label L;
342 // Initiate popframe handling only if it is not already being
343 // processed. If the flag has the popframe_processing bit set, it
344 // means that this code is called *during* popframe handling - we
345 // don't want to reenter.
346 // This method is only called just after the call into the vm in
347 // call_VM_base, so the arg registers are available.
348 Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
349 LP64_ONLY(c_rarg0);
350 movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
351 testl(pop_cond, JavaThread::popframe_pending_bit);
352 jcc(Assembler::zero, L);
353 testl(pop_cond, JavaThread::popframe_processing_bit);
354 jcc(Assembler::notZero, L);
355 // Call Interpreter::remove_activation_preserving_args_entry() to get the
356 // address of the same-named entrypoint in the generated interpreter code.
357 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
358 jmp(rax);
1137 InterpreterRuntime::build_method_counters), method);
1138 movptr(mcs, Address(method, Method::method_counters_offset()));
1139 testptr(mcs, mcs);
1140 jcc(Assembler::zero, skip); // No MethodCounters allocated; an OutOfMemoryError is pending
1141 bind(has_counters);
1142 }
1143
1144
1145 // Lock object
1146 //
1147 // Args:
1148 // rdx, c_rarg1: BasicObjectLock to be used for locking
1149 //
1150 // Kills:
1151 // rax, rbx
1152 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1153 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1154 "The argument is only for looks. It must be c_rarg1");
1155
1156 if (LockingMode == LM_MONITOR) {
1157 call_VM(noreg,
1158 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1159 lock_reg);
1160 } else {
1161 Label count_locking, done, slow_case;
1162
1163 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1164 const Register tmp_reg = rbx;
1165 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1166 const Register rklass_decode_tmp = rscratch1;
1167
1168 const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
1169 const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
1170 const int mark_offset = lock_offset +
1171 BasicLock::displaced_header_offset_in_bytes();
1172
1173 // Load object pointer into obj_reg
1174 movptr(obj_reg, Address(lock_reg, obj_offset));
1175
1176 if (DiagnoseSyncOnValueBasedClasses != 0) {
1177 load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1224 // because we have guard pages at the end of all stacks. Hence, if
1225 // we go over the stack base and hit the stack of another thread,
1226 // this should not be in a writeable area that could contain a
1227 // stack lock allocated by that thread. As a consequence, a stack
1228 // lock less than page size away from rsp is guaranteed to be
1229 // owned by the current thread.
1230 //
1231 // These 3 tests can be done by evaluating the following
1232 // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1233 // assuming both the stack pointer and the page size have their
1234 // least significant bits clear.
1235 // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1236 subptr(swap_reg, rsp);
1237 andptr(swap_reg, zero_bits - (int)os::vm_page_size());
1238
1239 // Save the test result; for the recursive case, the result is zero
1240 movptr(Address(lock_reg, mark_offset), swap_reg);
1241 jcc(Assembler::notZero, slow_case);
1242
1243 bind(count_locking);
1244 }
1245 inc_held_monitor_count();
1246 jmp(done);
1247
1248 bind(slow_case);
1249
1250 // Call the runtime routine for slow case
1251 call_VM(noreg,
1252 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1253 lock_reg);
1254 bind(done);
1255 }
1256 }
1257
1258
1259 // Unlocks an object. Used in monitorexit bytecode and
1260 // remove_activation. Throws an IllegalMonitorStateException if the
1261 // object is not locked by the current thread.
1262 //
1263 // Args:
1264 // rdx, c_rarg1: BasicObjectLock for lock
1265 //
1266 // Kills:
1267 // rax
1268 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1269 // rscratch1 (scratch reg)
1270 // rbx, rcx, rdx
1271 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1304 #endif
1305 } else if (LockingMode == LM_LEGACY) {
1306 // Load the old header from BasicLock structure
1307 movptr(header_reg, Address(swap_reg,
1308 BasicLock::displaced_header_offset_in_bytes()));
1309
1310 // Test for recursion
1311 testptr(header_reg, header_reg);
1312
1313 // zero for recursive case
1314 jcc(Assembler::zero, count_locking);
1315
1316 // Atomic swap back the old header
1317 lock();
1318 cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1319
1320 // zero for simple unlock of a stack-lock case
1321 jcc(Assembler::notZero, slow_case);
1322
1323 bind(count_locking);
1324 }
1325 dec_held_monitor_count();
1326 jmp(done);
1327
1328 bind(slow_case);
1329 // Call the runtime routine for slow case.
1330 movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
1331 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1332
1333 bind(done);
1334
1335 restore_bcp();
1336 }
1337 }
1338
1339 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1340 Label& zero_continue) {
1341 assert(ProfileInterpreter, "must be profiling interpreter");
1342 movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1343 testptr(mdp, mdp);
1344 jcc(Assembler::zero, zero_continue);
1345 }
// ========== Updated version (adds call_VM_preemptable) ==========
319 save_bcp();
320 #ifdef ASSERT
321 {
322 Label L;
323 cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
324 jcc(Assembler::equal, L);
325 stop("InterpreterMacroAssembler::call_VM_base:"
326 " last_sp isn't null");
327 bind(L);
328 }
329 #endif /* ASSERT */
330 // super call
331 MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
332 entry_point, number_of_arguments,
333 check_exceptions);
334 // interpreter specific
335 restore_bcp();
336 restore_locals();
337 }
338
339 #ifdef _LP64
340 void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
341 address entry_point,
342 Register arg_1) {
343 assert(arg_1 == c_rarg1, "arg_1 must be in c_rarg1");
344 Label resume_pc, not_preempted;
345
346 push_cont_fastpath();
347
348 // Make VM call. In case of preemption set last_pc to
349 // the one we want to resume to.
350 lea(rscratch1, resume_pc);
351 push(rscratch1);
352 MacroAssembler::call_VM_helper(oop_result, entry_point, 1, false /*check_exceptions*/);
353 pop(rscratch1);
354
355 pop_cont_fastpath();
356
357 // Check if preempted
358 movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
359 cmpptr(rscratch1, NULL_WORD);
360 jccb(Assembler::zero, not_preempted);
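// Preempted during the VM call: clear the alternate-return slot and jump
// to the address the VM installed (resume_pc above in the normal case).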
361 movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
362 jmp(rscratch1);
363
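// If we were preempted, execution continues here (the address pushed
// above) once the virtual thread is resumed.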
364 bind(resume_pc);
365 restore_after_resume(false /* is_native */);
366
367 bind(not_preempted);
368 }
369
370 void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
371 lea(rscratch1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
372 call(rscratch1);
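// The adapter is expected to re-establish the interpreter frame state
// (saved at preemption) before we continue.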
373 if (is_native) {
374 // On resume we need to set up stack as expected
375 push(dtos);
376 push(ltos);
377 }
378 }
379 #else
380 void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
381 address entry_point,
382 Register arg_1) {
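// 32-bit never preempts here (continuations are LP64-only), so a plain
// call_VM is sufficient.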
383 MacroAssembler::call_VM(oop_result, entry_point, arg_1);
384 }
385 #endif // _LP64
386
387 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
388 if (JvmtiExport::can_pop_frame()) {
389 Label L;
390 // Initiate popframe handling only if it is not already being
391 // processed. If the flag has the popframe_processing bit set, it
392 // means that this code is called *during* popframe handling - we
393 // don't want to reenter.
394 // This method is only called just after the call into the vm in
395 // call_VM_base, so the arg registers are available.
396 Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
397 LP64_ONLY(c_rarg0);
398 movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
399 testl(pop_cond, JavaThread::popframe_pending_bit);
400 jcc(Assembler::zero, L);
401 testl(pop_cond, JavaThread::popframe_processing_bit);
402 jcc(Assembler::notZero, L);
403 // Call Interpreter::remove_activation_preserving_args_entry() to get the
404 // address of the same-named entrypoint in the generated interpreter code.
405 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
406 jmp(rax);
1185 InterpreterRuntime::build_method_counters), method);
1186 movptr(mcs, Address(method, Method::method_counters_offset()));
1187 testptr(mcs, mcs);
1188 jcc(Assembler::zero, skip); // No MethodCounters allocated; an OutOfMemoryError is pending
1189 bind(has_counters);
1190 }
1191
1192
1193 // Lock object
1194 //
1195 // Args:
1196 // rdx, c_rarg1: BasicObjectLock to be used for locking
1197 //
1198 // Kills:
1199 // rax, rbx
1200 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1201 assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1202 "The argument is only for looks. It must be c_rarg1");
1203
1204 if (LockingMode == LM_MONITOR) {
1205 call_VM_preemptable(noreg,
1206 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1207 lock_reg);
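// call_VM_preemptable (rather than plain call_VM) lets a virtual thread
// that blocks on the monitor be unmounted instead of pinning its carrier.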
1208 } else {
1209 Label count_locking, done, slow_case;
1210
1211 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1212 const Register tmp_reg = rbx;
1213 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1214 const Register rklass_decode_tmp = rscratch1;
1215
1216 const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
1217 const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
1218 const int mark_offset = lock_offset +
1219 BasicLock::displaced_header_offset_in_bytes();
1220
1221 // Load object pointer into obj_reg
1222 movptr(obj_reg, Address(lock_reg, obj_offset));
1223
1224 if (DiagnoseSyncOnValueBasedClasses != 0) {
1225 load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1272 // because we have guard pages at the end of all stacks. Hence, if
1273 // we go over the stack base and hit the stack of another thread,
1274 // this should not be in a writeable area that could contain a
1275 // stack lock allocated by that thread. As a consequence, a stack
1276 // lock less than page size away from rsp is guaranteed to be
1277 // owned by the current thread.
1278 //
1279 // These 3 tests can be done by evaluating the following
1280 // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1281 // assuming both the stack pointer and the page size have their
1282 // least significant bits clear.
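// For example (illustrative, assuming zero_bits == 7 as on LP64 and
// 4 KiB pages): the mask is 7 - 4096 = 0x...fffff007, so the result is
// zero exactly when 0 <= mark - rsp < 4096 and the difference is
// 8-byte aligned.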
1283 // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1284 subptr(swap_reg, rsp);
1285 andptr(swap_reg, zero_bits - (int)os::vm_page_size());
1286
1287 // Save the test result; for the recursive case, the result is zero
1288 movptr(Address(lock_reg, mark_offset), swap_reg);
1289 jcc(Assembler::notZero, slow_case);
1290
1291 bind(count_locking);
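// Stack lock acquired (initial or recursive): account for it in the
// thread's held monitor count.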
1292 inc_held_monitor_count();
1293 }
1294 jmp(done);
1295
1296 bind(slow_case);
1297
1298 // Call the runtime routine for slow case
1299 call_VM_preemptable(noreg,
1300 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1301 lock_reg);
1302 bind(done);
1303 }
1304 }
1305
1306
1307 // Unlocks an object. Used in monitorexit bytecode and
1308 // remove_activation. Throws an IllegalMonitorStateException if the
1309 // object is not locked by the current thread.
1310 //
1311 // Args:
1312 // rdx, c_rarg1: BasicObjectLock for lock
1313 //
1314 // Kills:
1315 // rax
1316 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1317 // rscratch1 (scratch reg)
1318 // rbx, rcx, rdx
1319 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1352 #endif
1353 } else if (LockingMode == LM_LEGACY) {
1354 // Load the old header from BasicLock structure
1355 movptr(header_reg, Address(swap_reg,
1356 BasicLock::displaced_header_offset_in_bytes()));
1357
1358 // Test for recursion
1359 testptr(header_reg, header_reg);
1360
1361 // zero for recursive case
1362 jcc(Assembler::zero, count_locking);
1363
1364 // Atomic swap back the old header
1365 lock();
1366 cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
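// swap_reg (rax) holds the BasicLock address: the cmpxchg restores the
// displaced header only if the mark word still points to this stack lock;
// otherwise the lock was inflated and we must take the slow path.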
1367
1368 // zero for simple unlock of a stack-lock case
1369 jcc(Assembler::notZero, slow_case);
1370
1371 bind(count_locking);
1372 dec_held_monitor_count();
1373 }
1374 jmp(done);
1375
1376 bind(slow_case);
1377 // Call the runtime routine for slow case.
1378 movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
1379 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1380
1381 bind(done);
1382
1383 restore_bcp();
1384 }
1385 }
1386
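// Loads the current frame's method data pointer (mdp) and branches to
// zero_continue if no MethodData has been allocated for this method.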
1387 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1388 Label& zero_continue) {
1389 assert(ProfileInterpreter, "must be profiling interpreter");
1390 movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1391 testptr(mdp, mdp);
1392 jcc(Assembler::zero, zero_continue);
1393 }