src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
// Load the oop from the handle
__ movptr(obj_reg, Address(oop_handle_reg, 0));
if (!UseHeavyMonitors) {
+ if (UseFastLocking) {
+ // Load object header
+ __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+ __ fast_lock_impl(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
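+ // Presumably fast_lock_impl attempts the lightweight-locking fast path here:
+ // CAS the unlocked header loaded into swap_reg to the fast-locked state and
+ // record obj_reg on the current thread's lock stack, branching to
+ // slow_path_lock if that fails (assumption; see the MacroAssembler helper).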
+ } else {
+ // Load immediate 1 into swap_reg %rax
+ __ movl(swap_reg, 1);
- // Load immediate 1 into swap_reg %rax
- __ movl(swap_reg, 1);
-
- // Load (object->mark() | 1) into swap_reg %rax
- __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+ // Load (object->mark() | 1) into swap_reg %rax
+ __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- // Save (object->mark() | 1) into BasicLock's displaced header
- __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+ // Save (object->mark() | 1) into BasicLock's displaced header
+ __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
- // src -> dest iff dest == rax else rax <- dest
- __ lock();
- __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ jcc(Assembler::equal, count_mon);
+ // src -> dest iff dest == rax else rax <- dest
+ __ lock();
+ __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+ __ jcc(Assembler::equal, count_mon);
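+ // A successful CAS means the object was unlocked and its header now points
+ // to our BasicLock, so jump to count_mon and bump the held monitor count;
+ // otherwise fall through to check for a recursive stack lock.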
- // Hmm should this move to the slow path code area???
+ // Hmm should this move to the slow path code area???
- // Test if the oopMark is an obvious stack pointer, i.e.,
- // 1) (mark & 3) == 0, and
- // 2) rsp <= mark < mark + os::pagesize()
- // These 3 tests can be done by evaluating the following
- // expression: ((mark - rsp) & (3 - os::vm_page_size())),
- // assuming both stack pointer and pagesize have their
- // least significant 2 bits clear.
- // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
+ // Test if the oopMark is an obvious stack pointer, i.e.,
+ // 1) (mark & 3) == 0, and
+ // 2) rsp <= mark < mark + os::pagesize()
+ // These 3 tests can be done by evaluating the following
+ // expression: ((mark - rsp) & (3 - os::vm_page_size())),
+ // assuming both stack pointer and pagesize have their
+ // least significant 2 bits clear.
+ // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
- __ subptr(swap_reg, rsp);
- __ andptr(swap_reg, 3 - (int)os::vm_page_size());
+ __ subptr(swap_reg, rsp);
+ __ andptr(swap_reg, 3 - (int)os::vm_page_size());
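+ // For example, with a 4K page size the mask is 3 - 4096 = 0x...FFFFF003,
+ // which keeps the low two bits plus every bit at or above the page size.
+ // A mark that is a stack-lock address on this thread's current page
+ // (mark - rsp in [0, pagesize), low two bits clear) yields zero, i.e. a
+ // recursive lock; anything else leaves a bit set and we take slow_path_lock.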
- // Save the test result, for recursive case, the result is zero
- __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
- __ jcc(Assembler::notEqual, slow_path_lock);
+ // Save the test result, for recursive case, the result is zero
+ __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+ __ jcc(Assembler::notEqual, slow_path_lock);
+ }
} else {
__ jmp(slow_path_lock);
}
__ bind(count_mon);
__ inc_held_monitor_count();
Label fast_done;
// Get locked oop from the handle we passed to jni
__ movptr(obj_reg, Address(oop_handle_reg, 0));
- if (!UseHeavyMonitors) {
+ if (!UseHeavyMonitors && !UseFastLocking) {
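+ // Assumption: with UseFastLocking the BasicLock slot holds no displaced
+ // header, so the recursive stack-lock check below only applies to the
+ // legacy stack-locking scheme.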
Label not_recur;
// Simple recursive lock?
__ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
__ jcc(Assembler::notEqual, not_recur);
__ dec_held_monitor_count();
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
save_native_result(masm, ret_type, stack_slots);
}
if (!UseHeavyMonitors) {
- // get address of the stack lock
- __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
- // get old displaced header
- __ movptr(old_hdr, Address(rax, 0));
-
- // Atomic swap old header if oop still contains the stack lock
- __ lock();
- __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ jcc(Assembler::notEqual, slow_path_unlock);
+ if (UseFastLocking) {
+ __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+ __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
+ __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
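+ // Presumably fast_unlock_impl undoes the lightweight lock: swap_reg holds
+ // the mark with the lock bits cleared, and the helper restores the header to
+ // its unlocked state and pops the entry from the thread's lock stack,
+ // branching to slow_path_unlock if the object is no longer ours (assumption).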
+ } else {
+ // get address of the stack lock
+ __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+ // get old displaced header
+ __ movptr(old_hdr, Address(rax, 0));
+
+ // Atomic swap old header if oop still contains the stack lock
+ __ lock();
+ __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+ __ jcc(Assembler::notEqual, slow_path_unlock);
+ }
__ dec_held_monitor_count();
} else {
__ jmp(slow_path_unlock);
}