679 const Register tmp3 = c_rarg5;
680
681 const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
682 const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
683 const int mark_offset = lock_offset +
684 BasicLock::displaced_header_offset_in_bytes();
685
686 Label slow_case;
687
688 // Load object pointer into obj_reg %c_rarg3
689 ldr(obj_reg, Address(lock_reg, obj_offset));
690
691 if (DiagnoseSyncOnValueBasedClasses != 0) {
692 load_klass(tmp, obj_reg);
693 ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
694 tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
695 br(Assembler::NE, slow_case);
696 }
697
698 if (LockingMode == LM_LIGHTWEIGHT) {
699 lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
700 b(count);
701 } else if (LockingMode == LM_LEGACY) {
702 // Load (object->mark() | 1) into swap_reg
703 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
704 orr(swap_reg, rscratch1, 1);
705
706 // Save (object->mark() | 1) into BasicLock's displaced header
707 str(swap_reg, Address(lock_reg, mark_offset));
708
709 assert(lock_offset == 0,
710 "displached header must be first word in BasicObjectLock");
711
712 Label fail;
713 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
714
715 // Fast check for recursive lock.
716 //
717 // Can apply the optimization only if this is a stack lock
718 // allocated in this thread. For efficiency, we can focus on
719 // recently allocated stack locks (instead of reading the stack
735 // owned by the current thread.
736 //
737 // These 3 tests can be done by evaluating the following
738 // expression: ((mark - sp) & (7 - os::vm_page_size())),
739 // assuming both stack pointer and pagesize have their
740 // least significant 3 bits clear.
741 // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
742 // NOTE2: aarch64 does not like to subtract sp from rn so take a
743 // copy
744 mov(rscratch1, sp);
745 sub(swap_reg, swap_reg, rscratch1);
746 ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
747
748 // Save the test result, for recursive case, the result is zero
749 str(swap_reg, Address(lock_reg, mark_offset));
750 br(Assembler::EQ, count);
751 }
752 bind(slow_case);
753
754 // Call the runtime routine for slow case
755 if (LockingMode == LM_LIGHTWEIGHT) {
756 call_VM(noreg,
757 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
758 obj_reg);
759 } else {
760 call_VM(noreg,
761 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
762 lock_reg);
763 }
764 b(done);
765
766 bind(count);
767 increment(Address(rthread, JavaThread::held_monitor_count_offset()));
768
769 bind(done);
770 }
771 }
772
773
774 // Unlocks an object. Used in monitorexit bytecode and
775 // remove_activation. Throws an IllegalMonitorStateException if object is
776 // not locked by current thread.
777 //
778 // Args:
779 // c_rarg1: BasicObjectLock for lock
780 //
781 // Kills:
782 // r0
783 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
|
679 const Register tmp3 = c_rarg5;
680
681 const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
682 const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
683 const int mark_offset = lock_offset +
684 BasicLock::displaced_header_offset_in_bytes();
685
686 Label slow_case;
687
688 // Load object pointer into obj_reg %c_rarg3
689 ldr(obj_reg, Address(lock_reg, obj_offset));
690
691 if (DiagnoseSyncOnValueBasedClasses != 0) {
692 load_klass(tmp, obj_reg);
693 ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
694 tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
695 br(Assembler::NE, slow_case);
696 }
697
698 if (LockingMode == LM_LIGHTWEIGHT) {
699 lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
700 b(count);
701 } else if (LockingMode == LM_LEGACY) {
702 // Load (object->mark() | 1) into swap_reg
703 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
704 orr(swap_reg, rscratch1, 1);
705
706 // Save (object->mark() | 1) into BasicLock's displaced header
707 str(swap_reg, Address(lock_reg, mark_offset));
708
709 assert(lock_offset == 0,
710 "displached header must be first word in BasicObjectLock");
711
712 Label fail;
713 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
714
715 // Fast check for recursive lock.
716 //
717 // Can apply the optimization only if this is a stack lock
718 // allocated in this thread. For efficiency, we can focus on
719 // recently allocated stack locks (instead of reading the stack
735 // owned by the current thread.
736 //
737 // These 3 tests can be done by evaluating the following
738 // expression: ((mark - sp) & (7 - os::vm_page_size())),
739 // assuming both stack pointer and pagesize have their
740 // least significant 3 bits clear.
741 // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
742 // NOTE2: aarch64 does not like to subtract sp from rn so take a
743 // copy
744 mov(rscratch1, sp);
745 sub(swap_reg, swap_reg, rscratch1);
746 ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
747
748 // Save the test result, for recursive case, the result is zero
749 str(swap_reg, Address(lock_reg, mark_offset));
750 br(Assembler::EQ, count);
751 }
752 bind(slow_case);
753
754 // Call the runtime routine for slow case
755 call_VM(noreg,
756 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
757 lock_reg);
758 b(done);
759
760 bind(count);
761 increment(Address(rthread, JavaThread::held_monitor_count_offset()));
762
763 bind(done);
764 }
765 }
766
767
768 // Unlocks an object. Used in monitorexit bytecode and
769 // remove_activation. Throws an IllegalMonitorStateException if object is
770 // not locked by current thread.
771 //
772 // Args:
773 // c_rarg1: BasicObjectLock for lock
774 //
775 // Kills:
776 // r0
777 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
|