
src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp


 685 
 686     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 687     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 688     const int mark_offset = lock_offset +
 689                             BasicLock::displaced_header_offset_in_bytes();
 690 
 691     Label slow_case;
 692 
 693     // Load object pointer into obj_reg %c_rarg3
 694     ldr(obj_reg, Address(lock_reg, obj_offset));
 695 
 696     if (DiagnoseSyncOnValueBasedClasses != 0) {
 697       load_klass(tmp, obj_reg);
 698       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 699       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 700       br(Assembler::NE, slow_case);
 701     }
 702 
 703     if (LockingMode == LM_LIGHTWEIGHT) {
 704       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 705       b(count);
 706     } else if (LockingMode == LM_LEGACY) {
 707       // Load (object->mark() | 1) into swap_reg
 708       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 709       orr(swap_reg, rscratch1, 1);
 710 
 711       // Save (object->mark() | 1) into BasicLock's displaced header
 712       str(swap_reg, Address(lock_reg, mark_offset));
 713 
 714       assert(lock_offset == 0,
 715              "displaced header must be first word in BasicObjectLock");
 716 
 717       Label fail;
 718       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 719 
 720       // Fast check for recursive lock.
 721       //
 722       // Can apply the optimization only if this is a stack lock
 723       // allocated in this thread. For efficiency, we can focus on
 724       // recently allocated stack locks (instead of reading the stack
 725       // base and checking whether 'mark' points inside the current

 735       // because we have guard pages at the end of all stacks. Hence, if
 736       // we go over the stack base and hit the stack of another thread,
 737       // this should not be in a writeable area that could contain a
 738       // stack lock allocated by that thread. As a consequence, a stack
 739       // lock less than page size away from sp is guaranteed to be
 740       // owned by the current thread.
 741       //
 742       // These 3 tests can be done by evaluating the following
 743       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 744       // assuming both stack pointer and pagesize have their
 745       // least significant 3 bits clear.
 746       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 747       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 748       // copy
 749       mov(rscratch1, sp);
 750       sub(swap_reg, swap_reg, rscratch1);
 751       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 752 
 753       // Save the test result; for the recursive case, the result is zero
 754       str(swap_reg, Address(lock_reg, mark_offset));
 755       br(Assembler::EQ, count);

 756     }
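The three conditions in the comment above collapse into a single AND because the mask 7 - os::vm_page_size() keeps exactly the low three bits and every bit at or above the page size. A minimal standalone sketch (assuming a 4 KiB page and an 8-byte-aligned sp; the helper name is illustrative, not HotSpot's):

    #include <cstdint>
    #include <cstdio>

    // Zero iff mark is 8-byte aligned and 0 <= mark - sp < page_size.
    static bool looks_like_recursive_stack_lock(uint64_t mark, uint64_t sp) {
      const int64_t page_size = 4096;                  // assumed 4 KiB page
      const uint64_t mask = (uint64_t)(7 - page_size); // 0xFFFF...F007
      return ((mark - sp) & mask) == 0;
    }

    int main() {
      uint64_t sp = 0x0000ffff0000f000;
      printf("%d\n", looks_like_recursive_stack_lock(sp + 0x10, sp));   // 1: aligned, same page
      printf("%d\n", looks_like_recursive_stack_lock(sp + 0x11, sp));   // 0: low bits set
      printf("%d\n", looks_like_recursive_stack_lock(sp + 0x2000, sp)); // 0: beyond one page
      printf("%d\n", looks_like_recursive_stack_lock(sp - 0x10, sp));   // 0: below sp (wraps to huge)
      return 0;
    }

A mark that is 8-byte aligned and within one page above sp can only be a stack lock owned by the current thread, which is exactly what the EQ branch to count relies on.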
 757     bind(slow_case);
 758 
 759     // Call the runtime routine for slow case
 760     if (LockingMode == LM_LIGHTWEIGHT) {
 761       call_VM(noreg,
 762               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
 763               obj_reg);
 764     } else {
 765       call_VM(noreg,
 766               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 767               lock_reg);
 768     }
 769     b(done);
 770 
 771     bind(count);
 772     increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 773 
 774     bind(done);
 775   }
 776 }
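Everything above leans on the layout of BasicObjectLock: the assert requires the displaced header to be the first word, so that lock_reg itself can be CAS'd into the object's mark word. A simplified sketch of that layout (illustrative field names; the real declarations live in the runtime's basicLock headers):

    #include <cstddef>
    #include <cstdint>

    struct BasicLockSketch {
      uintptr_t displaced_header;   // saved mark word; 0 marks a recursive stack lock
    };
    struct BasicObjectLockSketch {
      BasicLockSketch lock;         // first word, hence lock_offset == 0
      void*           obj;          // the locked oop
    };
    static_assert(offsetof(BasicObjectLockSketch, lock) == 0,
                  "displaced header must be first word in BasicObjectLock");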
 777 
 778 
 779 // Unlocks an object. Used in monitorexit bytecode and
 780 // remove_activation.  Throws an IllegalMonitorStateException if object is
 781 // not locked by current thread.
 782 //
 783 // Args:
 784 //      c_rarg1: BasicObjectLock for lock
 785 //
 786 // Kills:
 787 //      r0
 788 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 789 //      rscratch1, rscratch2 (scratch regs)
 790 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 791 {
 792   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 793 

 798 
 799     const Register swap_reg   = r0;
 800     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 801     const Register obj_reg    = c_rarg3;  // Will contain the oop
 802     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 803 
 804     save_bcp(); // Save in case of exception
 805 
 806     if (LockingMode != LM_LIGHTWEIGHT) {
 807       // Convert from BasicObjectLock structure to object and BasicLock
 808       // structure. Store the BasicLock address into %r0
 809       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 810     }
 811 
 812     // Load oop into obj_reg(%c_rarg3)
 813     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 814 
 815     // Free entry
 816     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 817 
 818     if (LockingMode == LM_LIGHTWEIGHT) {
 819       Label slow_case;
 820       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 821       b(count);
 822       bind(slow_case);
 823     } else if (LockingMode == LM_LEGACY) {
 824       // Load the old header from BasicLock structure
 825       ldr(header_reg, Address(swap_reg,
 826                               BasicLock::displaced_header_offset_in_bytes()));
 827 
 828       // Test for recursion
 829       cbz(header_reg, count);
 830 
 831       // Atomic swap back the old header
 832       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 833     }
 834     // Call the runtime routine for slow case.
 835     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 836     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 837     b(done);
 838 
 839     bind(count);
 840     decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 841 
 842     bind(done);
 843     restore_bcp();
 844   }
 845 }
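The legacy unlock fast path above reduces to: a zero displaced header means a recursive exit with nothing to restore; otherwise the saved header is swapped back into the mark word, and a failed swap means the monitor was inflated and the runtime must take over. A minimal sketch with hypothetical types (the real code does this with cmpxchg_obj_header on the oop's mark):

    #include <atomic>
    #include <cstdint>

    struct LockSketch { uintptr_t displaced_header; };

    static bool fast_unlock(std::atomic<uintptr_t>& mark, LockSketch* lock) {
      uintptr_t header = lock->displaced_header;
      if (header == 0) return true;   // recursive exit: nothing to swap back
      uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
      // Succeeds only while the mark still points at our stack lock;
      // failure means the lock was inflated -> runtime slow path.
      return mark.compare_exchange_strong(expected, header);
    }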
 846 
 847 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 848                                                          Label& zero_continue) {
 849   assert(ProfileInterpreter, "must be profiling interpreter");
 850   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 851   cbz(mdp, zero_continue);
 852 }
 853 
 854 // Set the method data pointer for the current bcp.
 855 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 856   assert(ProfileInterpreter, "must be profiling interpreter");
 857   Label set_mdp;
 858   stp(r0, r1, Address(pre(sp, -2 * wordSize)));
 859 
 860   // Test MDO to avoid the call if it is null.
 861   ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

 685 
 686     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 687     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 688     const int mark_offset = lock_offset +
 689                             BasicLock::displaced_header_offset_in_bytes();
 690 
 691     Label slow_case;
 692 
 693     // Load object pointer into obj_reg %c_rarg3
 694     ldr(obj_reg, Address(lock_reg, obj_offset));
 695 
 696     if (DiagnoseSyncOnValueBasedClasses != 0) {
 697       load_klass(tmp, obj_reg);
 698       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 699       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 700       br(Assembler::NE, slow_case);
 701     }
 702 
 703     if (LockingMode == LM_LIGHTWEIGHT) {
 704       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 705       b(done);
 706     } else if (LockingMode == LM_LEGACY) {
 707       // Load (object->mark() | 1) into swap_reg
 708       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 709       orr(swap_reg, rscratch1, 1);
 710 
 711       // Save (object->mark() | 1) into BasicLock's displaced header
 712       str(swap_reg, Address(lock_reg, mark_offset));
 713 
 714       assert(lock_offset == 0,
 715              "displaced header must be first word in BasicObjectLock");
 716 
 717       Label fail;
 718       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 719 
 720       // Fast check for recursive lock.
 721       //
 722       // Can apply the optimization only if this is a stack lock
 723       // allocated in this thread. For efficiency, we can focus on
 724       // recently allocated stack locks (instead of reading the stack
 725       // base and checking whether 'mark' points inside the current

 735       // because we have guard pages at the end of all stacks. Hence, if
 736       // we go over the stack base and hit the stack of another thread,
 737       // this should not be in a writeable area that could contain a
 738       // stack lock allocated by that thread. As a consequence, a stack
 739       // lock less than page size away from sp is guaranteed to be
 740       // owned by the current thread.
 741       //
 742       // These 3 tests can be done by evaluating the following
 743       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 744       // assuming both stack pointer and pagesize have their
 745       // least significant 3 bits clear.
 746       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 747       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 748       // copy
 749       mov(rscratch1, sp);
 750       sub(swap_reg, swap_reg, rscratch1);
 751       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 752 
 753       // Save the test result; for the recursive case, the result is zero
 754       str(swap_reg, Address(lock_reg, mark_offset));
 755       br(Assembler::NE, slow_case);
 756       b(done);
 757     }
 758 
 759     bind(count);
 760     inc_held_monitor_count();
 761     b(done);
 762 
 763     bind(slow_case);
 764 
 765     //    push_cont_fastpath();
 766     // Call the runtime routine for slow case
 767     if (LockingMode == LM_LIGHTWEIGHT) {
 768       call_VM(noreg,
 769               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
 770               obj_reg);
 771     } else {
 772       call_VM(noreg,
 773               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 774               lock_reg);
 775     }
 776     //    pop_cont_fastpath();
 777     bind(done);
 778   }
 779 }
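Both slow paths reach the runtime through call_VM and CAST_FROM_FN_PTR; the latter is essentially a cast that launders a function pointer into HotSpot's address type. A rough equivalent, for orientation only (the exact macro lives in the shared utilities headers):

    #include <cstdint>

    typedef unsigned char* address_sketch;   // stand-in for HotSpot's address
    #define CAST_FROM_FN_PTR_SKETCH(new_type, fn) \
      ((new_type)((uintptr_t)(fn)))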
 780 
 781 
 782 // Unlocks an object. Used in monitorexit bytecode and
 783 // remove_activation.  Throws an IllegalMonitorStateException if object is
 784 // not locked by current thread.
 785 //
 786 // Args:
 787 //      c_rarg1: BasicObjectLock for lock
 788 //
 789 // Kills:
 790 //      r0
 791 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 792 //      rscratch1, rscratch2 (scratch regs)
 793 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 794 {
 795   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 796 

 801 
 802     const Register swap_reg   = r0;
 803     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 804     const Register obj_reg    = c_rarg3;  // Will contain the oop
 805     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 806 
 807     save_bcp(); // Save in case of exception
 808 
 809     if (LockingMode != LM_LIGHTWEIGHT) {
 810       // Convert from BasicObjectLock structure to object and BasicLock
 811       // structure. Store the BasicLock address into %r0
 812       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 813     }
 814 
 815     // Load oop into obj_reg(%c_rarg3)
 816     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 817 
 818     // Free entry
 819     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 820 
 821     Label slow_case;
 822     if (LockingMode == LM_LIGHTWEIGHT) {
 823       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 824       b(done);
 825     } else if (LockingMode == LM_LEGACY) {
 826       // Load the old header from BasicLock structure
 827       ldr(header_reg, Address(swap_reg,
 828                               BasicLock::displaced_header_offset_in_bytes()));
 829 
 830       // Test for recursion
 831       cbz(header_reg, done);
 832 
 833       // Atomic swap back the old header
 834       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, &slow_case);
 835     }
 836 
 837     bind(count);
 838     dec_held_monitor_count();
 839     b(done);
 840 
 841     bind(slow_case);
 842     // Call the runtime routine for slow case.
 843     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 844     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 845     bind(done);
 846     restore_bcp();
 847   }
 848 }
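Where the old version bumped the counter in-line with increment(Address(rthread, JavaThread::held_monitor_count_offset())), this version routes both directions through inc_held_monitor_count()/dec_held_monitor_count(). The bookkeeping amounts to a per-thread counter update; in plain C++ terms (sketch only; the real helpers emit assembly against rthread):

    #include <cstdint>

    struct ThreadSketch { intptr_t held_monitor_count; };

    static inline void inc_held_monitor_count_sketch(ThreadSketch* t) {
      t->held_monitor_count++;   // lock_object's count path
    }
    static inline void dec_held_monitor_count_sketch(ThreadSketch* t) {
      t->held_monitor_count--;   // unlock_object's count path
    }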
 849 
 850 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 851                                                          Label& zero_continue) {
 852   assert(ProfileInterpreter, "must be profiling interpreter");
 853   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 854   cbz(mdp, zero_continue);
 855 }
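test_method_data_pointer simply loads the saved MDP from the interpreter frame and branches out when it is null; the predicate it implements, rendered in plain C++ (hypothetical frame type):

    struct InterpreterFrameSketch { void* mdp; };

    static inline bool has_method_data(const InterpreterFrameSketch& f) {
      return f.mdp != nullptr;   // false corresponds to the cbz to zero_continue
    }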
 856 
 857 // Set the method data pointer for the current bcp.
 858 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 859   assert(ProfileInterpreter, "must be profiling interpreter");
 860   Label set_mdp;
 861   stp(r0, r1, Address(pre(sp, -2 * wordSize)));
 862 
 863   // Test MDO to avoid the call if it is null.
 864   ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));