
src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp


 680 
 681     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 682     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 683     const int mark_offset = lock_offset +
 684                             BasicLock::displaced_header_offset_in_bytes();
 685 
 686     Label slow_case;
 687 
 688     // Load object pointer into obj_reg %c_rarg3
 689     ldr(obj_reg, Address(lock_reg, obj_offset));
 690 
 691     if (DiagnoseSyncOnValueBasedClasses != 0) {
 692       load_klass(tmp, obj_reg);
 693       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 694       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 695       br(Assembler::NE, slow_case);
 696     }
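When DiagnoseSyncOnValueBasedClasses is non-zero, the sequence above loads the object's klass, reads its access flags, and diverts to the slow path when the value-based bit is set, so the runtime can log or fail the synchronization. A minimal sketch of the test; the bit position is illustrative, the real JVM_ACC_IS_VALUE_BASED_CLASS constant comes from HotSpot's access-flag definitions:

    #include <stdint.h>

    // Illustrative bit position only; not HotSpot's actual constant.
    constexpr uint32_t kValueBasedFlag = 1u << 27;

    // Mirrors the load_klass/ldrw/tstw sequence: fetch the klass's access
    // flags and test the value-based bit.
    inline bool divert_to_slow_path(uint32_t access_flags, int diagnose_mode) {
      return diagnose_mode != 0 && (access_flags & kValueBasedFlag) != 0;
    }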
 697 
 698     if (LockingMode == LM_LIGHTWEIGHT) {
 699       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 700       b(count);
 701     } else if (LockingMode == LM_LEGACY) {
 702       // Load (object->mark() | 1) into swap_reg
 703       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 704       orr(swap_reg, rscratch1, 1);
 705 
 706       // Save (object->mark() | 1) into BasicLock's displaced header
 707       str(swap_reg, Address(lock_reg, mark_offset));
 708 
 709       assert(lock_offset == 0,
 710              "displaced header must be first word in BasicObjectLock");
 711 
 712       Label fail;
 713       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 714 
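Lines 702-713 are the classic displaced-header ("stack lock") acquire: the mark word with the unlocked bit set is saved into the BasicLock slot, then a CAS tries to swing the object's mark word to point at that slot. A compilable sketch of the same protocol, with a hypothetical type standing in for the real BasicLock:

    #include <atomic>
    #include <stdint.h>

    // Hypothetical stand-in for BasicLock: one word for the displaced header.
    struct BasicLockSketch { uintptr_t displaced_header; };

    // Returns true on fast-path success, i.e. the CAS installed a pointer to
    // 'lock' (which lives on the owner's stack) in the object's mark word.
    bool try_stack_lock(std::atomic<uintptr_t>* obj_mark, BasicLockSketch* lock) {
      uintptr_t mark = obj_mark->load() | 1;   // orr(swap_reg, rscratch1, 1)
      lock->displaced_header = mark;           // str(swap_reg, ... mark_offset)
      uintptr_t expected = mark;               // CAS succeeds only if still unlocked
      return obj_mark->compare_exchange_strong(
          expected, reinterpret_cast<uintptr_t>(lock));
    }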
 715       // Fast check for recursive lock.
 716       //
 717       // Can apply the optimization only if this is a stack lock
 718       // allocated in this thread. For efficiency, we can focus on
 719       // recently allocated stack locks (instead of reading the stack
 720       // base and checking whether 'mark' points inside the current

 730       // because we have guard pages at the end of all stacks. Hence, if
 731       // we go over the stack base and hit the stack of another thread,
 732       // this should not be in a writeable area that could contain a
 733       // stack lock allocated by that thread. As a consequence, a stack
 734       // lock less than page size away from sp is guaranteed to be
 735       // owned by the current thread.
 736       //
 737       // These 3 tests can be done by evaluating the following
 738       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 739       // assuming both stack pointer and pagesize have their
 740       // least significant 3 bits clear.
 741       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 742       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 743       // copy
 744       mov(rscratch1, sp);
 745       sub(swap_reg, swap_reg, rscratch1);
 746       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 747 
 748       // Save the test result; for the recursive case, the result is zero
 749       str(swap_reg, Address(lock_reg, mark_offset));
 750       br(Assembler::EQ, count);
 751     }
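The masked-subtract trick described in the comment is easy to sanity-check in isolation. With mask = 7 - page_size, the AND is zero exactly when mark - sp is 8-byte aligned, non-negative, and smaller than one page. A small self-contained check, assuming a 4 KiB page to match the 3-bits-clear preconditions stated above:

    #include <assert.h>
    #include <stdint.h>

    // ((mark - sp) & (7 - page)) == 0  <=>  mark is 8-aligned and
    // sp <= mark < sp + page (unsigned wraparound makes mark < sp fail too).
    static bool recent_stack_lock(uintptr_t mark, uintptr_t sp, uintptr_t page) {
      return ((mark - sp) & (7 - page)) == 0;
    }

    int main() {
      const uintptr_t page = 4096, sp = 0x7ffff000;
      assert( recent_stack_lock(sp + 0x100, sp, page)); // aligned, within a page
      assert(!recent_stack_lock(sp + 0x104, sp, page)); // 4-byte aligned only
      assert(!recent_stack_lock(sp + page,  sp, page)); // a full page away
      assert(!recent_stack_lock(sp - 8,     sp, page)); // below sp
      return 0;
    }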
 752     bind(slow_case);
 753 
 754     // Call the runtime routine for slow case
 755     if (LockingMode == LM_LIGHTWEIGHT) {
 756       call_VM(noreg,
 757               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
 758               obj_reg);
 759     } else {
 760       call_VM(noreg,
 761               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 762               lock_reg);
 763     }
 764     b(done);
 765 
 766     bind(count);
 767     increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 768 
 769     bind(done);
 770   }
 771 }
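For contrast with the legacy path, the LM_LIGHTWEIGHT branch delegates to lightweight_lock, which, in rough outline (an assumption-level summary, not the MacroAssembler code), CASes the mark word's lock bits from unlocked to locked and records the oop on a per-thread lock stack; anything else, including an already-inflated mark, falls into slow_case:

    #include <atomic>
    #include <stdint.h>
    #include <vector>

    // Sketch only; constants and structure are illustrative, not HotSpot's.
    constexpr uintptr_t kLockBits   = 0b11;
    constexpr uintptr_t kUnlocked   = 0b01;
    constexpr uintptr_t kFastLocked = 0b00;

    struct ThreadSketch { std::vector<const void*> lock_stack; };

    bool lightweight_lock_sketch(std::atomic<uintptr_t>* mark, const void* obj,
                                 ThreadSketch* self) {
      uintptr_t m = mark->load();
      if ((m & kLockBits) != kUnlocked)
        return false;                           // inflated or locked: slow path
      uintptr_t locked = (m & ~kLockBits) | kFastLocked;
      if (!mark->compare_exchange_strong(m, locked))
        return false;                           // lost the race: slow path
      self->lock_stack.push_back(obj);          // ownership is the stack entry
      return true;
    }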
 772 
 773 
 774 // Unlocks an object. Used in monitorexit bytecode and
 775 // remove_activation.  Throws an IllegalMonitorStateException if object is
 776 // not locked by current thread.
 777 //
 778 // Args:
 779 //      c_rarg1: BasicObjectLock for lock
 780 //
 781 // Kills:
 782 //      r0
 783 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 784 //      rscratch1, rscratch2 (scratch regs)
 785 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 786 {
 787   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");
 788 

 793 
 794     const Register swap_reg   = r0;
 795     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 796     const Register obj_reg    = c_rarg3;  // Will contain the oop
 797     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 798 
 799     save_bcp(); // Save in case of exception
 800 
 801     if (LockingMode != LM_LIGHTWEIGHT) {
 802       // Convert from BasicObjectLock structure to object and BasicLock
 803     // structure. Store the BasicLock address into %r0
 804       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 805     }
 806 
 807     // Load oop into obj_reg(%c_rarg3)
 808     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 809 
 810     // Free entry
 811     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 812 
 813     if (LockingMode == LM_LIGHTWEIGHT) {
 814       Label slow_case;
 815       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 816       b(count);
 817       bind(slow_case);
 818     } else if (LockingMode == LM_LEGACY) {
 819       // Load the old header from BasicLock structure
 820       ldr(header_reg, Address(swap_reg,
 821                               BasicLock::displaced_header_offset_in_bytes()));
 822 
 823       // Test for recursion
 824       cbz(header_reg, count);
 825 
 826       // Atomic swap back the old header
 827       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 828     }
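The LM_LEGACY release mirrors the acquire sketched earlier: a null displaced header marks a recursive enter (nothing to restore), otherwise the saved header is CASed back into the object's mark word; a failed CAS means the lock was inflated in the meantime and the runtime must finish the exit. A sketch with the same hypothetical type:

    #include <atomic>
    #include <stdint.h>

    struct BasicLockSketch { uintptr_t displaced_header; };

    // Returns true when the fast path released the lock (or it was recursive);
    // false sends the caller to InterpreterRuntime::monitorexit.
    bool try_stack_unlock(std::atomic<uintptr_t>* obj_mark, BasicLockSketch* lock) {
      uintptr_t saved = lock->displaced_header;
      if (saved == 0)
        return true;                            // recursive case: the cbz above
      uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
      return obj_mark->compare_exchange_strong(expected, saved);
    }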
 829     // Call the runtime routine for slow case.
 830     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 831     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 832     b(done);
 833 
 834     bind(count);
 835     decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 836 
 837     bind(done);
 838     restore_bcp();
 839   }
 840 }
 841 
 842 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 843                                                          Label& zero_continue) {
 844   assert(ProfileInterpreter, "must be profiling interpreter");
 845   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 846   cbz(mdp, zero_continue);
 847 }
 848 
 849 // Set the method data pointer for the current bcp.
 850 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 851   assert(ProfileInterpreter, "must be profiling interpreter");
 852   Label set_mdp;
 853   stp(r0, r1, Address(pre(sp, -2 * wordSize)));
 854 
 855   // Test MDO to avoid the call if it is null.
 856   ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
====== old version of src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp above; patched version below ======
 680 
 681     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 682     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 683     const int mark_offset = lock_offset +
 684                             BasicLock::displaced_header_offset_in_bytes();
 685 
 686     Label slow_case;
 687 
 688     // Load object pointer into obj_reg %c_rarg3
 689     ldr(obj_reg, Address(lock_reg, obj_offset));
 690 
 691     if (DiagnoseSyncOnValueBasedClasses != 0) {
 692       load_klass(tmp, obj_reg);
 693       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 694       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 695       br(Assembler::NE, slow_case);
 696     }
 697 
 698     if (LockingMode == LM_LIGHTWEIGHT) {
 699       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 700       b(done);
 701     } else if (LockingMode == LM_LEGACY) {
 702       // Load (object->mark() | 1) into swap_reg
 703       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 704       orr(swap_reg, rscratch1, 1);
 705 
 706       // Save (object->mark() | 1) into BasicLock's displaced header
 707       str(swap_reg, Address(lock_reg, mark_offset));
 708 
 709       assert(lock_offset == 0,
 710              "displaced header must be first word in BasicObjectLock");
 711 
 712       Label fail;
 713       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 714 
 715       // Fast check for recursive lock.
 716       //
 717       // Can apply the optimization only if this is a stack lock
 718       // allocated in this thread. For efficiency, we can focus on
 719       // recently allocated stack locks (instead of reading the stack
 720       // base and checking whether 'mark' points inside the current

 730       // because we have guard pages at the end of all stacks. Hence, if
 731       // we go over the stack base and hit the stack of another thread,
 732       // this should not be in a writeable area that could contain a
 733       // stack lock allocated by that thread. As a consequence, a stack
 734       // lock less than page size away from sp is guaranteed to be
 735       // owned by the current thread.
 736       //
 737       // These 3 tests can be done by evaluating the following
 738       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 739       // assuming both stack pointer and pagesize have their
 740       // least significant 3 bits clear.
 741       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 742       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 743       // copy
 744       mov(rscratch1, sp);
 745       sub(swap_reg, swap_reg, rscratch1);
 746       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 747 
 748       // Save the test result; for the recursive case, the result is zero
 749       str(swap_reg, Address(lock_reg, mark_offset));
 750       br(Assembler::NE, slow_case);
 751       b(done);
 752     }
 753 
 754     bind(count);
 755     inc_held_monitor_count();
 756     b(done);
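The patched version replaces the old inline increment of the JavaThread field with the inc_held_monitor_count() helper (matched by dec_held_monitor_count() in unlock_object below), and, per the branch changes above, no longer routes the lightweight and recursive cases through count at all. Conceptually the counter tracks monitors held by the current thread; a sketch, assuming it is simple per-thread bookkeeping as the surrounding code suggests:

    #include <stdint.h>

    // Sketch only: the field mirrors JavaThread::held_monitor_count_offset()
    // used by the old version above. A nonzero count is one signal the VM
    // can use to treat a virtual thread as pinned (assumption).
    struct ThreadSketch { int64_t held_monitor_count = 0; };

    inline void inc_held_monitor_count(ThreadSketch* t) { ++t->held_monitor_count; }
    inline void dec_held_monitor_count(ThreadSketch* t) { --t->held_monitor_count; }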
 757 
 758     bind(slow_case);
 759 
 760     //    push_cont_fastpath();
 761     // Call the runtime routine for slow case
 762     if (LockingMode == LM_LIGHTWEIGHT) {
 763       call_VM(noreg,
 764               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
 765               obj_reg);
 766     } else {
 767       call_VM(noreg,
 768               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 769               lock_reg);
 770     }
 771     //    pop_cont_fastpath();
 772     bind(done);
 773   }
 774 }
 775 
 776 
 777 // Unlocks an object. Used in monitorexit bytecode and
 778 // remove_activation.  Throws an IllegalMonitorStateException if object is
 779 // not locked by current thread.
 780 //
 781 // Args:
 782 //      c_rarg1: BasicObjectLock for lock
 783 //
 784 // Kills:
 785 //      r0
 786 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 787 //      rscratch1, rscratch2 (scratch regs)
 788 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 789 {
 790   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");
 791 

 796 
 797     const Register swap_reg   = r0;
 798     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 799     const Register obj_reg    = c_rarg3;  // Will contain the oop
 800     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 801 
 802     save_bcp(); // Save in case of exception
 803 
 804     if (LockingMode != LM_LIGHTWEIGHT) {
 805       // Convert from BasicObjectLock structure to object and BasicLock
 806       // structure. Store the BasicLock address into %r0
 807       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 808     }
 809 
 810     // Load oop into obj_reg(%c_rarg3)
 811     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 812 
 813     // Free entry
 814     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 815 
 816     Label slow_case;
 817     if (LockingMode == LM_LIGHTWEIGHT) {
 818       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 819       b(done);
 820     } else if (LockingMode == LM_LEGACY) {
 821       // Load the old header from BasicLock structure
 822       ldr(header_reg, Address(swap_reg,
 823                               BasicLock::displaced_header_offset_in_bytes()));
 824 
 825       // Test for recursion
 826       cbz(header_reg, done);
 827 
 828       // Atomic swap back the old header
 829       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, &slow_case);
 830     }
 831 
 832     bind(count);
 833     dec_held_monitor_count();
 834     b(done);
 835 
 836     bind(slow_case);
 837     // Call the runtime routine for slow case.
 838     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 839     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 840     bind(done);
 841     restore_bcp();
 842   }
 843 }
 844 
 845 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 846                                                          Label& zero_continue) {
 847   assert(ProfileInterpreter, "must be profiling interpreter");
 848   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 849   cbz(mdp, zero_continue);
 850 }
 851 
 852 // Set the method data pointer for the current bcp.
 853 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 854   assert(ProfileInterpreter, "must be profiling interpreter");
 855   Label set_mdp;
 856   stp(r0, r1, Address(pre(sp, -2 * wordSize)));
 857 
 858   // Test MDO to avoid the call if it is null.
 859   ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));