
src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp


 649   // If we're returning to interpreted code we will shortly be
 650   // adjusting SP to allow some space for ESP.  If we're returning to
 651   // compiled code the saved sender SP was saved in sender_sp, so this
 652   // restores it.
 653   andr(sp, esp, -16);
 654 }
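
For reference on the andr(sp, esp, -16) above: AArch64 requires sp to be 16-byte
aligned whenever it is used for a memory access, so the interpreter rounds the
expression stack pointer down to the nearest 16-byte boundary before moving it
into sp. A minimal standalone sketch of the same arithmetic, assuming a
hypothetical align_down_16 helper used for illustration only:

#include <cstdint>
#include <cassert>

// Same effect as AND-ing with -16: clear the low four bits, giving the
// highest 16-byte-aligned address that is <= the input.
static inline uintptr_t align_down_16(uintptr_t esp) {
  return esp & ~uintptr_t(15);
}

int main() {
  assert(align_down_16(0x7ffc12349abcULL) == 0x7ffc12349ab0ULL);
  assert(align_down_16(0x1000ULL) == 0x1000ULL);  // already aligned: unchanged
  return 0;
}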
 655 
 656 // Lock object
 657 //
 658 // Args:
 659 //      c_rarg1: BasicObjectLock to be used for locking
 660 //
 661 // Kills:
 662 //      r0
 663 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
 664 //      rscratch1, rscratch2 (scratch regs)
 665 void InterpreterMacroAssembler::lock_object(Register lock_reg)
 666 {
 667   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 668   if (LockingMode == LM_MONITOR) {

 669     call_VM(noreg,
 670             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 671             lock_reg);

 672   } else {
 673     Label count, done;
 674 
 675     const Register swap_reg = r0;
 676     const Register tmp = c_rarg2;
 677     const Register obj_reg = c_rarg3; // Will contain the oop
 678     const Register tmp2 = c_rarg4;
 679     const Register tmp3 = c_rarg5;
 680 
 681     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 682     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 683     const int mark_offset = lock_offset +
 684                             BasicLock::displaced_header_offset_in_bytes();
 685 
 686     Label slow_case;
 687 
 688     // Load object pointer into obj_reg %c_rarg3
 689     ldr(obj_reg, Address(lock_reg, obj_offset));
 690 
 691     if (DiagnoseSyncOnValueBasedClasses != 0) {
 692       load_klass(tmp, obj_reg);
 693       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 694       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 695       br(Assembler::NE, slow_case);
 696     }
 697 
 698     if (LockingMode == LM_LIGHTWEIGHT) {
 699       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 700       b(count);
 701     } else if (LockingMode == LM_LEGACY) {
 702       // Load (object->mark() | 1) into swap_reg
 703       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 704       orr(swap_reg, rscratch1, 1);
 705 
 706       // Save (object->mark() | 1) into BasicLock's displaced header
 707       str(swap_reg, Address(lock_reg, mark_offset));
 708 
 709       assert(lock_offset == 0,
 710              "displaced header must be first word in BasicObjectLock");
 711 
 712       Label fail;
 713       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 714 
 715       // Fast check for recursive lock.
 716       //
 717       // Can apply the optimization only if this is a stack lock
 718       // allocated in this thread. For efficiency, we can focus on
 719       // recently allocated stack locks (instead of reading the stack
 720       // base and checking whether 'mark' points inside the current

 730       // because we have guard pages at the end of all stacks. Hence, if
 731       // we go over the stack base and hit the stack of another thread,
 732       // this should not be in a writeable area that could contain a
 733       // stack lock allocated by that thread. As a consequence, a stack
 734       // lock less than page size away from sp is guaranteed to be
 735       // owned by the current thread.
 736       //
 737       // These 3 tests can be done by evaluating the following
 738       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 739       // assuming both stack pointer and pagesize have their
 740       // least significant 3 bits clear.
 741       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 742       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 743       // copy
 744       mov(rscratch1, sp);
 745       sub(swap_reg, swap_reg, rscratch1);
 746       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 747 
 748       // Save the test result; for the recursive case, the result is zero
 749       str(swap_reg, Address(lock_reg, mark_offset));
 750       br(Assembler::EQ, count);

 751     }





 752     bind(slow_case);
 753 
 754     // Call the runtime routine for slow case

 755     if (LockingMode == LM_LIGHTWEIGHT) {
 756       call_VM(noreg,
 757               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
 758               obj_reg);
 759     } else {
 760       call_VM(noreg,
 761               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 762               lock_reg);
 763     }
 764     b(done);
 765 
 766     bind(count);
 767     increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 768 
 769     bind(done);
 770   }
 771 }
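
A note on the recursive-lock fast check in the LM_LEGACY path above: the single
ands(swap_reg, swap_reg, 7 - os::vm_page_size()) folds three tests into one
mask, producing zero exactly when the mark is 8-byte aligned, not below sp, and
less than one page above sp. A minimal sketch of that predicate in plain C++,
assuming a 4 KiB page; the is_recursive_stack_lock name and the sample
addresses are illustrative only:

#include <cstdint>
#include <cassert>

// Zero result <=> 'mark' is 8-byte aligned and lies in [sp, sp + page_size),
// i.e. it points at a stack lock recently allocated by the current thread,
// which is treated as the recursive case.
static bool is_recursive_stack_lock(uintptr_t mark, uintptr_t sp,
                                    uintptr_t page_size = 4096) {
  return ((mark - sp) & (7 - page_size)) == 0;
}

int main() {
  uintptr_t sp = 0x10000;
  assert( is_recursive_stack_lock(sp + 0x40,   sp));  // same page, aligned
  assert(!is_recursive_stack_lock(sp + 0x43,   sp));  // low 3 bits set
  assert(!is_recursive_stack_lock(sp + 0x2000, sp));  // a full page or more away
  assert(!is_recursive_stack_lock(sp - 0x40,   sp));  // below sp
  return 0;
}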
 772 
 773 
 774 // Unlocks an object. Used in monitorexit bytecode and
 775 // remove_activation.  Throws an IllegalMonitorStateException if the object is
 776 // not locked by the current thread.
 777 //
 778 // Args:
 779 //      c_rarg1: BasicObjectLock for lock
 780 //
 781 // Kills:
 782 //      r0
 783 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 784 //      rscratch1, rscratch2 (scratch regs)
 785 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 786 {
 787   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 788 

 793 
 794     const Register swap_reg   = r0;
 795     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 796     const Register obj_reg    = c_rarg3;  // Will contain the oop
 797     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 798 
 799     save_bcp(); // Save in case of exception
 800 
 801     if (LockingMode != LM_LIGHTWEIGHT) {
 802       // Convert from BasicObjectLock structure to object and BasicLock
 803       // structure. Store the BasicLock address into %r0
 804       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 805     }
 806 
 807     // Load oop into obj_reg(%c_rarg3)
 808     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 809 
 810     // Free entry
 811     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 812 

 813     if (LockingMode == LM_LIGHTWEIGHT) {
 814       Label slow_case;
 815       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 816       b(count);
 817       bind(slow_case);
 818     } else if (LockingMode == LM_LEGACY) {
 819       // Load the old header from BasicLock structure
 820       ldr(header_reg, Address(swap_reg,
 821                               BasicLock::displaced_header_offset_in_bytes()));
 822 
 823       // Test for recursion
 824       cbz(header_reg, count);
 825 
 826       // Atomic swap back the old header
 827       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 828     }
 829     // Call the runtime routine for slow case.
 830     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 831     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 832     b(done);
 833 
 834     bind(count);
 835     decrement(Address(rthread, JavaThread::held_monitor_count_offset()));

 836 




 837     bind(done);
 838     restore_bcp();
 839   }
 840 }
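
For the LM_LEGACY branch of unlock_object above, the displaced header saved in
the BasicLock decides everything: zero means this was a recursive stack lock
and there is nothing to restore; otherwise the saved header must be swapped
back into the object's mark word, and only a failed swap falls through to
InterpreterRuntime::monitorexit. A rough pseudocode sketch of that decision,
where cas_mark_word and runtime_monitorexit are hypothetical placeholders for
the assembly emitted above rather than real HotSpot APIs:

#include <cstdint>

struct BasicLockSketch { uintptr_t displaced_header; };

bool legacy_unlock(uintptr_t* obj_mark, BasicLockSketch* lock,
                   bool (*cas_mark_word)(uintptr_t* addr, uintptr_t expected, uintptr_t desired),
                   void (*runtime_monitorexit)()) {
  uintptr_t dh = lock->displaced_header;
  if (dh == 0) {
    return true;                      // recursive case: nothing to restore
  }
  // While stack-locked, the object's mark word holds the BasicLock address,
  // so that is the expected value; the desired value is the saved header.
  if (cas_mark_word(obj_mark, (uintptr_t)lock, dh)) {
    return true;                      // fast path: lock released
  }
  runtime_monitorexit();              // inflated or contended: slow path
  return false;
}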
 841 
 842 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 843                                                          Label& zero_continue) {
 844   assert(ProfileInterpreter, "must be profiling interpreter");
 845   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 846   cbz(mdp, zero_continue);
 847 }
 848 
 849 // Set the method data pointer for the current bcp.
 850 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 851   assert(ProfileInterpreter, "must be profiling interpreter");
 852   Label set_mdp;
 853   stp(r0, r1, Address(pre(sp, -2 * wordSize)));
 854 
 855   // Test MDO to avoid the call if it is null.
 856   ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));

 649   // If we're returning to interpreted code we will shortly be
 650   // adjusting SP to allow some space for ESP.  If we're returning to
 651   // compiled code the saved sender SP was saved in sender_sp, so this
 652   // restores it.
 653   andr(sp, esp, -16);
 654 }
 655 
 656 // Lock object
 657 //
 658 // Args:
 659 //      c_rarg1: BasicObjectLock to be used for locking
 660 //
 661 // Kills:
 662 //      r0
 663 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
 664 //      rscratch1, rscratch2 (scratch regs)
 665 void InterpreterMacroAssembler::lock_object(Register lock_reg)
 666 {
 667   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 668   if (LockingMode == LM_MONITOR) {
 669     push_cont_fastpath();
 670     call_VM(noreg,
 671             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 672             lock_reg);
 673     pop_cont_fastpath();
 674   } else {
 675     Label count, done;
 676 
 677     const Register swap_reg = r0;
 678     const Register tmp = c_rarg2;
 679     const Register obj_reg = c_rarg3; // Will contain the oop
 680     const Register tmp2 = c_rarg4;
 681     const Register tmp3 = c_rarg5;
 682 
 683     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 684     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 685     const int mark_offset = lock_offset +
 686                             BasicLock::displaced_header_offset_in_bytes();
 687 
 688     Label slow_case;
 689 
 690     // Load object pointer into obj_reg %c_rarg3
 691     ldr(obj_reg, Address(lock_reg, obj_offset));
 692 
 693     if (DiagnoseSyncOnValueBasedClasses != 0) {
 694       load_klass(tmp, obj_reg);
 695       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 696       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 697       br(Assembler::NE, slow_case);
 698     }
 699 
 700     if (LockingMode == LM_LIGHTWEIGHT) {
 701       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 702       b(done);
 703     } else if (LockingMode == LM_LEGACY) {
 704       // Load (object->mark() | 1) into swap_reg
 705       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 706       orr(swap_reg, rscratch1, 1);
 707 
 708       // Save (object->mark() | 1) into BasicLock's displaced header
 709       str(swap_reg, Address(lock_reg, mark_offset));
 710 
 711       assert(lock_offset == 0,
 712              "displaced header must be first word in BasicObjectLock");
 713 
 714       Label fail;
 715       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 716 
 717       // Fast check for recursive lock.
 718       //
 719       // Can apply the optimization only if this is a stack lock
 720       // allocated in this thread. For efficiency, we can focus on
 721       // recently allocated stack locks (instead of reading the stack
 722       // base and checking whether 'mark' points inside the current

 732       // because we have guard pages at the end of all stacks. Hence, if
 733       // we go over the stack base and hit the stack of another thread,
 734       // this should not be in a writeable area that could contain a
 735       // stack lock allocated by that thread. As a consequence, a stack
 736       // lock less than page size away from sp is guaranteed to be
 737       // owned by the current thread.
 738       //
 739       // These 3 tests can be done by evaluating the following
 740       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 741       // assuming both stack pointer and pagesize have their
 742       // least significant 3 bits clear.
 743       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 744       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 745       // copy
 746       mov(rscratch1, sp);
 747       sub(swap_reg, swap_reg, rscratch1);
 748       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 749 
 750       // Save the test result; for the recursive case, the result is zero
 751       str(swap_reg, Address(lock_reg, mark_offset));
 752       br(Assembler::NE, slow_case);
 753       b(done);
 754     }
 755 
 756     bind(count);
 757     inc_held_monitor_count();
 758     b(done);
 759 
 760     bind(slow_case);
 761 
 762     // Call the runtime routine for slow case
 763     push_cont_fastpath();
 764     if (LockingMode == LM_LIGHTWEIGHT) {
 765       call_VM(noreg,
 766               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
 767               obj_reg);
 768     } else {
 769       call_VM(noreg,
 770               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 771               lock_reg);
 772     }
 773     pop_cont_fastpath();




 774     bind(done);
 775   }
 776 }
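
The updated lock_object above keeps one overall shape: LM_MONITOR always calls
into the runtime, otherwise a fast-path attempt either succeeds (with
inc_held_monitor_count bookkeeping for a fresh legacy stack lock) or falls into
slow_case and the InterpreterRuntime::monitorenter call. A hedged outline of
that control flow; the enum and function-pointer parameters are placeholders
standing in for the generated assembly, not real HotSpot interfaces:

enum class LockingModeSketch { Monitor, Legacy, Lightweight };
enum class FastLockResult    { Acquired, Recursive, Failed };

void lock_object_flow(LockingModeSketch mode,
                      FastLockResult (*try_fast_lock)(),  // lightweight or legacy attempt
                      void (*count_monitor)(),            // inc_held_monitor_count
                      void (*monitorenter_slow)()) {      // runtime monitorenter call
  if (mode == LockingModeSketch::Monitor) {
    monitorenter_slow();                  // no fast path in this mode
    return;
  }
  FastLockResult r = try_fast_lock();
  if (r == FastLockResult::Failed) {
    monitorenter_slow();                  // contended or inflated: slow path
  } else if (r == FastLockResult::Acquired && mode == LockingModeSketch::Legacy) {
    count_monitor();                      // bookkeeping for a fresh legacy stack lock
  }
  // A recursive legacy acquire falls through to 'done' without extra bookkeeping.
}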
 777 
 778 
 779 // Unlocks an object. Used in monitorexit bytecode and
 780 // remove_activation.  Throws an IllegalMonitorStateException if the object is
 781 // not locked by the current thread.
 782 //
 783 // Args:
 784 //      c_rarg1: BasicObjectLock for lock
 785 //
 786 // Kills:
 787 //      r0
 788 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 789 //      rscratch1, rscratch2 (scratch regs)
 790 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 791 {
 792   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 793 

 798 
 799     const Register swap_reg   = r0;
 800     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 801     const Register obj_reg    = c_rarg3;  // Will contain the oop
 802     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 803 
 804     save_bcp(); // Save in case of exception
 805 
 806     if (LockingMode != LM_LIGHTWEIGHT) {
 807       // Convert from BasicObjectLock structure to object and BasicLock
 808       // structure. Store the BasicLock address into %r0
 809       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 810     }
 811 
 812     // Load oop into obj_reg(%c_rarg3)
 813     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 814 
 815     // Free entry
 816     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 817 
 818     Label slow_case;
 819     if (LockingMode == LM_LIGHTWEIGHT) {

 820       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 821       b(done);

 822     } else if (LockingMode == LM_LEGACY) {
 823       // Load the old header from BasicLock structure
 824       ldr(header_reg, Address(swap_reg,
 825                               BasicLock::displaced_header_offset_in_bytes()));
 826 
 827       // Test for recursion
 828       cbz(header_reg, done);
 829 
 830       // Atomic swap back the old header
 831       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, &slow_case);
 832     }




 833 
 834     bind(count);
 835     dec_held_monitor_count();
 836     b(done);
 837 
 838     bind(slow_case);
 839     // Call the runtime routine for slow case.
 840     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 841     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 842     bind(done);
 843     restore_bcp();
 844   }
 845 }
 846 
 847 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 848                                                          Label& zero_continue) {
 849   assert(ProfileInterpreter, "must be profiling interpreter");
 850   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 851   cbz(mdp, zero_continue);
 852 }
 853 
 854 // Set the method data pointer for the current bcp.
 855 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 856   assert(ProfileInterpreter, "must be profiling interpreter");
 857   Label set_mdp;
 858   stp(r0, r1, Address(pre(sp, -2 * wordSize)));
 859 
 860   // Test MDO to avoid the call if it is null.
 861   ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));