< prev index next >

src/hotspot/cpu/arm/interp_masm_arm.cpp

Print this page

 846 // variable _do_not_unlock_if_synchronized to true. The remove_activation will
 847 // check this flag.
 848 void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Register tmp) {
 849   const Address do_not_unlock_if_synchronized(Rthread,
 850                          JavaThread::do_not_unlock_if_synchronized_offset());
 851   if (flag) {
 852     mov(tmp, 1);
 853     strb(tmp, do_not_unlock_if_synchronized);
 854   } else {
 855     strb(zero_register(tmp), do_not_unlock_if_synchronized);
 856   }
 857 }
 858 
 859 // Lock object
 860 //
 861 // Argument: R1 : Points to BasicObjectLock to be used for locking.
 862 // Must be initialized with object to lock.
 863 // Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::lock_object(Register Rlock) {
  // Emits the monitorenter sequence for the template interpreter.
  // Rlock must point to a BasicObjectLock whose obj field already holds
  // the object to lock. Fast path: stack-lock via CAS; falls back to the
  // runtime for contended, value-based, or otherwise complex cases.
  assert(Rlock == R1, "the second argument");

  if (UseHeavyMonitors) {
    // -XX:+UseHeavyMonitors: skip all fast paths, always enter via the runtime.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
  } else {
    Label done;

    const Register Robj = R2;
    const Register Rmark = R3;
    assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);

    // Offsets within the BasicObjectLock and its embedded BasicLock.
    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();

    Label already_locked, slow_case;

    // Load object pointer
    ldr(Robj, Address(Rlock, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      // Synchronizing on value-based classes is diagnosed in the runtime:
      // divert to the slow path if the klass carries the marker flag.
      load_klass(R0, Robj);
      ldr_u32(R0, Address(R0, Klass::access_flags_offset()));
      tst(R0, JVM_ACC_IS_VALUE_BASED_CLASS);
      b(slow_case, ne);
    }

    // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
    // That would be acceptable as either CAS or slow case path is taken in that case.
    // Exception to that is if the object is locked by the calling thread, then the recursive test will pass (guaranteed as
    // loads are satisfied from a store queue if performed on the same processor).

    assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
    ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));

    // Test if object is already locked
    tst(Rmark, markWord::unlocked_value);
    b(already_locked, eq);

    // Save old object->mark() into BasicLock's displaced header
    str(Rmark, Address(Rlock, mark_offset));

    // Attempt to install Rlock as the object's mark; branches to slow_case
    // on failure, falls through (then jumps to done) on success.
    cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);

    b(done);

    // If we got here that means the object is locked by either the calling thread or another thread.
    bind(already_locked);
    // Handling of locked objects: recursive locks and slow case.

    // Fast check for recursive lock.
    //
    // Can apply the optimization only if this is a stack lock
    // allocated in this thread. For efficiency, we can focus on
    // recently allocated stack locks (instead of reading the stack
    // base and checking whether 'mark' points inside the current
    // thread stack):
    //  1) (mark & 3) == 0
    //  2) SP <= mark < SP + os::pagesize()
    //
    // Warning: SP + os::pagesize can overflow the stack base. We must
    // neither apply the optimization for an inflated lock allocated
    // just above the thread stack (this is why condition 1 matters)
    // nor apply the optimization if the stack lock is inside the stack
    // of another thread. The latter is avoided even in case of overflow
    // because we have guard pages at the end of all stacks. Hence, if
    // we go over the stack base and hit the stack of another thread,
    // this should not be in a writeable area that could contain a
    // stack lock allocated by that thread. As a consequence, a stack
    // lock less than page size away from SP is guaranteed to be
    // owned by the current thread.
    //
    // Note: assuming SP is aligned, we can check the low bits of
    // (mark-SP) instead of the low bits of mark. In that case,
    // assuming page size is a power of 2, we can merge the two
    // conditions into a single test:
    // => ((mark - SP) & (3 - os::pagesize())) == 0

    // (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
    // Check independently the low bits and the distance to SP.
    // -1- test low 2 bits (shifted to the top so the flags reflect them)
    movs(R0, AsmOperand(Rmark, lsl, 30));
    // -2- test (mark - SP) if the low two bits are 0
    sub(R0, Rmark, SP, eq);
    movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
    // If still 'eq' then recursive locking OK: store 0 into lock record
    // (R0 is known to be 0 here, per the movs above)
    str(R0, Address(Rlock, mark_offset), eq);

    b(done, eq);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);

    bind(done);
  }
}
 963 
 964 
 965 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
 966 //
 967 // Argument: R0: Points to BasicObjectLock structure for lock
 968 // Throw an IllegalMonitorException if object is not locked by current thread
 969 // Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
  // Emits the monitorexit sequence: recursive-exit fast path, CAS release
  // of a stack lock, runtime call otherwise. Rlock points to the frame's
  // BasicObjectLock for this monitor.
  assert(Rlock == R0, "the first argument");

  if (UseHeavyMonitors) {
    // -XX:+UseHeavyMonitors: all monitors are inflated; exit via the runtime.
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
  } else {
    Label done, slow_case;

    const Register Robj = R2;
    const Register Rmark = R3;
    assert_different_registers(Robj, Rmark, Rlock, Rtemp);

    // Offsets within the BasicObjectLock and its embedded BasicLock.
    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();

    const Register Rzero = zero_register(Rtemp);

    // Load oop into Robj
    ldr(Robj, Address(Rlock, obj_offset));

    // Free entry: clear the obj slot eagerly; it is restored below if we
    // have to fall into the runtime.
    str(Rzero, Address(Rlock, obj_offset));

    // Load the old header from BasicLock structure
    ldr(Rmark, Address(Rlock, mark_offset));

    // Test for recursion (zero mark in BasicLock)
    cbz(Rmark, done);

    // With fallthrough allowed, a failed CAS drops through here (flags
    // not 'eq') instead of branching, so the slow path binds right after.
    bool allow_fallthrough_on_failure = true;

    cas_for_lock_release(Rlock, Rmark, Robj, Rtemp, slow_case, allow_fallthrough_on_failure);

    // 'eq' set by a successful release -- see matching cas_for_lock_release.
    b(done, eq);

    bind(slow_case);

    // Call the runtime routine for slow case.
    str(Robj, Address(Rlock, obj_offset)); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);

    bind(done);
  }
}
1015 
1016 
1017 // Test ImethodDataPtr.  If it is null, continue at the specified label
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the saved method-data pointer from the interpreter frame;
  // branch to zero_continue if profiling data has not been set up.
  ldr(mdp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
  cbz(mdp, zero_continue);
}
1023 
1024 
1025 // Set the method data pointer for the current bcp.
1026 // Blows volatile registers R0-R3, Rtemp, LR.
1027 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1028   assert(ProfileInterpreter, "must be profiling interpreter");
1029   Label set_mdp;
1030 
1031   // Test MDO to avoid the call if it is NULL.
1032   ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
1033   cbz(Rtemp, set_mdp);

 846 // variable _do_not_unlock_if_synchronized to true. The remove_activation will
 847 // check this flag.
void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Register tmp) {
  // Byte-sized per-thread flag, read later by remove_activation to decide
  // whether unlocking should be skipped.
  const Address do_not_unlock_if_synchronized(Rthread,
                         JavaThread::do_not_unlock_if_synchronized_offset());
  if (flag) {
    // Setting: materialize 1 in the scratch register, then store the byte.
    mov(tmp, 1);
    strb(tmp, do_not_unlock_if_synchronized);
  } else {
    // Clearing: the zero register avoids the extra mov.
    strb(zero_register(tmp), do_not_unlock_if_synchronized);
  }
}
 858 
 859 // Lock object
 860 //
 861 // Argument: R1 : Points to BasicObjectLock to be used for locking.
 862 // Must be initialized with object to lock.
 863 // Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::lock_object(Register Rlock) {
  // Emits monitorenter: currently slow path only -- the object is loaded
  // from the BasicObjectLock and handed straight to the runtime.
  assert(Rlock == R1, "the second argument");
  const Register Robj = R2;
  assert_different_registers(Robj, Rlock);
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();

  // Load object pointer
  ldr(Robj, Address(Rlock, obj_offset));

  // TODO: Implement fast-locking.
  // NOTE(review): the runtime call now receives the object itself in R0
  // rather than the BasicObjectLock pointer -- confirm that the
  // InterpreterRuntime::monitorenter entry expects an oop argument here.
  mov(R0, Robj);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), R0);
}
 877 
 878 
 879 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
 880 //
 881 // Argument: R0: Points to BasicObjectLock structure for lock
 882 // Throw an IllegalMonitorException if object is not locked by current thread
 883 // Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
  // Emits monitorexit: currently slow path only -- the object is loaded
  // from the BasicObjectLock and handed straight to the runtime.
  assert(Rlock == R0, "the first argument");
  const Register Robj = R2;
  assert_different_registers(Robj, Rlock);
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();

  // Load oop into Robj
  ldr(Robj, Address(Rlock, obj_offset));

  // TODO: Implement fast-locking.
  // NOTE(review): the runtime call receives the oop in R0, not the
  // BasicObjectLock, and the lock slot's obj field is NOT cleared here --
  // confirm the new runtime contract frees the monitor slot itself.
  mov(R0, Robj);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), R0);
}
 897 
 898 
 899 // Test ImethodDataPtr.  If it is null, continue at the specified label
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the saved method-data pointer from the interpreter frame;
  // branch to zero_continue if profiling data has not been set up.
  ldr(mdp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
  cbz(mdp, zero_continue);
}
 905 
 906 
 907 // Set the method data pointer for the current bcp.
 908 // Blows volatile registers R0-R3, Rtemp, LR.
 909 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 910   assert(ProfileInterpreter, "must be profiling interpreter");
 911   Label set_mdp;
 912 
 913   // Test MDO to avoid the call if it is NULL.
 914   ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
 915   cbz(Rtemp, set_mdp);
< prev index next >