< prev index next >

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

Print this page

 713   leave();
 714   // If we're returning to interpreted code we will shortly be
 715   // adjusting SP to allow some space for ESP.  If we're returning to
 716   // compiled code the saved sender SP was saved in sender_sp, so this
 717   // restores it.
 718   andr(sp, esp, -16);
 719 }
 720 
 721 // Lock object
 722 //
 723 // Args:
 724 //      c_rarg1: BasicObjectLock to be used for locking
 725 //
 726 // Kills:
 727 //      r0
 728 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 729 //      rscratch1, rscratch2 (scratch regs)
 730 void InterpreterMacroAssembler::lock_object(Register lock_reg)
 731 {
 732   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");







 733   if (UseHeavyMonitors) {
 734     call_VM(noreg,
 735             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 736             lock_reg);
 737   } else {
    // count: fast-path success (bump the thread's held monitor count);
    // done: common exit for both the fast and slow paths.
 738     Label count, done;
 739 
 740     const Register swap_reg = r0;
 741     const Register tmp = c_rarg2;
 742     const Register obj_reg = c_rarg3; // Will contain the oop
 743 
 744     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 745     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 746     const int mark_offset = lock_offset +
 747                             BasicLock::displaced_header_offset_in_bytes();
 748 
 749     Label slow_case;
 750 
 751     // Load object pointer into obj_reg %c_rarg3
 752     ldr(obj_reg, Address(lock_reg, obj_offset));
 753 
 754     if (DiagnoseSyncOnValueBasedClasses != 0) {
      // Synchronizing on a value-based class is diagnosed in the runtime;
      // route such objects to the slow path.
 755       load_klass(tmp, obj_reg);
 756       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 757       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 758       br(Assembler::NE, slow_case);
 759     }
 760 
 761     // Load (object->mark() | 1) into swap_reg
 762     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 763     orr(swap_reg, rscratch1, 1);
 764 
 765     // Save (object->mark() | 1) into BasicLock's displaced header
 766     str(swap_reg, Address(lock_reg, mark_offset));
 767 
 768     assert(lock_offset == 0,
 769            "displaced header must be first word in BasicObjectLock");
 770 
    // Try to CAS the object's mark word from the (unlocked) value in
    // swap_reg to the address of our BasicLock.  On success control
    // transfers to 'count'; on failure we fall through with the current
    // mark in swap_reg (see NOTE below) for the recursion check.
 772     cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
 773 
 774     // Fast check for recursive lock.
 775     //
 776     // Can apply the optimization only if this is a stack lock
 777     // allocated in this thread. For efficiency, we can focus on
 778     // recently allocated stack locks (instead of reading the stack
 779     // base and checking whether 'mark' points inside the current
 780     // thread stack):
 781     //  1) (mark & 7) == 0, and
 782     //  2) sp <= mark < mark + os::pagesize()
 783     //
 784     // Warning: sp + os::pagesize can overflow the stack base. We must
 785     // neither apply the optimization for an inflated lock allocated
 786     // just above the thread stack (this is why condition 1 matters)
 787     // nor apply the optimization if the stack lock is inside the stack
 788     // of another thread. The latter is avoided even in case of overflow
 789     // because we have guard pages at the end of all stacks. Hence, if
 790     // we go over the stack base and hit the stack of another thread,
 791     // this should not be in a writeable area that could contain a
 792     // stack lock allocated by that thread. As a consequence, a stack
 793     // lock less than page size away from sp is guaranteed to be
 794     // owned by the current thread.
 795     //
 796     // These 3 tests can be done by evaluating the following
 797     // expression: ((mark - sp) & (7 - os::vm_page_size())),
 798     // assuming both stack pointer and pagesize have their
 799     // least significant 3 bits clear.
 800     // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 801     // NOTE2: aarch64 does not like to subtract sp from rn so take a
 802     // copy
 803     mov(rscratch1, sp);
 804     sub(swap_reg, swap_reg, rscratch1);
 805     ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));
 806 
 807     // Save the test result, for recursive case, the result is zero
 808     str(swap_reg, Address(lock_reg, mark_offset));
 809     br(Assembler::EQ, count);
 810 
 811     bind(slow_case);
 812 
 813     // Call the runtime routine for slow case
 814     call_VM(noreg,
 815             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 816             lock_reg);
 817     b(done);
 818 
 819     bind(count);
 820     increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 821 
 822     bind(done);
 823   }
 824 }
 825 
 826 
 827 // Unlocks an object. Used in monitorexit bytecode and
 828 // remove_activation.  Throws an IllegalMonitorException if object is
 829 // not locked by current thread.
 830 //
 831 // Args:
 832 //      c_rarg1: BasicObjectLock for lock
 833 //
 834 // Kills:
 835 //      r0
 836 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 837 //      rscratch1, rscratch2 (scratch regs)
 838 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 839 {
 840   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 841 








 842   if (UseHeavyMonitors) {
 843     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 844   } else {
    // count: fast-path success (drop the thread's held monitor count);
    // done: common exit for both the fast and slow paths.
 845     Label count, done;
 846 
 847     const Register swap_reg   = r0;
 848     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 849     const Register obj_reg    = c_rarg3;  // Will contain the oop
 850 
 851     save_bcp(); // Save in case of exception
 852 
 853     // Convert from BasicObjectLock structure to object and BasicLock
 854     // structure Store the BasicLock address into %r0
 855     lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
 856 
 857     // Load oop into obj_reg(%c_rarg3)
 858     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
 859 
 860     // Free entry
 861     str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
 862 
 863     // Load the old header from BasicLock structure
 864     ldr(header_reg, Address(swap_reg,
 865                             BasicLock::displaced_header_offset_in_bytes()));
 866 
 867     // Test for recursion
    // A zero displaced header marks a recursive stack lock (lock_object
    // stored zero for that case); nothing to write back to the object.
 868     cbz(header_reg, count);
 869 
 870     // Atomic swap back the old header
    // CAS the mark word back from our BasicLock address to the saved
    // header; on success branch to 'count', on failure fall through to
    // the slow path.
 871     cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
 872 
 873     // Call the runtime routine for slow case.
 874     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
 875     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 876     b(done);
 877 
 878     bind(count);
 879     decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 880 
 881     bind(done);
 882     restore_bcp();
 883   }
 884 }
 885 
 886 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 887                                                          Label& zero_continue) {
 888   assert(ProfileInterpreter, "must be profiling interpreter");
    // Load the mdp (method data pointer) slot of the current interpreter
    // frame into 'mdp'; branch to zero_continue when that slot is null
    // (i.e. no profile data pointer has been set for this frame).
 889   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 890   cbz(mdp, zero_continue);
 891 }
 892 
 893 // Set the method data pointer for the current bcp.
 894 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 895   assert(ProfileInterpreter, "must be profiling interpreter");

1618   // super call
1619   MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
1620                                entry_point, number_of_arguments,
1621                      check_exceptions);
1622 // interpreter specific
1623   restore_bcp();
1624   restore_locals();
1625 }
1626 
1627 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
1628   assert_different_registers(obj, rscratch1);
1629   Label update, next, none;
1630 
1631   verify_oop(obj);
1632 
1633   cbnz(obj, update);
1634   orptr(mdo_addr, TypeEntries::null_seen);
1635   b(next);
1636 
1637   bind(update);
1638   load_klass(obj, obj);

1639 
1640   ldr(rscratch1, mdo_addr);
1641   eor(obj, obj, rscratch1);
1642   tst(obj, TypeEntries::type_klass_mask);
1643   br(Assembler::EQ, next); // klass seen before, nothing to
1644                            // do. The unknown bit may have been
1645                            // set already but no need to check.
1646 
1647   tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
1648   // already unknown. Nothing to do anymore.
1649 
1650   ldr(rscratch1, mdo_addr);
1651   cbz(rscratch1, none);
1652   cmp(rscratch1, (u1)TypeEntries::null_seen);
1653   br(Assembler::EQ, none);
1654   // There is a chance that the checks above (re-reading profiling
1655   // data from memory) fail if another thread has just set the
1656   // profiling to this obj's klass
1657   ldr(rscratch1, mdo_addr);
1658   eor(obj, obj, rscratch1);

 713   leave();
 714   // If we're returning to interpreted code we will shortly be
 715   // adjusting SP to allow some space for ESP.  If we're returning to
 716   // compiled code the saved sender SP was saved in sender_sp, so this
 717   // restores it.
 718   andr(sp, esp, -16);
 719 }
 720 
 721 // Lock object
 722 //
 723 // Args:
 724 //      c_rarg1: BasicObjectLock to be used for locking
 725 //
 726 // Kills:
 727 //      r0
 728 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 729 //      rscratch1, rscratch2 (scratch regs)
 730 void InterpreterMacroAssembler::lock_object(Register lock_reg)
 731 {
 732   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
 733 
 734   const Register obj_reg = c_rarg3; // Will contain the oop
 735   const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 736 
 737   // Load object pointer into obj_reg %c_rarg3
 738   ldr(obj_reg, Address(lock_reg, obj_offset));
 739 
 740   if (UseHeavyMonitors) {
    // NOTE(review): this version passes obj_reg to monitorenter, whereas the
    // displaced-header scheme passed lock_reg — confirm the runtime entry
    // point's expected argument in InterpreterRuntime::monitorenter.
 741     call_VM(noreg,
 742             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 743             obj_reg);
 744   } else {
    // count: fast-path success (bump the thread's held monitor count);
    // done: common exit for both the fast and slow paths.
 745     Label count, done;
 746 
 747     const Register swap_reg = r0;
 748     const Register tmp = c_rarg2;






 749 
 750     Label slow_case;
 751 



 752     if (DiagnoseSyncOnValueBasedClasses != 0) {
      // Synchronizing on a value-based class is diagnosed in the runtime;
      // route such objects to the slow path.
 753       load_klass(tmp, obj_reg);
 754       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 755       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 756       br(Assembler::NE, slow_case);
 757     }
 758 
    // Fast path: load the mark word and attempt the lightweight lock.
    // fast_lock branches to slow_case on failure; on success it falls
    // through and we jump to 'count'.  (Presumably fast_lock records the
    // oop on the thread's lock stack — confirm against
    // MacroAssembler::fast_lock.)
 759     ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 760     fast_lock(obj_reg, tmp, rscratch1, swap_reg, rscratch2, slow_case);
 761     b(count);














































 762 
 763     bind(slow_case);
 764 
 765     // Call the runtime routine for slow case
 766     call_VM(noreg,
 767             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 768             obj_reg);
 769     b(done);
 770 
 771     bind(count);
 772     increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 773 
 774     bind(done);
 775   }
 776 }
 777 
 778 
 779 // Unlocks an object. Used in monitorexit bytecode and
 780 // remove_activation.  Throws an IllegalMonitorException if object is
 781 // not locked by current thread.
 782 //
 783 // Args:
 784 //      c_rarg1: BasicObjectLock for lock
 785 //
 786 // Kills:
 787 //      r0
 788 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
 789 //      rscratch1, rscratch2 (scratch regs)
 790 void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 791 {
 792   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");
 793 
 794   const Register obj_reg    = c_rarg3;  // Will contain the oop
 795 
 796   // Load oop into obj_reg(%c_rarg3)
 797   ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
 798 
 799   // Free entry
    // Clear the BasicObjectLock's obj slot before unlocking — unlike the
    // displaced-header scheme, this version does not restore it on the
    // slow path.
 800   str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
 801 
 802   if (UseHeavyMonitors) {
 803     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), obj_reg);
 804   } else {
    // count: fast-path success (drop the thread's held monitor count);
    // done: common exit for both the fast and slow paths.
 805     Label count, done, slow_case;
 806 
 807     const Register swap_reg   = r0;
 808     const Register header_reg = c_rarg2;  // Will contain the old oopMark

 809 
 810     save_bcp(); // Save in case of exception
 811 
 812     // Check for non-symmetric locking. This is allowed by the spec and the interpreter
 813     // must handle it.
    // NOTE(review): compares the word loaded from
    // Thread::lock_stack_current_offset() against the oop being unlocked;
    // assumes that word identifies the most recently fast-locked object —
    // confirm against the lock-stack layout in Thread.
 814     ldr(header_reg, Address(rthread, Thread::lock_stack_current_offset()));
 815     cmpoop(header_reg, obj_reg);
 816     br(Assembler::NE, slow_case);




 817 
    // Fast path: load the mark word and attempt the lightweight unlock.
    // fast_unlock branches to slow_case on failure; on success it falls
    // through and we jump to 'count'.
 818     ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 819     fast_unlock(obj_reg, header_reg, swap_reg, rscratch1, slow_case);
 820     b(count);






 821 
 822     // Call the runtime routine for slow case.
 823     bind(slow_case);
 824     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), obj_reg);
 825     b(done);
 826 
 827     bind(count);
 828     decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 829 
 830     bind(done);
 831     restore_bcp();
 832   }
 833 }
 834 
 835 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 836                                                          Label& zero_continue) {
 837   assert(ProfileInterpreter, "must be profiling interpreter");
    // Load the mdp (method data pointer) slot of the current interpreter
    // frame into 'mdp'; branch to zero_continue when that slot is null
    // (i.e. no profile data pointer has been set for this frame).
 838   ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
 839   cbz(mdp, zero_continue);
 840 }
 841 
 842 // Set the method data pointer for the current bcp.
 843 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 844   assert(ProfileInterpreter, "must be profiling interpreter");

1567   // super call
1568   MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
1569                                entry_point, number_of_arguments,
1570                      check_exceptions);
1571 // interpreter specific
1572   restore_bcp();
1573   restore_locals();
1574 }
1575 
1576 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
1577   assert_different_registers(obj, rscratch1);
1578   Label update, next, none;
1579 
1580   verify_oop(obj);
1581 
1582   cbnz(obj, update);
1583   orptr(mdo_addr, TypeEntries::null_seen);
1584   b(next);
1585 
1586   bind(update);
1587   load_klass(rscratch1, obj);
1588   mov(obj, rscratch1);
1589 
1590   ldr(rscratch1, mdo_addr);
1591   eor(obj, obj, rscratch1);
1592   tst(obj, TypeEntries::type_klass_mask);
1593   br(Assembler::EQ, next); // klass seen before, nothing to
1594                            // do. The unknown bit may have been
1595                            // set already but no need to check.
1596 
1597   tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
1598   // already unknown. Nothing to do anymore.
1599 
1600   ldr(rscratch1, mdo_addr);
1601   cbz(rscratch1, none);
1602   cmp(rscratch1, (u1)TypeEntries::null_seen);
1603   br(Assembler::EQ, none);
1604   // There is a chance that the checks above (re-reading profiling
1605   // data from memory) fail if another thread has just set the
1606   // profiling to this obj's klass
1607   ldr(rscratch1, mdo_addr);
1608   eor(obj, obj, rscratch1);
< prev index next >