< prev index next >

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

Print this page

  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/resolvedFieldEntry.hpp"
  40 #include "oops/resolvedIndyEntry.hpp"
  41 #include "oops/resolvedMethodEntry.hpp"
  42 #include "prims/jvmtiExport.hpp"
  43 #include "prims/jvmtiThreadState.hpp"
  44 #include "runtime/basicLock.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/javaThread.hpp"
  47 #include "runtime/safepointMechanism.hpp"
  48 #include "runtime/sharedRuntime.hpp"

  49 #include "utilities/powerOfTwo.hpp"
  50 
  51 void InterpreterMacroAssembler::narrow(Register result) {
  52 
  53   // Get method->_constMethod->_result_type
  54   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  55   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  56   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  57 
  58   Label done, notBool, notByte, notChar;
  59 
  60   // common case first
  61   cmpw(rscratch1, T_INT);
  62   br(Assembler::EQ, done);
  63 
  64   // mask integer result to narrower return type.
  65   cmpw(rscratch1, T_BOOLEAN);
  66   br(Assembler::NE, notBool);
  67   andw(result, result, 0x1);
  68   b(done);

 684     const Register tmp3 = c_rarg5;
 685 
 686     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 687     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 688     const int mark_offset = lock_offset +
 689                             BasicLock::displaced_header_offset_in_bytes();
 690 
 691     Label slow_case;
 692 
 693     // Load object pointer into obj_reg %c_rarg3
 694     ldr(obj_reg, Address(lock_reg, obj_offset));
 695 
 696     if (DiagnoseSyncOnValueBasedClasses != 0) {
 697       load_klass(tmp, obj_reg);
 698       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 699       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 700       br(Assembler::NE, slow_case);
 701     }
 702 
 703     if (LockingMode == LM_LIGHTWEIGHT) {
 704       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 705       b(count);
 706     } else if (LockingMode == LM_LEGACY) {
 707       // Load (object->mark() | 1) into swap_reg
 708       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 709       orr(swap_reg, rscratch1, 1);
 710 
 711       // Save (object->mark() | 1) into BasicLock's displaced header
 712       str(swap_reg, Address(lock_reg, mark_offset));
 713 
 714       assert(lock_offset == 0,
 715              "displached header must be first word in BasicObjectLock");
 716 
 717       Label fail;
 718       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 719 
 720       // Fast check for recursive lock.
 721       //
 722       // Can apply the optimization only if this is a stack lock
 723       // allocated in this thread. For efficiency, we can focus on
 724       // recently allocated stack locks (instead of reading the stack

 740       // owned by the current thread.
 741       //
 742       // These 3 tests can be done by evaluating the following
 743       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 744       // assuming both stack pointer and pagesize have their
 745       // least significant 3 bits clear.
 746       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 747       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 748       // copy
 749       mov(rscratch1, sp);
 750       sub(swap_reg, swap_reg, rscratch1);
 751       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 752 
 753       // Save the test result, for recursive case, the result is zero
 754       str(swap_reg, Address(lock_reg, mark_offset));
 755       br(Assembler::EQ, count);
 756     }
 757     bind(slow_case);
 758 
 759     // Call the runtime routine for slow case
 760     if (LockingMode == LM_LIGHTWEIGHT) {
 761       call_VM(noreg,
 762               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
 763               obj_reg);
 764     } else {
 765       call_VM(noreg,
 766               CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 767               lock_reg);
 768     }
 769     b(done);
 770 
 771     bind(count);
 772     increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 773 
 774     bind(done);
 775   }
 776 }
 777 
 778 
 779 // Unlocks an object. Used in monitorexit bytecode and
 780 // remove_activation.  Throws an IllegalMonitorStateException if object is
 781 // not locked by current thread.
 782 //
 783 // Args:
 784 //      c_rarg1: BasicObjectLock for lock
 785 //
 786 // Kills:
 787 //      r0
 788 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)

  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/resolvedFieldEntry.hpp"
  40 #include "oops/resolvedIndyEntry.hpp"
  41 #include "oops/resolvedMethodEntry.hpp"
  42 #include "prims/jvmtiExport.hpp"
  43 #include "prims/jvmtiThreadState.hpp"
  44 #include "runtime/basicLock.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/javaThread.hpp"
  47 #include "runtime/safepointMechanism.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "utilities/globalDefinitions.hpp"
  50 #include "utilities/powerOfTwo.hpp"
  51 
  52 void InterpreterMacroAssembler::narrow(Register result) {
  53 
  54   // Get method->_constMethod->_result_type
  55   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  56   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  57   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  58 
  59   Label done, notBool, notByte, notChar;
  60 
  61   // common case first
  62   cmpw(rscratch1, T_INT);
  63   br(Assembler::EQ, done);
  64 
  65   // mask integer result to narrower return type.
  66   cmpw(rscratch1, T_BOOLEAN);
  67   br(Assembler::NE, notBool);
  68   andw(result, result, 0x1);
  69   b(done);

 685     const Register tmp3 = c_rarg5;
 686 
 687     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 688     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 689     const int mark_offset = lock_offset +
 690                             BasicLock::displaced_header_offset_in_bytes();
 691 
 692     Label slow_case;
 693 
 694     // Load object pointer into obj_reg %c_rarg3
 695     ldr(obj_reg, Address(lock_reg, obj_offset));
 696 
 697     if (DiagnoseSyncOnValueBasedClasses != 0) {
 698       load_klass(tmp, obj_reg);
 699       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 700       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 701       br(Assembler::NE, slow_case);
 702     }
 703 
 704     if (LockingMode == LM_LIGHTWEIGHT) {
 705       lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
 706       b(count);
 707     } else if (LockingMode == LM_LEGACY) {
 708       // Load (object->mark() | 1) into swap_reg
 709       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 710       orr(swap_reg, rscratch1, 1);
 711 
 712       // Save (object->mark() | 1) into BasicLock's displaced header
 713       str(swap_reg, Address(lock_reg, mark_offset));
 714 
 715       assert(lock_offset == 0,
 716              "displached header must be first word in BasicObjectLock");
 717 
 718       Label fail;
 719       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 720 
 721       // Fast check for recursive lock.
 722       //
 723       // Can apply the optimization only if this is a stack lock
 724       // allocated in this thread. For efficiency, we can focus on
 725       // recently allocated stack locks (instead of reading the stack

 741       // owned by the current thread.
 742       //
 743       // These 3 tests can be done by evaluating the following
 744       // expression: ((mark - sp) & (7 - os::vm_page_size())),
 745       // assuming both stack pointer and pagesize have their
 746       // least significant 3 bits clear.
 747       // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
 748       // NOTE2: aarch64 does not like to subtract sp from rn so take a
 749       // copy
 750       mov(rscratch1, sp);
 751       sub(swap_reg, swap_reg, rscratch1);
 752       ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
 753 
 754       // Save the test result, for recursive case, the result is zero
 755       str(swap_reg, Address(lock_reg, mark_offset));
 756       br(Assembler::EQ, count);
 757     }
 758     bind(slow_case);
 759 
 760     // Call the runtime routine for slow case
 761     call_VM(noreg,
 762             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 763             lock_reg);






 764     b(done);
 765 
 766     bind(count);
 767     increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 768 
 769     bind(done);
 770   }
 771 }
 772 
 773 
 774 // Unlocks an object. Used in monitorexit bytecode and
 775 // remove_activation.  Throws an IllegalMonitorStateException if object is
 776 // not locked by current thread.
 777 //
 778 // Args:
 779 //      c_rarg1: BasicObjectLock for lock
 780 //
 781 // Kills:
 782 //      r0
 783 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
< prev index next >