src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

@@ -1310,11 +1310,10 @@
                                                masm->code(),
                                                vep_offset,
                                                frame_complete,
                                                stack_slots,
                                                in_ByteSize(-1),
-                                               in_ByteSize(-1),
                                                oop_maps,
                                                exception_offset);
      if (method->is_continuation_enter_intrinsic()) {
        ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
      } else if (method->is_continuation_yield_intrinsic()) {

@@ -1344,11 +1343,10 @@
                                         masm->code(),
                                         vep_offset,
                                         frame_complete,
                                         stack_slots / VMRegImpl::slots_per_word,
                                         in_ByteSize(-1),
-                                        in_ByteSize(-1),
                                         (OopMapSet*)NULL);
    }
    address native_func = method->native_function();
    assert(native_func != NULL, "must have function");
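
Both native-nmethod call sites above drop the second in_ByteSize(-1) argument: with no BasicLock box left in the wrapper frame, there is no lock slot offset to record. Inferring from the call sites alone (the factory's declaration is not part of this page), the signature plausibly keeps a single ByteSize offset, matching the compiled-wrapper creation in the final hunk below:

    // Hypothetical tail of nmethod::new_native_nmethod after this change,
    // inferred only from the call sites on this page, not copied from
    // nmethod.hpp:
    //   ..., int frame_size,
    //        ByteSize receiver_sp_offset,  // single offset left; the second
    //        OopMapSet* oop_maps, ...);    // (lock-slot) ByteSize is removed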
  

@@ -1404,11 +1402,10 @@
  
    // Now any space we need for handlizing a klass if static method
  
    int klass_slot_offset = 0;
    int klass_offset = -1;
-   int lock_slot_offset = 0;
    bool is_static = false;
  
    if (method->is_static()) {
      klass_slot_offset = stack_slots;
      stack_slots += VMRegImpl::slots_per_word;

@@ -1417,11 +1414,10 @@
    }
  
    // Plus a lock if needed
  
    if (method->is_synchronized()) {
-     lock_slot_offset = stack_slots;
      stack_slots += VMRegImpl::slots_per_word;
    }
  
    // Now a place (+2) to save return values or temp during shuffling
    // + 4 for return address (which we own) and saved rfp

@@ -1432,12 +1428,10 @@
    //
    // FP-> |                     |
    //      |---------------------|
    //      | 2 slots for moves   |
    //      |---------------------|
-   //      | lock box (if sync)  |
-   //      |---------------------| <- lock_slot_offset
    //      | klass (if static)   |
    //      |---------------------| <- klass_slot_offset
    //      | oopHandle area      |
    //      |---------------------| <- oop_handle_offset (8 java arg registers)
    //      | outbound memory     |
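
The diagram loses its lock-box row, yet the synchronized-method branch above still bumps stack_slots by one word; only the recorded lock_slot_offset is gone. A standalone sketch of the revised slot accounting (hypothetical constants, not HotSpot code):

    #include <cstdio>

    int main() {
      const int slots_per_word = 2;          // stands in for VMRegImpl::slots_per_word
      int stack_slots = 8 * slots_per_word;  // oopHandle area: 8 java arg registers
      bool is_static = true, is_synchronized = true;

      int klass_slot_offset = 0;
      if (is_static) {                       // klass handle still gets a recorded slot
        klass_slot_offset = stack_slots;
        stack_slots += slots_per_word;
      }
      if (is_synchronized) {                 // word still reserved, but no
        stack_slots += slots_per_word;       // lock_slot_offset is recorded anymore
      }
      std::printf("klass@%d total=%d\n", klass_slot_offset, stack_slots);
      return 0;
    }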

@@ -1690,51 +1684,20 @@
    Label slow_path_lock;
    Label lock_done;
  
    if (method->is_synchronized()) {
      Label count;
-     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
  
      // Get the handle (the 2nd argument)
      __ mov(oop_handle_reg, c_rarg1);
  
-     // Get address of the box
- 
-     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
- 
      // Load the oop from the handle
      __ ldr(obj_reg, Address(oop_handle_reg, 0));
  
      if (!UseHeavyMonitors) {
-       // Load (object->mark() | 1) into swap_reg %r0
-       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-       __ orr(swap_reg, rscratch1, 1);
- 
-       // Save (object->mark() | 1) into BasicLock's displaced header
-       __ str(swap_reg, Address(lock_reg, mark_word_offset));
- 
-       // src -> dest iff dest == r0 else r0 <- dest
-       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
- 
-       // Hmm should this move to the slow path code area???
- 
-       // Test if the oopMark is an obvious stack pointer, i.e.,
-       //  1) (mark & 3) == 0, and
-       //  2) sp <= mark < mark + os::pagesize()
-       // These 3 tests can be done by evaluating the following
-       // expression: ((mark - sp) & (3 - os::vm_page_size())),
-       // assuming both stack pointer and pagesize have their
-       // least significant 2 bits clear.
-       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
- 
-       __ sub(swap_reg, sp, swap_reg);
-       __ neg(swap_reg, swap_reg);
-       __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
- 
-       // Save the test result, for recursive case, the result is zero
-       __ str(swap_reg, Address(lock_reg, mark_word_offset));
-       __ br(Assembler::NE, slow_path_lock);
+       __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+       __ fast_lock(obj_reg, old_hdr, swap_reg, tmp, rscratch1, slow_path_lock);
      } else {
        __ b(slow_path_lock);
      }
      __ bind(count);
      __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
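
The hand-rolled displaced-header sequence (store mark | 1 into the box, cmpxchg the box address into the header, then the stack-pointer range test for the recursive case) collapses into one MacroAssembler::fast_lock call. Its body is not shown on this page; as a rough mental model only, assuming a mark word whose low tag bits are 01 when unlocked, 00 when fast-locked and 10 when inflated, and with the per-thread lock-stack bookkeeping omitted (all names hypothetical):

    #include <atomic>
    #include <cstdint>

    // Assumed mark-word tag encoding -- illustrative only:
    constexpr uintptr_t kUnlocked = 0b01;  // free
    constexpr uintptr_t kLocked   = 0b00;  // fast-locked
    constexpr uintptr_t kMonitor  = 0b10;  // inflated: always slow path
    constexpr uintptr_t kTagMask  = 0b11;

    struct Obj { std::atomic<uintptr_t> mark; };

    // Models the new fast path: a single CAS on the object's mark word,
    // with no BasicLock box on the stack. Returning false stands in for
    // the branch to slow_path_lock.
    bool fast_lock_model(Obj* obj) {
      uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
      if ((mark & kTagMask) == kMonitor) return false;      // inflated
      uintptr_t expected = (mark & ~kTagMask) | kUnlocked;  // must be free
      uintptr_t desired  = (mark & ~kTagMask) | kLocked;
      return obj->mark.compare_exchange_strong(expected, desired,
                                               std::memory_order_acquire);
    }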

@@ -1839,37 +1802,20 @@
    if (method->is_synchronized()) {
  
      // Get locked oop from the handle we passed to jni
      __ ldr(obj_reg, Address(oop_handle_reg, 0));
  
-     Label done, not_recursive;
- 
-     if (!UseHeavyMonitors) {
-       // Simple recursive lock?
-       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-       __ cbnz(rscratch1, not_recursive);
-       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
-       __ b(done);
-     }
- 
-     __ bind(not_recursive);
+     Label done;
  
      // Must save r0 if it is live now because cmpxchg must use it
      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
        save_native_result(masm, ret_type, stack_slots);
      }
  
      if (!UseHeavyMonitors) {
-       // get address of the stack lock
-       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-       //  get old displaced header
-       __ ldr(old_hdr, Address(r0, 0));
- 
-       // Atomic swap old header if oop still contains the stack lock
-       Label count;
-       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
-       __ bind(count);
+       __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+       __ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
        __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
      } else {
        __ b(slow_path_unlock);
      }
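
The unlock side mirrors this: the old recursive-lock shortcut (a zero displaced header in the box) disappears with the box itself, and the mark word is reloaded from the object rather than from a stack slot before fast_unlock runs. Continuing the model above (same assumed encoding, hypothetical names):

    // Counterpart model for fast_unlock: CAS the mark back to the unlocked
    // encoding; any mismatch (for example, the lock was inflated in the
    // meantime) stands in for the branch to slow_path_unlock.
    bool fast_unlock_model(Obj* obj) {
      uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
      if ((mark & kTagMask) != kLocked) return false;       // not fast-locked
      uintptr_t expected = mark;
      uintptr_t desired  = (mark & ~kTagMask) | kUnlocked;
      return obj->mark.compare_exchange_strong(expected, desired,
                                               std::memory_order_release);
    }

    int main() {
      Obj o{kUnlocked};
      return (fast_lock_model(&o) && fast_unlock_model(&o)) ? 0 : 1;
    }

This is consistent with the held_monitor_count bookkeeping staying in the wrapper: the count is still incremented after a successful fast lock and decremented after a successful fast unlock.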
  

@@ -1935,15 +1881,14 @@
  
      // protect the args we've loaded
      save_args(masm, total_c_args, c_arg, out_regs);
  
      __ mov(c_rarg0, obj_reg);
-     __ mov(c_rarg1, lock_reg);
-     __ mov(c_rarg2, rthread);
+     __ mov(c_rarg1, rthread);
  
      // Not a leaf but we have last_Java_frame setup as we want
-     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
+     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 2);
      restore_args(masm, total_c_args, c_arg, out_regs);
  
  #ifdef ASSERT
      { Label L;
        __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));

@@ -1964,12 +1909,11 @@
  
      if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
        save_native_result(masm, ret_type, stack_slots);
      }
  
-     __ mov(c_rarg2, rthread);
-     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
+     __ mov(c_rarg1, rthread);
      __ mov(c_rarg0, obj_reg);
  
      // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
      // NOTE that obj_reg == r19 currently
      __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
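
In both slow paths the BasicLock address argument vanishes, so complete_monitor_locking_C is now called with two arguments instead of three, and the unlock path likewise loads only the object and the current thread. Presumably the runtime entries shed their BasicLock* parameter accordingly (hypothetical declarations, inferred from these call sites, not copied from the patch):

    // Inferred, not shown in this page's hunks:
    //   void SharedRuntime::complete_monitor_locking_C(oopDesc* obj, JavaThread* current);
    //   void SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, JavaThread* current);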

@@ -2069,11 +2013,10 @@
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots / VMRegImpl::slots_per_word,
                                              (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
-                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                              oop_maps);
  
    return nm;
  }
  