< prev index next >

src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp

Print this page
*** 952,11 ***
                                         masm->code(),
                                         vep_offset,
                                         frame_complete,
                                         stack_slots / VMRegImpl::slots_per_word,
                                         in_ByteSize(-1),
-                                        in_ByteSize(-1),
                                         (OopMapSet*)NULL);
    }
    address native_func = method->native_function();
    assert(native_func != NULL, "must have function");
  
--- 952,10 ---

*** 1008,11 ***
  
    // Now any space we need for handlizing a klass if static method
  
    int klass_slot_offset = 0;
    int klass_offset = -1;
-   int lock_slot_offset = 0;
    bool is_static = false;
  
    if (method->is_static()) {
      klass_slot_offset = stack_slots;
      stack_slots += VMRegImpl::slots_per_word;
--- 1007,10 ---

*** 1021,11 ***
    }
  
    // Plus a lock if needed
  
    if (method->is_synchronized()) {
-     lock_slot_offset = stack_slots;
      stack_slots += VMRegImpl::slots_per_word;
    }
  
    // Now a place (+2) to save return values or temp during shuffling
    // + 4 for return address (which we own) and saved fp
--- 1019,10 ---

*** 1038,12 ***
    //      | 2 slots (ra)        |
    //      | 2 slots (fp)        |
    //      |---------------------|
    //      | 2 slots for moves   |
    //      |---------------------|
-   //      | lock box (if sync)  |
-   //      |---------------------| <- lock_slot_offset
    //      | klass (if static)   |
    //      |---------------------| <- klass_slot_offset
    //      | oopHandle area      |
    //      |---------------------| <- oop_handle_offset (8 java arg registers)
    //      | outbound memory     |
--- 1035,10 ---

*** 1295,51 ***
    Label slow_path_lock;
    Label lock_done;
  
    if (method->is_synchronized()) {
  
-     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
- 
      // Get the handle (the 2nd argument)
      __ mv(oop_handle_reg, c_rarg1);
  
-     // Get address of the box
- 
-     __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
- 
      // Load the oop from the handle
      __ ld(obj_reg, Address(oop_handle_reg, 0));
  
      if (!UseHeavyMonitors) {
!       // Load (object->mark() | 1) into swap_reg % x10
!       __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-       __ ori(swap_reg, t0, 1);
- 
-       // Save (object->mark() | 1) into BasicLock's displaced header
-       __ sd(swap_reg, Address(lock_reg, mark_word_offset));
- 
-       // src -> dest if dest == x10 else x10 <- dest
-       {
-         Label here;
-         __ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, lock_done, /*fallthrough*/NULL);
-       }
- 
-       // Test if the oopMark is an obvious stack pointer, i.e.,
-       //  1) (mark & 3) == 0, and
-       //  2) sp <= mark < mark + os::pagesize()
-       // These 3 tests can be done by evaluating the following
-       // expression: ((mark - sp) & (3 - os::vm_page_size())),
-       // assuming both stack pointer and pagesize have their
-       // least significant 2 bits clear.
-       // NOTE: the oopMark is in swap_reg % 10 as the result of cmpxchg
- 
-       __ sub(swap_reg, swap_reg, sp);
-       __ andi(swap_reg, swap_reg, 3 - os::vm_page_size());
- 
-       // Save the test result, for recursive case, the result is zero
-       __ sd(swap_reg, Address(lock_reg, mark_word_offset));
-       __ bnez(swap_reg, slow_path_lock);
      } else {
        __ j(slow_path_lock);
      }
  
      // Slow path will re-enter here
--- 1290,19 ---
    Label slow_path_lock;
    Label lock_done;
  
    if (method->is_synchronized()) {
  
      // Get the handle (the 2nd argument)
      __ mv(oop_handle_reg, c_rarg1);
  
      // Load the oop from the handle
      __ ld(obj_reg, Address(oop_handle_reg, 0));
  
      if (!UseHeavyMonitors) {
!       __ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
!       __ fast_lock(obj_reg, old_hdr, swap_reg, tmp, t0, slow_path_lock);
      } else {
        __ j(slow_path_lock);
      }
  
      // Slow path will re-enter here

*** 1427,32 ***
      // Get locked oop from the handle we passed to jni
      __ ld(obj_reg, Address(oop_handle_reg, 0));
  
      Label done;
  
-     if (!UseHeavyMonitors) {
-       // Simple recursive lock?
-       __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-       __ beqz(t0, done);
-     }
- 
- 
    // Must save x10 if it is live now because cmpxchg must use it
      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
        save_native_result(masm, ret_type, stack_slots);
      }
  
      if (!UseHeavyMonitors) {
!       // get address of the stack lock
!       __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-       //  get old displaced header
-       __ ld(old_hdr, Address(x10, 0));
- 
-       // Atomic swap old header if oop still contains the stack lock
-       Label succeed;
-       __ cmpxchg_obj_header(x10, old_hdr, obj_reg, t0, succeed, &slow_path_unlock);
-       __ bind(succeed);
      } else {
        __ j(slow_path_unlock);
      }
  
      // slow path re-enters here
--- 1390,18 ---
      // Get locked oop from the handle we passed to jni
      __ ld(obj_reg, Address(oop_handle_reg, 0));
  
      Label done;
  
    // Must save x10 if it is live now because cmpxchg must use it
      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
        save_native_result(masm, ret_type, stack_slots);
      }
  
      if (!UseHeavyMonitors) {
!       __ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
!       __ fast_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
      } else {
        __ j(slow_path_unlock);
      }
  
      // slow path re-enters here

*** 1517,15 ***
  
      // protect the args we've loaded
      save_args(masm, total_c_args, c_arg, out_regs);
  
      __ mv(c_rarg0, obj_reg);
!     __ mv(c_rarg1, lock_reg);
-     __ mv(c_rarg2, xthread);
  
      // Not a leaf but we have last_Java_frame setup as we want
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
      restore_args(masm, total_c_args, c_arg, out_regs);
  
  #ifdef ASSERT
      { Label L;
        __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
--- 1466,14 ---
  
      // protect the args we've loaded
      save_args(masm, total_c_args, c_arg, out_regs);
  
      __ mv(c_rarg0, obj_reg);
!     __ mv(c_rarg1, xthread);
  
      // Not a leaf but we have last_Java_frame setup as we want
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 2);
      restore_args(masm, total_c_args, c_arg, out_regs);
  
  #ifdef ASSERT
      { Label L;
        __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));

*** 1543,12 ***
  
      if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
        save_native_result(masm, ret_type, stack_slots);
      }
  
!     __ mv(c_rarg2, xthread);
-     __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      __ mv(c_rarg0, obj_reg);
  
      // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
      // NOTE that obj_reg == x9 currently
      __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
--- 1491,11 ---
  
      if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
        save_native_result(masm, ret_type, stack_slots);
      }
  
!     __ mv(c_rarg1, xthread);
      __ mv(c_rarg0, obj_reg);
  
      // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
      // NOTE that obj_reg == x9 currently
      __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));

*** 1648,11 ***
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots / VMRegImpl::slots_per_word,
                                              (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
-                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                              oop_maps);
    assert(nm != NULL, "create native nmethod fail!");
    return nm;
  }
  
--- 1595,10 ---
< prev index next >