< prev index next >

src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Print this page
@@ -1,7 +1,7 @@
  /*
-  * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.

@@ -2069,45 +2069,52 @@
      __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
  
      // Load the oop from the handle
      __ movptr(obj_reg, Address(oop_handle_reg, 0));
  
-     if (UseBiasedLocking) {
-       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
-     }
+     if (LockingMode == LM_MONITOR) {
+       __ jmp(slow_path_lock);
+     } else if (LockingMode == LM_LEGACY) {
+       if (UseBiasedLocking) {
+         __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
+       }
  
-     // Load immediate 1 into swap_reg %rax
-     __ movl(swap_reg, 1);
+       // Load immediate 1 into swap_reg %rax
+       __ movl(swap_reg, 1);
  
-     // Load (object->mark() | 1) into swap_reg %rax
-     __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+       // Load (object->mark() | 1) into swap_reg %rax
+       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  
-     // Save (object->mark() | 1) into BasicLock's displaced header
-     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+       // Save (object->mark() | 1) into BasicLock's displaced header
+       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
  
-     // src -> dest iff dest == rax else rax <- dest
-     __ lock();
-     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-     __ jcc(Assembler::equal, lock_done);
+       // src -> dest iff dest == rax else rax <- dest
+       __ lock();
+       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+       __ jcc(Assembler::equal, lock_done);
  
-     // Hmm should this move to the slow path code area???
+       // Hmm should this move to the slow path code area???
  
-     // Test if the oopMark is an obvious stack pointer, i.e.,
-     //  1) (mark & 3) == 0, and
-     //  2) rsp <= mark < mark + os::pagesize()
-     // These 3 tests can be done by evaluating the following
-     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
-     // assuming both stack pointer and pagesize have their
-     // least significant 2 bits clear.
-     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
+       // Test if the oopMark is an obvious stack pointer, i.e.,
+       //  1) (mark & 3) == 0, and
+       //  2) rsp <= mark < mark + os::pagesize()
+       // These 3 tests can be done by evaluating the following
+       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
+       // assuming both stack pointer and pagesize have their
+       // least significant 2 bits clear.
+       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
  
-     __ subptr(swap_reg, rsp);
-     __ andptr(swap_reg, 3 - os::vm_page_size());
+       __ subptr(swap_reg, rsp);
+       __ andptr(swap_reg, 3 - os::vm_page_size());
  
-     // Save the test result, for recursive case, the result is zero
-     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
-     __ jcc(Assembler::notEqual, slow_path_lock);
+       // Save the test result, for recursive case, the result is zero
+       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+       __ jcc(Assembler::notEqual, slow_path_lock);
+     } else {
+       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+       __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
+     }
  
      // Slow path will re-enter here
  
      __ bind(lock_done);
    }

@@ -2228,30 +2235,38 @@
  
      if (UseBiasedLocking) {
        __ biased_locking_exit(obj_reg, old_hdr, done);
      }
  
-     // Simple recursive lock?
+     if (LockingMode == LM_LEGACY) {
+       // Simple recursive lock?
  
-     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
-     __ jcc(Assembler::equal, done);
+       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
+       __ jcc(Assembler::equal, done);
+     }
  
    // Must save rax if it is live now because cmpxchg must use it
      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
        save_native_result(masm, ret_type, stack_slots);
      }
  
- 
-     // get address of the stack lock
-     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
-     //  get old displaced header
-     __ movptr(old_hdr, Address(rax, 0));
- 
-     // Atomic swap old header if oop still contains the stack lock
-     __ lock();
-     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-     __ jcc(Assembler::notEqual, slow_path_unlock);
+     if (LockingMode == LM_MONITOR) {
+       __ jmp(slow_path_unlock);
+     } else if (LockingMode == LM_LEGACY) {
+       // get address of the stack lock
+       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+       //  get old displaced header
+       __ movptr(old_hdr, Address(rax, 0));
+ 
+       // Atomic swap old header if oop still contains the stack lock
+       __ lock();
+       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+       __ jcc(Assembler::notEqual, slow_path_unlock);
+     } else {
+       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+       __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
+     }
  
      // slow path re-enters here
      __ bind(unlock_done);
      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
        restore_native_result(masm, ret_type, stack_slots);
< prev index next >