
src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

@@ -1,7 +1,7 @@
  /*
-  * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.

@@ -1713,12 +1713,10 @@
        // Save the test result, for recursive case, the result is zero
        __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
        __ jcc(Assembler::notEqual, slow_path_lock);
      } else {
        assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-       // Load object header
-       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
        __ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
      }
      __ bind(count_mon);
      __ inc_held_monitor_count();
  

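The two removed lines show the shape of this change: MacroAssembler::lightweight_lock now loads the object header itself, so the caller no longer stages the mark word in swap_reg before the call. As a rough mental model only (not HotSpot's actual assembler code), a minimal C++ sketch of CAS-based lightweight locking might look like the following; the type names, bit patterns, and fixed-size lock stack are illustrative stand-ins for the real markWord and LockStack machinery:

  #include <atomic>
  #include <cstdint>

  // Hypothetical stand-ins for HotSpot's markWord encoding; the real
  // constants live in markWord.hpp and differ in detail.
  constexpr uintptr_t kLockMask   = 0b11;  // low two bits hold the lock state
  constexpr uintptr_t kUnlocked   = 0b01;  // "neutral" (unlocked) pattern
  constexpr uintptr_t kFastLocked = 0b00;  // lightweight-locked pattern

  struct Obj {
    std::atomic<uintptr_t> mark;
  };

  struct ThreadLockStack {
    Obj* stack[8];
    int  top = 0;
    void push(Obj* o) { stack[top++] = o; }  // real code checks for overflow
    Obj* pop()        { return stack[--top]; }
  };

  // Conceptual fast-path lock: load the header, verify it is unlocked,
  // CAS it to the locked pattern, then record the oop on the thread's
  // lock stack. Because the helper performs the header load itself,
  // the caller-side movptr removed above became redundant.
  bool lightweight_lock(Obj* obj, ThreadLockStack* ls) {
    uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
    if ((mark & kLockMask) != kUnlocked) {
      return false;  // inflated or already locked: take the slow path
    }
    uintptr_t locked = (mark & ~kLockMask) | kFastLocked;
    if (!obj->mark.compare_exchange_strong(mark, locked)) {
      return false;  // lost the race: slow path
    }
    ls->push(obj);
    return true;
  }

A false return here corresponds to the branch to slow_path_lock in the generated code.
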
@@ -1872,13 +1870,11 @@
        __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
        __ jcc(Assembler::notEqual, slow_path_unlock);
        __ dec_held_monitor_count();
      } else {
        assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-       __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
-       __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+       __ lightweight_unlock(obj_reg, swap_reg, thread, lock_reg, slow_path_unlock);
        __ dec_held_monitor_count();
      }
  
      // slow path re-enters here
      __ bind(unlock_done);
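On the unlock side, the explicit mark load and lock-bit masking likewise move inside lightweight_unlock, and the helper now takes the thread register, presumably so it can reach the thread's lock stack. Extending the illustrative sketch above (same hypothetical Obj, ThreadLockStack, and mark-word constants), the conceptual fast path is the reverse CAS:

  // Conceptual fast-path unlock, reusing the hypothetical types from the
  // locking sketch. The thread's lock stack is now an explicit input,
  // matching the extra `thread` argument in the new lightweight_unlock call.
  bool lightweight_unlock(Obj* obj, ThreadLockStack* ls) {
    ls->pop();  // real code verifies the popped entry and handles recursion
    uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
    if ((mark & kLockMask) != kFastLocked) {
      return false;  // header no longer fast-locked: slow path
    }
    uintptr_t unlocked = (mark & ~kLockMask) | kUnlocked;
    if (!obj->mark.compare_exchange_strong(mark, unlocked)) {
      return false;  // contended in the meantime: slow path
    }
    return true;
  }

As with locking, a failed fast path corresponds to the jump to slow_path_unlock, after which control re-enters at unlock_done above.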