< prev index next >

src/hotspot/cpu/riscv/macroAssembler_riscv.cpp

Print this page

  30 #include "compiler/disassembler.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "gc/shared/barrierSetAssembler.hpp"
  33 #include "gc/shared/cardTable.hpp"
  34 #include "gc/shared/cardTableBarrierSet.hpp"
  35 #include "gc/shared/collectedHeap.hpp"
  36 #include "interpreter/bytecodeHistogram.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "nativeInst_riscv.hpp"
  41 #include "oops/accessDecorators.hpp"
  42 #include "oops/compressedOops.inline.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "oops/oop.hpp"
  45 #include "runtime/interfaceSupport.inline.hpp"
  46 #include "runtime/javaThread.hpp"
  47 #include "runtime/jniHandles.inline.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "runtime/stubRoutines.hpp"

  50 #include "utilities/powerOfTwo.hpp"
  51 #ifdef COMPILER2
  52 #include "opto/compile.hpp"
  53 #include "opto/node.hpp"
  54 #include "opto/output.hpp"
  55 #endif
  56 
// BLOCK_COMMENT annotates the emitted assembly with a comment string in
// debug builds; it expands to nothing in PRODUCT builds.
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
// STOP forwards to MacroAssembler::stop with the given message.
#define STOP(str) stop(str);
// BIND binds the label and emits a block comment carrying its name.
#define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":")
  64 
  65 static void pass_arg0(MacroAssembler* masm, Register arg) {
  66   if (c_rarg0 != arg) {
  67     masm->mv(c_rarg0, arg);
  68   }
  69 }

4686     });
4687   }
4688 }
4689 
4690 void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) {
4691   assert(bit_pos < 64, "invalid bit range");
4692   if (UseZbs) {
4693     bexti(Rd, Rs, bit_pos);
4694     return;
4695   }
4696   int64_t imm = (int64_t)(1UL << bit_pos);
4697   if (is_simm12(imm)) {
4698     and_imm12(Rd, Rs, imm);
4699   } else {
4700     srli(Rd, Rs, bit_pos);
4701     and_imm12(Rd, Rd, 1);
4702   }
4703 }
4704 
// Implements lightweight-locking.
// Branches to slow upon failure to lock the object.
// Falls through upon success.
//
//  - obj: the object to be locked
//  - hdr: the header, already loaded from obj, will be destroyed
//  - tmp1, tmp2: temporary registers, will be destroyed
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, hdr, tmp1, tmp2, t0);

  // Check if we would have space on lock-stack for the object.
  // tmp1 = current lock-stack top offset; take the slow path if the
  // stack is full (far branch: slow may be out of short-branch range).
  lwu(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
  mv(tmp2, (unsigned)LockStack::end_offset());
  bge(tmp1, tmp2, slow, /* is_far */ true);

  // Load (object->mark() | 1) into hdr
  // hdr now holds the expected mark word: the unlocked-bit forced on.
  ori(hdr, hdr, markWord::unlocked_value);
  // Clear lock-bits, into tmp2
  // tmp2 holds the desired (fast-locked) mark word value.
  xori(tmp2, hdr, markWord::unlocked_value);

  // Try to swing header from unlocked to locked
  // Compare-and-exchange the mark at obj from hdr (unlocked) to tmp2
  // (locked); falls through to `slow` when the observed mark differs.
  Label success;
  cmpxchgptr(hdr, tmp2, obj, tmp1, success, &slow);
  bind(success);

  // After successful lock, push object on lock-stack
  lwu(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
  add(tmp2, xthread, tmp1);   // tmp2 = address of the first free slot
  sd(obj, Address(tmp2, 0));
  addw(tmp1, tmp1, oopSize);  // bump top by one oop slot
  sw(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
}
4738 
// Implements lightweight-unlocking.
// Branches to slow upon failure.
// Falls through upon success.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object
// - tmp1, tmp2: temporary registers
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, hdr, tmp1, tmp2, t0);

#ifdef ASSERT
  {
    // The following checks rely on the fact that LockStack is only ever modified by
    // its owning thread, even if the lock got inflated concurrently; removal of LockStack
    // entries after inflation will happen delayed in that case.

    // Check for lock-stack underflow.
    Label stack_ok;
    lwu(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
    mv(tmp2, (unsigned)LockStack::start_offset());
    bgt(tmp1, tmp2, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
  {
    // Check if the top of the lock-stack matches the unlocked object.
    // tmp1 still holds the top offset loaded by the underflow check above.
    Label tos_ok;
    subw(tmp1, tmp1, oopSize);
    add(tmp1, xthread, tmp1);
    ld(tmp1, Address(tmp1, 0));
    beq(tmp1, obj, tos_ok);
    STOP("Top of lock-stack does not match the unlocked object");
    bind(tos_ok);
  }
  {
    // Check that hdr is fast-locked (all lock bits clear).
    Label hdr_ok;
    andi(tmp1, hdr, markWord::lock_mask_in_place);
    beqz(tmp1, hdr_ok);
    STOP("Header is not fast-locked");
    bind(hdr_ok);
  }
#endif

  // Load the new header (unlocked) into tmp1
  ori(tmp1, hdr, markWord::unlocked_value);

  // Try to swing header from locked to unlocked
  // Compare-and-exchange the mark at obj from hdr (locked) to tmp1
  // (unlocked); falls through to `slow` when the observed mark differs.
  Label success;
  cmpxchgptr(hdr, tmp1, obj, tmp2, success, &slow);
  bind(success);

  // After successful unlock, pop object from lock-stack
  lwu(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
  subw(tmp1, tmp1, oopSize);
#ifdef ASSERT
  // Zero the popped slot so stale entries are visible in debug builds.
  add(tmp2, xthread, tmp1);
  sd(zr, Address(tmp2, 0));
#endif
  sw(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
}

  30 #include "compiler/disassembler.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "gc/shared/barrierSetAssembler.hpp"
  33 #include "gc/shared/cardTable.hpp"
  34 #include "gc/shared/cardTableBarrierSet.hpp"
  35 #include "gc/shared/collectedHeap.hpp"
  36 #include "interpreter/bytecodeHistogram.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "nativeInst_riscv.hpp"
  41 #include "oops/accessDecorators.hpp"
  42 #include "oops/compressedOops.inline.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "oops/oop.hpp"
  45 #include "runtime/interfaceSupport.inline.hpp"
  46 #include "runtime/javaThread.hpp"
  47 #include "runtime/jniHandles.inline.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "runtime/stubRoutines.hpp"
  50 #include "utilities/globalDefinitions.hpp"
  51 #include "utilities/powerOfTwo.hpp"
  52 #ifdef COMPILER2
  53 #include "opto/compile.hpp"
  54 #include "opto/node.hpp"
  55 #include "opto/output.hpp"
  56 #endif
  57 
// BLOCK_COMMENT annotates the emitted assembly with a comment string in
// debug builds; it expands to nothing in PRODUCT builds.
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
// STOP forwards to MacroAssembler::stop with the given message.
#define STOP(str) stop(str);
// BIND binds the label and emits a block comment carrying its name.
#define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":")
  65 
  66 static void pass_arg0(MacroAssembler* masm, Register arg) {
  67   if (c_rarg0 != arg) {
  68     masm->mv(c_rarg0, arg);
  69   }
  70 }

4687     });
4688   }
4689 }
4690 
4691 void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) {
4692   assert(bit_pos < 64, "invalid bit range");
4693   if (UseZbs) {
4694     bexti(Rd, Rs, bit_pos);
4695     return;
4696   }
4697   int64_t imm = (int64_t)(1UL << bit_pos);
4698   if (is_simm12(imm)) {
4699     and_imm12(Rd, Rs, imm);
4700   } else {
4701     srli(Rd, Rs, bit_pos);
4702     and_imm12(Rd, Rd, 1);
4703   }
4704 }
4705 
// Implements lightweight-locking.
// Branches to slow upon failure to lock the object; falls through on success.
//
//  - obj: the object to be locked
//  - tmp1, tmp2, tmp3: temporary registers, will be destroyed
//  - slow: branched to if locking fails
void MacroAssembler::lightweight_lock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, tmp1, tmp2, tmp3, t0);

  Label push;
  const Register top = tmp1;   // lock-stack top offset
  const Register mark = tmp2;  // object's mark word
  const Register t = tmp3;     // scratch

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ld(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  // Check if the lock-stack is full.
  lwu(top, Address(xthread, JavaThread::lock_stack_top_offset()));
  mv(t, (unsigned)LockStack::end_offset());
  bge(top, t, slow, /* is_far */ true);

  // Check for recursion.
  // If the topmost lock-stack entry is already obj, this is a recursive
  // acquire: just push another entry, without touching the mark word.
  add(t, xthread, top);
  ld(t, Address(t, -oopSize));
  beq(obj, t, push);

  // Check header for monitor (0b10).
  // Inflated locks must be handled by the runtime slow path.
  test_bit(t, mark, exact_log2(markWord::monitor_value));
  bnez(t, slow, /* is_far */ true);

  // Try to lock. Transition lock-bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a la");
  ori(mark, mark, markWord::unlocked_value);  // expected: mark with unlocked bit set
  xori(t, mark, markWord::unlocked_value);    // new: lock bits cleared (fast-locked)
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::int64,
          /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ t);
  bne(mark, t, slow, /* is_far */ true);      // CAS failed: observed mark differs

  bind(push);
  // After successful lock, push object on lock-stack.
  add(t, xthread, top);
  sd(obj, Address(t));
  addw(top, top, oopSize);
  sw(top, Address(xthread, JavaThread::lock_stack_top_offset()));
}
4753 
// Implements lightweight-unlocking.
// Branches to slow upon failure; falls through on success.
//
// - obj: the object to be unlocked
// - tmp1, tmp2, tmp3: temporary registers
// - slow: branched to if unlocking fails
void MacroAssembler::lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, tmp1, tmp2, tmp3, t0);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    lwu(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
    mv(tmp2, (unsigned)LockStack::start_offset());
    bge(tmp1, tmp2, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = tmp1;   // lock-stack top offset
  const Register mark = tmp2;  // object's mark word
  const Register t = tmp3;     // scratch

  // Check if obj is top of lock-stack.
  lwu(top, Address(xthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);     // top now addresses the topmost entry
  add(t, xthread, top);
  ld(t, Address(t));
  bne(obj, t, slow, /* is_far */ true);

  // Pop lock-stack.
  // In debug builds, zero the popped slot so stale entries are visible.
  DEBUG_ONLY(add(t, xthread, top);)
  DEBUG_ONLY(sd(zr, Address(t));)
  sw(top, Address(xthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  // If the next entry down is also obj, this was a recursive acquire and
  // the pop above completes the unlock.
  add(t, xthread, top);
  ld(t, Address(t, -oopSize));
  beq(obj, t, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ld(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  test_bit(t, mark, exact_log2(markWord::monitor_value));
  bnez(t, push_and_slow);   // inflated: restore lock-stack, go to runtime

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  test_bit(t, mark, exact_log2(markWord::unlocked_value));
  beqz(t, not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  ori(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::int64,
          /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, /*result*/ t);
  beq(mark, t, unlocked);   // CAS succeeded: fully unlocked

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  DEBUG_ONLY(add(t, xthread, top);)
  DEBUG_ONLY(sd(obj, Address(t));)
  addw(top, top, oopSize);
  sw(top, Address(xthread, JavaThread::lock_stack_top_offset()));
  j(slow);

  bind(unlocked);
}
< prev index next >