
src/hotspot/cpu/aarch64/aarch64.ad

*** 1984,11 ***
  
    if (do_polling() && C->is_method_compilation()) {
      Label dummy_label;
      Label* code_stub = &dummy_label;
      if (!C->output()->in_scratch_emit_size()) {
!       code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
      }
      __ relocate(relocInfo::poll_return_type);
      __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
    }
  }
--- 1984,13 ---
  
    if (do_polling() && C->is_method_compilation()) {
      Label dummy_label;
      Label* code_stub = &dummy_label;
      if (!C->output()->in_scratch_emit_size()) {
!       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
+       C->output()->add_stub(stub);
+       code_stub = &stub->entry();
      }
      __ relocate(relocInfo::poll_return_type);
      __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
    }
  }
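
For reference, the change above stops using the old safepoint_poll_table()->add_safepoint(offset) bookkeeping and instead arena-allocates a C2SafepointPollStub, registers it with C->output()->add_stub(), and branches to the stub's entry() label. The sketch below only illustrates that register-now, emit-later pattern; PollStub and OutputStubs are hypothetical stand-ins, not HotSpot types.

    // Illustrative sketch (not HotSpot code): register a stub object up
    // front, then emit the out-of-line code for every stub in a later pass.
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct PollStub {
        int poll_offset;                       // code offset of the poll instruction
        explicit PollStub(int off) : poll_offset(off) {}
    };

    struct OutputStubs {
        std::vector<std::unique_ptr<PollStub>> stubs;
        PollStub* add_stub(int off) {          // mirrors C->output()->add_stub(stub)
            stubs.push_back(std::make_unique<PollStub>(off));
            return stubs.back().get();
        }
        void emit_all() const {                // later pass emits each slow-path stub
            for (const auto& s : stubs)
                std::printf("emit poll stub for code offset %d\n", s->poll_offset);
        }
    };

    int main() {
        OutputStubs out;
        PollStub* stub = out.add_stub(128);    // HotSpot allocates the stub in comp_arena
        (void)stub;                            // its entry label is the branch target
        out.emit_all();
    }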

*** 3826,42 ***
      }
  
      // Check for existing monitor
      __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
  
!     // Set tmp to be (markWord of object | UNLOCK_VALUE).
!     __ orr(tmp, disp_hdr, markWord::unlocked_value);
! 
!     // Initialize the box. (Must happen before we update the object mark!)
!     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
! 
!     // Compare object markWord with an unlocked value (tmp) and if
!     // equal exchange the stack address of our box with object markWord.
!     // On failure disp_hdr contains the possibly locked markWord.
!     __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
!                /*release*/ true, /*weak*/ false, disp_hdr);
!     __ br(Assembler::EQ, cont);
! 
!     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
! 
!     // If the compare-and-exchange succeeded, then we found an unlocked
!     // object, have now locked it, and will continue at label cont
! 
!     __ bind(cas_failed);
!     // We did not see an unlocked object so try the fast recursive case.
! 
!     // Check if the owner is self by comparing the value in the
!     // markWord of object (disp_hdr) with the stack pointer.
!     __ mov(rscratch1, sp);
!     __ sub(disp_hdr, disp_hdr, rscratch1);
!     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
!     // If the condition is true we are the owner, and hence we can store 0 as
!     // the displaced header in the box, which indicates a recursive lock.
!     __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
!     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
! 
!     __ b(cont);
  
      // Handle existing monitor.
      __ bind(object_has_monitor);
  
      // The object's monitor m is unlocked iff m->owner == NULL,
--- 3828,50 ---
      }
  
      // Check for existing monitor
      __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
  
!     if (LockingMode == LM_MONITOR) {
!       __ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
!       __ b(cont);
!     } else if (LockingMode == LM_LEGACY) {
!       // Set tmp to be (markWord of object | UNLOCK_VALUE).
!       __ orr(tmp, disp_hdr, markWord::unlocked_value);
! 
!       // Initialize the box. (Must happen before we update the object mark!)
!       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
! 
!       // Compare object markWord with an unlocked value (tmp) and if
!       // equal exchange the stack address of our box with object markWord.
!       // On failure disp_hdr contains the possibly locked markWord.
!       __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
!                  /*release*/ true, /*weak*/ false, disp_hdr);
!       __ br(Assembler::EQ, cont);
! 
!       assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
! 
!       // If the compare-and-exchange succeeded, then we found an unlocked
!       // object, have now locked it, and will continue at label cont
! 
!       __ bind(cas_failed);
!       // We did not see an unlocked object so try the fast recursive case.
! 
!       // Check if the owner is self by comparing the value in the
!       // markWord of object (disp_hdr) with the stack pointer.
!       __ mov(rscratch1, sp);
!       __ sub(disp_hdr, disp_hdr, rscratch1);
!       __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
!       // If the condition is true we are the owner, and hence we can store 0 as
!       // the displaced header in the box, which indicates a recursive lock.
+       __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
+       __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+       __ b(cont);
+     } else {
+       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+       __ fast_lock(oop, disp_hdr, tmp, rscratch1, cont);
+       __ b(cont);
+     }
  
      // Handle existing monitor.
      __ bind(object_has_monitor);
  
      // The object's monitor m is unlocked iff m->owner == NULL,

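For reference, the LM_LEGACY branch above implements the classic stack-lock protocol (store the unlocked mark as the displaced header in the box, CAS the box address into the object's mark word, and on failure test whether the mark already points into the current stack), while LM_LIGHTWEIGHT delegates to MacroAssembler::fast_lock, which flips the mark's lock bits and records the oop on the thread's lock stack. The following is a minimal C++ model of those two fast paths, not VM code; the types, the 0x1 lock-bit encoding and the simple stack-range test are assumptions standing in for markWord, BasicLock and the page-distance check done in the generated assembly.

    // Minimal model of the two fast-lock paths (illustration only).
    #include <atomic>
    #include <cstdint>
    #include <vector>

    constexpr uintptr_t kUnlockedBit = 0x1;       // assumed low-bit mark encoding

    struct LockBox { uintptr_t displaced_header; };

    // LM_LEGACY: try to install the box address as the object's mark word.
    bool legacy_fast_lock(std::atomic<uintptr_t>& mark, LockBox* box,
                          uintptr_t stack_low, uintptr_t stack_high) {
        uintptr_t unlocked = mark.load(std::memory_order_relaxed) | kUnlockedBit;
        box->displaced_header = unlocked;         // initialize the box first
        uintptr_t expected = unlocked;
        if (mark.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(box),
                                         std::memory_order_acq_rel)) {
            return true;                          // we now hold the stack lock
        }
        // CAS failed: 'expected' holds the observed mark. If it points into
        // our own stack this is a recursive lock; record it with a 0 header.
        // (The assembly uses a page-distance test rather than a range check.)
        if (expected >= stack_low && expected < stack_high) {
            box->displaced_header = 0;
            return true;
        }
        return false;                             // contended: take the slow path
    }

    // LM_LIGHTWEIGHT: clear the lock bit in place and push onto a lock stack.
    bool lightweight_fast_lock(std::atomic<uintptr_t>& mark,
                               std::vector<const void*>& lock_stack,
                               const void* oop) {
        uintptr_t unlocked = mark.load(std::memory_order_relaxed) | kUnlockedBit;
        uintptr_t expected = unlocked;
        if (!mark.compare_exchange_strong(expected, unlocked & ~kUnlockedBit,
                                          std::memory_order_acq_rel)) {
            return false;                         // inflated or contended: slow path
        }
        lock_stack.push_back(oop);                // remember per-thread ownership
        return true;
    }
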
*** 3870,17 ***
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
      __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
                 /*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
  
!     // Store a non-null value into the box to avoid looking like a re-entrant
!     // lock. The fast-path monitor unlock code checks for
!     // markWord::monitor_value so use markWord::unused_mark which has the
!     // relevant bit set, and also matches ObjectSynchronizer::enter.
!     __ mov(tmp, (address)markWord::unused_mark().value());
!     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
! 
      __ br(Assembler::EQ, cont); // CAS success means locking succeeded
  
      __ cmp(rscratch1, rthread);
      __ br(Assembler::NE, cont); // Check for recursive locking
  
--- 3880,18 ---
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
      __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
                 /*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
  
!     if (LockingMode != LM_LIGHTWEIGHT) {
!       // Store a non-null value into the box to avoid looking like a re-entrant
!       // lock. The fast-path monitor unlock code checks for
!       // markWord::monitor_value so use markWord::unused_mark which has the
!       // relevant bit set, and also matches ObjectSynchronizer::enter.
!       __ mov(tmp, (address)markWord::unused_mark().value());
!       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+     }
      __ br(Assembler::EQ, cont); // CAS success means locking succeeded
  
      __ cmp(rscratch1, rthread);
      __ br(Assembler::NE, cont); // Check for recursive locking
  

*** 3906,35 ***
  
      if (UseBiasedLocking && !UseOptoBiasInlining) {
        __ biased_locking_exit(oop, tmp, cont);
      }
  
!     // Find the lock address and load the displaced header from the stack.
!     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  
!     // If the displaced header is 0, we have a recursive unlock.
!     __ cmp(disp_hdr, zr);
!     __ br(Assembler::EQ, cont);
  
      // Handle existing monitor.
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
  
!     // Check if it is still a lightweight lock; this is true if we
!     // see the stack address of the basicLock in the markWord of the
!     // object.
! 
!     __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
!                /*release*/ true, /*weak*/ false, tmp);
!     __ b(cont);
  
      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  
      // Handle existing monitor.
      __ bind(object_has_monitor);
      STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
      __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
  
      Label notRecursive;
      __ cbz(disp_hdr, notRecursive);
  
--- 3917,60 ---
  
      if (UseBiasedLocking && !UseOptoBiasInlining) {
        __ biased_locking_exit(oop, tmp, cont);
      }
  
!     if (LockingMode == LM_LEGACY) {
!       // Find the lock address and load the displaced header from the stack.
+       __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  
!       // If the displaced header is 0, we have a recursive unlock.
!       __ cmp(disp_hdr, zr);
!       __ br(Assembler::EQ, cont);
+     }
  
      // Handle existing monitor.
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
  
!     if (LockingMode == LM_MONITOR) {
!       __ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
!       __ b(cont);
!     } else if (LockingMode == LM_LEGACY) {
!       // Check if it is still a lightweight lock; this is true if we
!       // see the stack address of the basicLock in the markWord of the
!       // object.
+ 
+       __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
+                  /*release*/ true, /*weak*/ false, tmp);
+       __ b(cont);
+     } else {
+       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+       __ fast_unlock(oop, tmp, box, disp_hdr, cont);
+       __ b(cont);
+     }
  
      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  
      // Handle existing monitor.
      __ bind(object_has_monitor);
      STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
      __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
+ 
+     if (LockingMode == LM_LIGHTWEIGHT) {
+       // If the owner is anonymous, we need to fix it -- in an out-of-line stub.
+       Register tmp2 = disp_hdr;
+       __ ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
+       // We cannot use tbnz here; the target might be too far away to be
+       // encoded in the instruction.
+       __ tst(tmp2, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER);
+       C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
+       Compile::current()->output()->add_stub(stub);
+       __ br(Assembler::NE, stub->entry());
+       __ bind(stub->continuation());
+     }
+ 
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
  
      Label notRecursive;
      __ cbz(disp_hdr, notRecursive);
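
As on the locking side, unlocking now dispatches on LockingMode: LM_LEGACY restores the displaced header with a CAS (or does nothing for a recursive, zero-header unlock), LM_LIGHTWEIGHT calls MacroAssembler::fast_unlock, and the inflated path gains an out-of-line C2HandleAnonOMOwnerStub that fixes up an anonymously owned monitor. A small model of the legacy unlock only, again with hypothetical types:

    // Minimal model of the LM_LEGACY fast-unlock path (illustration only).
    #include <atomic>
    #include <cstdint>

    struct LockBox { uintptr_t displaced_header; };

    bool legacy_fast_unlock(std::atomic<uintptr_t>& mark, LockBox* box) {
        uintptr_t dhw = box->displaced_header;
        if (dhw == 0) {
            return true;                 // recursive unlock: nothing to restore
        }
        // Still stack-locked iff the mark word holds the address of our box;
        // if so, swing the displaced header back into the object.
        uintptr_t expected = reinterpret_cast<uintptr_t>(box);
        return mark.compare_exchange_strong(expected, dhw,
                                            std::memory_order_release);
    }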
  

*** 7437,20 ***
  
  // Load Narrow Klass Pointer
  instruct loadNKlass(iRegNNoSp dst, memory4 mem)
  %{
    match(Set dst (LoadNKlass mem));
!   predicate(!needs_acquiring_load(n));
  
    ins_cost(4 * INSN_COST);
    format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
  
    ins_encode(aarch64_enc_ldrw(dst, mem));
  
    ins_pipe(iload_reg_mem);
  %}
  
  // Load Float
  instruct loadF(vRegF dst, memory4 mem)
  %{
    match(Set dst (LoadF mem));
    predicate(!needs_acquiring_load(n));
--- 7473,34 ---
  
  // Load Narrow Klass Pointer
  instruct loadNKlass(iRegNNoSp dst, memory4 mem)
  %{
    match(Set dst (LoadNKlass mem));
!   predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);
  
    ins_cost(4 * INSN_COST);
    format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
  
    ins_encode(aarch64_enc_ldrw(dst, mem));
  
    ins_pipe(iload_reg_mem);
  %}
  
+ instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem, rFlagsReg cr)
+ %{
+   match(Set dst (LoadNKlass mem));
+   effect(KILL cr);
+   predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);
+ 
+   ins_cost(4 * INSN_COST);
+   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
+   ins_encode %{
+     __ load_nklass_compact($dst$$Register, $mem$$base$$Register, $mem$$index$$Register, $mem$$scale, $mem$$disp);
+   %}
+   ins_pipe(pipe_slow);
+ %}
+ 
  // Load Float
  instruct loadF(vRegF dst, memory4 mem)
  %{
    match(Set dst (LoadF mem));
    predicate(!needs_acquiring_load(n));
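
The new loadNKlassCompactHeaders rule is selected when UseCompactObjectHeaders is on: the narrow klass id then lives in the upper bits of the object's mark word rather than in a separate klass field, so the rule calls load_nklass_compact and declares KILL cr and pipe_slow instead of the plain ldrw. The decode sketched below is conceptual only; the shift value is a placeholder assumption, not the VM's actual layout constant.

    // Conceptual decode only; the shift is an assumed placeholder value.
    #include <cstdint>

    constexpr unsigned kAssumedKlassShift = 32;   // NOT the real markWord constant

    inline uint32_t narrow_klass_from_mark(uint64_t mark_word) {
        // The upper bits of the mark word carry the compressed class pointer.
        return static_cast<uint32_t>(mark_word >> kAssumedKlassShift);
    }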