
src/hotspot/cpu/aarch64/aarch64.ad


 1895     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 1896 
 1897     Label L_skip_barrier;
 1898 
 1899     __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
 1900     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 1901     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 1902     __ bind(L_skip_barrier);
 1903   }
 1904 
 1905   if (C->max_vector_size() >= 16) {
 1906     __ reinitialize_ptrue();
 1907   }
 1908 
 1909   int bangsize = C->output()->bang_size_in_bytes();
 1910   if (C->output()->need_stack_bang(bangsize))
 1911     __ generate_stack_overflow_check(bangsize);
 1912 
 1913   __ build_frame(framesize);
 1914 
 1915   if (C->stub_function() == NULL) {
 1916     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 1917     bs->nmethod_entry_barrier(&_masm);
 1918   }
 1919 
 1920   if (VerifyStackAtCalls) {
 1921     Unimplemented();
 1922   }
 1923 
 1924   C->output()->set_frame_complete(cbuf.insts_size());
 1925 
 1926   if (C->has_mach_constant_base_node()) {
 1927     // NOTE: We set the table base offset here because uses of the constant
 1928     // table might be emitted before MachConstantBaseNode.
 1929     ConstantTable& constant_table = C->output()->constant_table();
 1930     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 1931   }
 1932 }
 1933 
 1934 uint MachPrologNode::size(PhaseRegAlloc* ra_) const

 1969     st->print("bhi #slow_path");
 1970   }
 1971 }
 1972 #endif
 1973 
 1974 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 1975   Compile* C = ra_->C;
 1976   C2_MacroAssembler _masm(&cbuf);
 1977   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1978 
 1979   __ remove_frame(framesize);
 1980 
 1981   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1982     __ reserved_stack_check();
 1983   }
 1984 
 1985   if (do_polling() && C->is_method_compilation()) {
 1986     Label dummy_label;
 1987     Label* code_stub = &dummy_label;
 1988     if (!C->output()->in_scratch_emit_size()) {
 1989       code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());


 1990     }
 1991     __ relocate(relocInfo::poll_return_type);
 1992     __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
 1993   }
 1994 }
 1995 
 1996 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1997   // Variable size. Determine dynamically.
 1998   return MachNode::size(ra_);
 1999 }
 2000 
 2001 int MachEpilogNode::reloc() const {
 2002   // Return number of relocatable values contained in this instruction.
 2003   return 1; // 1 for polling page.
 2004 }
 2005 
 2006 const Pipeline * MachEpilogNode::pipeline() const {
 2007   return MachNode::pipeline_class();
 2008 }
 2009 

 3811 
 3812     assert_different_registers(oop, box, tmp, disp_hdr);
 3813 
 3814     // Load markWord from object into displaced_header.
 3815     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
 3816 
 3817     if (DiagnoseSyncOnValueBasedClasses != 0) {
 3818       __ load_klass(tmp, oop);
 3819       __ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 3820       __ tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 3821       __ br(Assembler::NE, cont);
 3822     }
 3823 
 3824     if (UseBiasedLocking && !UseOptoBiasInlining) {
 3825       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
 3826     }
 3827 
 3828     // Check for existing monitor
 3829     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
 3830 
 3831     // Set tmp to be (markWord of object | UNLOCK_VALUE).
 3832     __ orr(tmp, disp_hdr, markWord::unlocked_value);
 3833 
 3834     // Initialize the box. (Must happen before we update the object mark!)
 3835     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3836 
 3837     // Compare object markWord with an unlocked value (tmp) and if
 3838     // equal exchange the stack address of our box with object markWord.
 3839     // On failure disp_hdr contains the possibly locked markWord.
 3840     __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
 3841                /*release*/ true, /*weak*/ false, disp_hdr);
 3842     __ br(Assembler::EQ, cont);
 3843 
 3844     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 3845 
 3846     // If the compare-and-exchange succeeded, then we found an unlocked
 3847     // object, have now locked it, and continue at label cont.
 3848 
 3849     __ bind(cas_failed);
 3850     // We did not see an unlocked object so try the fast recursive case.
 3851 
 3852     // Check if the owner is self by comparing the value in the
 3853     // markWord of object (disp_hdr) with the stack pointer.
 3854     __ mov(rscratch1, sp);
 3855     __ sub(disp_hdr, disp_hdr, rscratch1);
 3856     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
 3857     // If the result below is zero the owner is our own frame, so we store
 3858     // 0 as the displaced header in the box to mark a recursive lock.
 3859     __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
 3860     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3861 
 3862     __ b(cont);
 3863 
 3864     // Handle existing monitor.
 3865     __ bind(object_has_monitor);
 3866 
 3867     // The object's monitor m is unlocked iff m->owner == NULL,
 3868     // otherwise m->owner may contain a thread or a stack address.
 3869     //
 3870     // Try to CAS m->owner from NULL to current thread.
 3871     __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
 3872     __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
 3873                /*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
 3874 
 3875     // Store a non-null value into the box to avoid looking like a re-entrant
 3876     // lock. The fast-path monitor unlock code checks for
 3877     // markWord::monitor_value so use markWord::unused_mark which has the
 3878     // relevant bit set, and also matches ObjectSynchronizer::enter.
 3879     __ mov(tmp, (address)markWord::unused_mark().value());
 3880     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3881 

 3882     __ br(Assembler::EQ, cont); // CAS success means locking succeeded
 3883 
 3884     __ cmp(rscratch1, rthread);
 3885     __ br(Assembler::NE, cont); // Check for recursive locking
 3886 
 3887     // Recursive lock case
 3888     __ increment(Address(disp_hdr, ObjectMonitor::recursions_offset_in_bytes() - markWord::monitor_value), 1);
 3889     // flag == EQ still from the cmp above, checking if this is a reentrant lock
 3890 
 3891     __ bind(cont);
 3892     // flag == EQ indicates success
 3893     // flag == NE indicates failure
 3894   %}
 3895 
 3896   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
 3897     C2_MacroAssembler _masm(&cbuf);
 3898     Register oop = as_Register($object$$reg);
 3899     Register box = as_Register($box$$reg);
 3900     Register disp_hdr = as_Register($tmp$$reg);
 3901     Register tmp = as_Register($tmp2$$reg);
 3902     Label cont;
 3903     Label object_has_monitor;
 3904 
 3905     assert_different_registers(oop, box, tmp, disp_hdr);
 3906 
 3907     if (UseBiasedLocking && !UseOptoBiasInlining) {
 3908       __ biased_locking_exit(oop, tmp, cont);
 3909     }
 3910 
 3911     // Find the lock address and load the displaced header from the stack.
 3912     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

 3913 
 3914     // If the displaced header is 0, we have a recursive unlock.
 3915     __ cmp(disp_hdr, zr);
 3916     __ br(Assembler::EQ, cont);

 3917 
 3918     // Handle existing monitor.
 3919     __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
 3920     __ tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
 3921 
 3922     // Check if it is still a lightweight lock; this is true if we
 3923     // see the stack address of the BasicLock in the markWord of the
 3924     // object.
 3925 
 3926     __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
 3927                /*release*/ true, /*weak*/ false, tmp);
 3928     __ b(cont);
 3929 
 3930     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 3931 
 3932     // Handle existing monitor.
 3933     __ bind(object_has_monitor);
 3934     STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
 3935     __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
 3936     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
 3937 
 3938     Label notRecursive;
 3939     __ cbz(disp_hdr, notRecursive);
 3940 
 3941     // Recursive lock
 3942     __ sub(disp_hdr, disp_hdr, 1u);
 3943     __ str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
 3944     __ cmp(disp_hdr, disp_hdr); // Self-compare sets EQ to signal success
 3945     __ b(cont);
 3946 
 3947     __ bind(notRecursive);
 3948     __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
 3949     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
 3950     __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
 3951     __ cmp(rscratch1, zr); // Sets flags for result
 3952     __ cbnz(rscratch1, cont);
 3953     // Need a release store so critical-section writes are visible before unlock.
 3954     __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
 3955     __ stlr(zr, tmp); // set unowned

 7422 %}
 7423 
 7424 // Load Klass Pointer
 7425 instruct loadKlass(iRegPNoSp dst, memory8 mem)
 7426 %{
 7427   match(Set dst (LoadKlass mem));
 7428   predicate(!needs_acquiring_load(n));
 7429 
 7430   ins_cost(4 * INSN_COST);
 7431   format %{ "ldr  $dst, $mem\t# class" %}
 7432 
 7433   ins_encode(aarch64_enc_ldr(dst, mem));
 7434 
 7435   ins_pipe(iload_reg_mem);
 7436 %}
 7437 
 7438 // Load Narrow Klass Pointer
 7439 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
 7440 %{
 7441   match(Set dst (LoadNKlass mem));
 7442   predicate(!needs_acquiring_load(n));
 7443 
 7444   ins_cost(4 * INSN_COST);
 7445   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
 7446 
 7447   ins_encode(aarch64_enc_ldrw(dst, mem));
 7448 
 7449   ins_pipe(iload_reg_mem);
 7450 %}
 7451 
 7452 // Load Float
 7453 instruct loadF(vRegF dst, memory4 mem)
 7454 %{
 7455   match(Set dst (LoadF mem));
 7456   predicate(!needs_acquiring_load(n));
 7457 
 7458   ins_cost(4 * INSN_COST);
 7459   format %{ "ldrs  $dst, $mem\t# float" %}
 7460 
 7461   ins_encode( aarch64_enc_ldrs(dst, mem) );
 7462 
 7463   ins_pipe(pipe_class_memory);
 7464 %}
 7465 
 7466 // Load Double
 7467 instruct loadD(vRegD dst, memory8 mem)
 7468 %{
 7469   match(Set dst (LoadD mem));
 7470   predicate(!needs_acquiring_load(n));
 7471 

 1895     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 1896 
 1897     Label L_skip_barrier;
 1898 
 1899     __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
 1900     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 1901     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 1902     __ bind(L_skip_barrier);
 1903   }
 1904 
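
A note on the block above: for a static method the prologue checks that the
holder class is initialized (or is being initialized by the current thread)
before entering compiled code, and otherwise tail-jumps to the
handle_wrong_method stub so the call is re-resolved. A minimal standalone C++
sketch of the decision clinit_barrier emits, using hypothetical stand-in types
rather than HotSpot's InstanceKlass/JavaThread:

    enum class InitState { allocated, being_initialized, fully_initialized };
    struct Klass  { InitState state; const void* init_thread; };
    struct Thread { int id; };

    // Proceed only if the holder is fully initialized, or is being
    // initialized by the current thread (e.g. from within <clinit>).
    bool clinit_barrier_passes(const Klass* holder, const Thread* self) {
      return holder->state == InitState::fully_initialized ||
             (holder->state == InitState::being_initialized &&
              holder->init_thread == self);
    }
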
 1905   if (C->max_vector_size() >= 16) {
 1906     __ reinitialize_ptrue();
 1907   }
 1908 
 1909   int bangsize = C->output()->bang_size_in_bytes();
 1910   if (C->output()->need_stack_bang(bangsize))
 1911     __ generate_stack_overflow_check(bangsize);
 1912 
 1913   __ build_frame(framesize);
 1914 
 1915   int max_monitors = C->method() != NULL ? C->max_monitors() : 0;
 1916   if (UseFastLocking && max_monitors > 0) {
 1917     C2CheckLockStackStub* stub = new (C->comp_arena()) C2CheckLockStackStub();
 1918     C->output()->add_stub(stub);
 1919     __ ldr(r9, Address(rthread, JavaThread::lock_stack_current_offset()));
 1920     __ add(r9, r9, max_monitors * oopSize);
 1921     __ ldr(r10, Address(rthread, JavaThread::lock_stack_limit_offset()));
 1922     __ cmp(r9, r10);
 1923     __ br(Assembler::GE, stub->entry());
 1924     __ bind(stub->continuation());
 1925   }
 1926 
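
The new block above is the interesting prologue change: with UseFastLocking a
method that may lock up to max_monitors monitors verifies once, at entry, that
the thread's lock stack has that much headroom, branching to an out-of-line
C2CheckLockStackStub otherwise. A minimal C++ sketch of the same comparison;
the field names are stand-ins mirroring JavaThread::lock_stack_current_offset
and lock_stack_limit_offset:

    #include <cstddef>
    #include <cstdint>

    struct ThreadModel {               // hypothetical stand-in for JavaThread
      uintptr_t lock_stack_current;    // address of the next free slot
      uintptr_t lock_stack_limit;      // first address past the usable area
    };

    constexpr std::size_t oop_size = sizeof(void*);

    // True if max_monitors more slots still fit; the prologue takes the
    // stub's slow path when this would be false (the br GE above).
    bool lock_stack_has_room(const ThreadModel* t, int max_monitors) {
      return t->lock_stack_current + max_monitors * oop_size <
             t->lock_stack_limit;
    }
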
 1927   if (C->stub_function() == NULL) {
 1928     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 1929     bs->nmethod_entry_barrier(&_masm);
 1930   }
 1931 
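
The nmethod entry barrier emitted above lets concurrent GCs intercept entry
into not-yet-processed compiled code. A schematic of the armed/disarmed check
such a barrier performs; the concrete guard layout is GC-specific, so these
names and fields are illustrative only:

    #include <atomic>

    std::atomic<int> global_disarm_value{0};   // advanced by the GC each cycle

    struct NMethodModel { std::atomic<int> guard; };

    // Disarmed means the nmethod's embedded oops are current and execution
    // may proceed; otherwise a runtime call repairs the nmethod first.
    bool entry_barrier_disarmed(const NMethodModel* nm) {
      return nm->guard.load(std::memory_order_acquire) ==
             global_disarm_value.load(std::memory_order_relaxed);
    }
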
 1932   if (VerifyStackAtCalls) {
 1933     Unimplemented();
 1934   }
 1935 
 1936   C->output()->set_frame_complete(cbuf.insts_size());
 1937 
 1938   if (C->has_mach_constant_base_node()) {
 1939     // NOTE: We set the table base offset here because uses of the constant
 1940     // table might be emitted before MachConstantBaseNode.
 1941     ConstantTable& constant_table = C->output()->constant_table();
 1942     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 1943   }
 1944 }
 1945 
 1946 uint MachPrologNode::size(PhaseRegAlloc* ra_) const

 1981     st->print("bhi #slow_path");
 1982   }
 1983 }
 1984 #endif
 1985 
 1986 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 1987   Compile* C = ra_->C;
 1988   C2_MacroAssembler _masm(&cbuf);
 1989   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1990 
 1991   __ remove_frame(framesize);
 1992 
 1993   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1994     __ reserved_stack_check();
 1995   }
 1996 
 1997   if (do_polling() && C->is_method_compilation()) {
 1998     Label dummy_label;
 1999     Label* code_stub = &dummy_label;
 2000     if (!C->output()->in_scratch_emit_size()) {
 2001       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
 2002       C->output()->add_stub(stub);
 2003       code_stub = &stub->entry();
 2004     }
 2005     __ relocate(relocInfo::poll_return_type);
 2006     __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
 2007   }
 2008 }
 2009 
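
Note how both new code paths (the lock-stack check and the return poll) use
the same C2 code-stub pattern: the stub object is allocated in the compiler
arena, registered via add_stub(), branched to through its entry() label, and
its body is emitted out of line after the method so the hot path stays
straight-line. A simplified C++ sketch of that pattern, with toy Label and
output types rather than HotSpot's:

    #include <vector>

    struct Label { int pos = -1; };          // toy assembler label

    struct CodeStubSketch {                  // models the C2CodeStub idea
      Label entry_;                          // inline code branches here ...
      Label continuation_;                   // ... and the stub jumps back here
      virtual void emit() = 0;               // body emitted after the method
      virtual ~CodeStubSketch() = default;
    };

    struct OutputSketch {                    // models PhaseOutput's stub list
      std::vector<CodeStubSketch*> stubs;
      void add_stub(CodeStubSketch* s) { stubs.push_back(s); }
      void emit_stubs() { for (CodeStubSketch* s : stubs) s->emit(); }
    };
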
 2010 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 2011   // Variable size. Determine dynamically.
 2012   return MachNode::size(ra_);
 2013 }
 2014 
 2015 int MachEpilogNode::reloc() const {
 2016   // Return number of relocatable values contained in this instruction.
 2017   return 1; // 1 for polling page.
 2018 }
 2019 
 2020 const Pipeline * MachEpilogNode::pipeline() const {
 2021   return MachNode::pipeline_class();
 2022 }
 2023 

 3825 
 3826     assert_different_registers(oop, box, tmp, disp_hdr);
 3827 
 3828     // Load markWord from object into displaced_header.
 3829     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
 3830 
 3831     if (DiagnoseSyncOnValueBasedClasses != 0) {
 3832       __ load_klass(tmp, oop);
 3833       __ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 3834       __ tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 3835       __ br(Assembler::NE, cont);
 3836     }
 3837 
 3838     if (UseBiasedLocking && !UseOptoBiasInlining) {
 3839       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
 3840     }
 3841 
 3842     // Check for existing monitor
 3843     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
 3844 
 3845     if (UseFastLocking) {
 3846       __ fast_lock(oop, disp_hdr, tmp, rscratch1, cont, false);
 3847       // Indicate success at cont.
 3848       __ cmp(oop, oop);
 3849     } else {
 3850       // Set tmp to be (markWord of object | UNLOCK_VALUE).
 3851       __ orr(tmp, disp_hdr, markWord::unlocked_value);
 3852 
 3853       // Initialize the box. (Must happen before we update the object mark!)
 3854       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3855 
 3856       // Compare object markWord with an unlocked value (tmp) and if
 3857       // equal exchange the stack address of our box with object markWord.
 3858       // On failure disp_hdr contains the possibly locked markWord.
 3859       __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
 3860                  /*release*/ true, /*weak*/ false, disp_hdr);
 3861       __ br(Assembler::EQ, cont);
 3862 
 3863       assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 3864 
 3865       // If the compare-and-exchange succeeded, then we found an unlocked
 3866       // object, have now locked it, and continue at label cont.
 3867 
 3868       __ bind(cas_failed);
 3869       // We did not see an unlocked object so try the fast recursive case.
 3870 
 3871       // Check if the owner is self by comparing the value in the
 3872       // markWord of object (disp_hdr) with the stack pointer.
 3873       __ mov(rscratch1, sp);
 3874       __ sub(disp_hdr, disp_hdr, rscratch1);
 3875       __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
 3876       // If the result below is zero the owner is our own frame, so we store
 3877       // 0 as the displaced header in the box to mark a recursive lock.
 3878       __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
 3879       __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3880     }
 3881     __ b(cont);
 3882 
 3883     // Handle existing monitor.
 3884     __ bind(object_has_monitor);
 3885 
 3886     // The object's monitor m is unlocked iff m->owner == NULL,
 3887     // otherwise m->owner may contain a thread or a stack address.
 3888     //
 3889     // Try to CAS m->owner from NULL to current thread.
 3890     __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
 3891     __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
 3892                /*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
 3893 
 3894     if (!UseFastLocking) {
 3895       // Store a non-null value into the box to avoid looking like a re-entrant
 3896       // lock. The fast-path monitor unlock code checks for
 3897       // markWord::monitor_value so use markWord::unused_mark which has the
 3898       // relevant bit set, and also matches ObjectSynchronizer::enter.
 3899       __ mov(tmp, (address)markWord::unused_mark().value());
 3900       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3901     }
 3902     __ br(Assembler::EQ, cont); // CAS success means locking succeeded
 3903 
 3904     __ cmp(rscratch1, rthread);
 3905     __ br(Assembler::NE, cont); // Check for recursive locking
 3906 
 3907     // Recursive lock case
 3908     __ increment(Address(disp_hdr, ObjectMonitor::recursions_offset_in_bytes() - markWord::monitor_value), 1);
 3909     // flag == EQ still from the cmp above, checking if this is a reentrant lock
 3910 
 3911     __ bind(cont);
 3912     // flag == EQ indicates success
 3913     // flag == NE indicates failure
 3914   %}
 3915 
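
To summarize aarch64_enc_fast_lock above: if the markWord's monitor bit is set
the encoding CASes ObjectMonitor::owner from NULL to the current thread,
counting recursion when the owner is already this thread; otherwise it either
takes the new UseFastLocking path (fast_lock) or the classic displaced-header
path. A small C++ model of the displaced-header branch; the tag constant is a
stand-in for markWord's, and the CAS models the cmpxchg the encoding emits:

    #include <atomic>
    #include <cstdint>

    constexpr uintptr_t unlocked_value = 0b01;   // model of markWord tag bit

    // Try to swing an unlocked mark to a pointer to the on-stack box.
    bool try_stack_lock(std::atomic<uintptr_t>& mark, uintptr_t* box) {
      uintptr_t unlocked =
          mark.load(std::memory_order_relaxed) | unlocked_value;
      box[0] = unlocked;                 // displaced header, set before CAS
      uintptr_t expected = unlocked;
      return mark.compare_exchange_strong(
          expected, reinterpret_cast<uintptr_t>(box),
          std::memory_order_acq_rel);
    }

On CAS failure the encoding falls through to the recursive-stack-lock test
above rather than inflating immediately.
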
 3916   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
 3917     C2_MacroAssembler _masm(&cbuf);
 3918     Register oop = as_Register($object$$reg);
 3919     Register box = as_Register($box$$reg);
 3920     Register disp_hdr = as_Register($tmp$$reg);
 3921     Register tmp = as_Register($tmp2$$reg);
 3922     Label cont;
 3923     Label object_has_monitor;
 3924 
 3925     assert_different_registers(oop, box, tmp, disp_hdr);
 3926 
 3927     if (UseBiasedLocking && !UseOptoBiasInlining) {
 3928       __ biased_locking_exit(oop, tmp, cont);
 3929     }
 3930 
 3931     if (!UseFastLocking) {
 3932       // Find the lock address and load the displaced header from the stack.
 3933       __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3934 
 3935       // If the displaced header is 0, we have a recursive unlock.
 3936       __ cmp(disp_hdr, zr);
 3937       __ br(Assembler::EQ, cont);
 3938     }
 3939 
 3940     // Handle existing monitor.
 3941     __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
 3942     __ tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
 3943 



 3944 
 3945     if (UseFastLocking) {
 3946       __ fast_unlock(oop, tmp, box, disp_hdr, cont);
 3947       // Indicate success at cont.
 3948       __ cmp(oop, oop);
 3949     } else {
 3950       // Check if it is still a lightweight lock; this is true if we
 3951       // see the stack address of the BasicLock in the markWord of the
 3952       // object.
 3953 
 3954       __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
 3955                  /*release*/ true, /*weak*/ false, tmp);
 3956     }
 3957     __ b(cont);
 3958     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 3959 
 3960     // Handle existing monitor.
 3961     __ bind(object_has_monitor);
 3962     STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
 3963     __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
 3964 
 3965     if (UseFastLocking) {
 3966       // If the owner is anonymous, we need to fix it -- in an outline stub.
 3967       Register tmp2 = disp_hdr;
 3968       __ ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
 3969       // We cannot use tbnz here because the target might be too far away
 3970       // to be encoded.
 3971       __ tst(tmp2, (uint64_t)(intptr_t) ANONYMOUS_OWNER);
 3972       C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
 3973       Compile::current()->output()->add_stub(stub);
 3974       __ br(Assembler::NE, stub->entry());
 3975       __ bind(stub->continuation());
 3976     }
 3977 
 3978     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
 3979 
 3980     Label notRecursive;
 3981     __ cbz(disp_hdr, notRecursive);
 3982 
 3983     // Recursive lock
 3984     __ sub(disp_hdr, disp_hdr, 1u);
 3985     __ str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
 3986     __ cmp(disp_hdr, disp_hdr); // Self-compare sets EQ to signal success
 3987     __ b(cont);
 3988 
 3989     __ bind(notRecursive);
 3990     __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
 3991     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
 3992     __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
 3993     __ cmp(rscratch1, zr); // Sets flags for result
 3994     __ cbnz(rscratch1, cont);
 3995     // Need a release store so critical-section writes are visible before unlock.
 3996     __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
 3997     __ stlr(zr, tmp); // set unowned
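
The notRecursive tail above is the ObjectMonitor exit protocol: decrement the
recursion count if this is a nested lock; otherwise release the monitor only
when nothing is queued on EntryList or cxq, publishing the release with a
store-release (the stlr) into the owner field. A C++ model of that decision;
the field names mirror ObjectMonitor's but the types are not the VM's:

    #include <atomic>
    #include <cstdint>

    struct MonitorModel {
      std::atomic<void*> owner;
      intptr_t recursions;
      void* entry_list;                // models ObjectMonitor::_EntryList
      void* cxq;                       // models ObjectMonitor::_cxq
    };

    // Returns true if the fast path fully released the monitor; false means
    // queued waiters exist and the slow path must hand the monitor off.
    bool fast_exit(MonitorModel* m) {
      if (m->recursions != 0) { m->recursions--; return true; }
      if (m->entry_list != nullptr || m->cxq != nullptr)
        return false;
      m->owner.store(nullptr, std::memory_order_release);  // stlr zr above
      return true;
    }
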

 7464 %}
 7465 
 7466 // Load Klass Pointer
 7467 instruct loadKlass(iRegPNoSp dst, memory8 mem)
 7468 %{
 7469   match(Set dst (LoadKlass mem));
 7470   predicate(!needs_acquiring_load(n));
 7471 
 7472   ins_cost(4 * INSN_COST);
 7473   format %{ "ldr  $dst, $mem\t# class" %}
 7474 
 7475   ins_encode(aarch64_enc_ldr(dst, mem));
 7476 
 7477   ins_pipe(iload_reg_mem);
 7478 %}
 7479 
 7480 // Load Narrow Klass Pointer
 7481 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
 7482 %{
 7483   match(Set dst (LoadNKlass mem));
 7484   predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);
 7485 
 7486   ins_cost(4 * INSN_COST);
 7487   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
 7488 
 7489   ins_encode(aarch64_enc_ldrw(dst, mem));
 7490 
 7491   ins_pipe(iload_reg_mem);
 7492 %}
 7493 
 7494 instruct loadNKlassLilliput(iRegNNoSp dst, memory4 mem, rFlagsReg cr)
 7495 %{
 7496   match(Set dst (LoadNKlass mem));
 7497   effect(TEMP_DEF dst, KILL cr);
 7498   predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);
 7499 
 7500   ins_cost(4 * INSN_COST);
 7501   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
 7502   ins_encode %{
 7503     assert($mem$$disp == oopDesc::klass_offset_in_bytes(), "expect correct offset");
 7504     assert($mem$$index$$Register == noreg, "expect no index");
 7505     Register dst = $dst$$Register;
 7506     Register obj = $mem$$base$$Register;
 7507     C2LoadNKlassStub* stub = new (Compile::current()->comp_arena()) C2LoadNKlassStub(dst);
 7508     Compile::current()->output()->add_stub(stub);
 7509     __ ldr(dst, Address(obj, oopDesc::mark_offset_in_bytes()));
 7510     // NOTE: We can't use tbnz here, because the target is sometimes too far away
 7511     // and cannot be encoded.
 7512     __ tst(dst, markWord::monitor_value);
 7513     __ br(Assembler::NE, stub->entry());
 7514     __ bind(stub->continuation());
 7515     __ lsr(dst, dst, markWord::klass_shift);
 7516   %}
 7517   ins_pipe(pipe_slow);
 7518 %}
 7519 
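
loadNKlassLilliput above is the Lilliput/compact-headers variant: the narrow
klass now lives in the upper bits of the markWord, so the inline path loads
the mark, bails to C2LoadNKlassStub when the monitor bit says the mark has
been displaced into an ObjectMonitor, and otherwise just shifts the klass
bits down. A C++ model of the inline path; the bit-layout constants are
illustrative, not the exact Lilliput encoding:

    #include <cstdint>

    constexpr uint64_t monitor_bit = 0b10;   // inflated-lock tag (model)
    constexpr int klass_shift = 32;          // illustrative shift amount

    // Returns false when the mark is displaced and the stub must fetch the
    // real header from the ObjectMonitor; otherwise decodes inline (the lsr).
    bool decode_nklass(uint64_t mark, uint32_t* nklass_out) {
      if (mark & monitor_bit) return false;  // tst + br NE to stub above
      *nklass_out = static_cast<uint32_t>(mark >> klass_shift);
      return true;
    }
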
 7520 // Load Float
 7521 instruct loadF(vRegF dst, memory4 mem)
 7522 %{
 7523   match(Set dst (LoadF mem));
 7524   predicate(!needs_acquiring_load(n));
 7525 
 7526   ins_cost(4 * INSN_COST);
 7527   format %{ "ldrs  $dst, $mem\t# float" %}
 7528 
 7529   ins_encode( aarch64_enc_ldrs(dst, mem) );
 7530 
 7531   ins_pipe(pipe_class_memory);
 7532 %}
 7533 
 7534 // Load Double
 7535 instruct loadD(vRegD dst, memory8 mem)
 7536 %{
 7537   match(Set dst (LoadD mem));
 7538   predicate(!needs_acquiring_load(n));
 7539 