< prev index next >

src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp

Print this page




  26 
  27 #include "precompiled.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "gc_interface/collectedHeap.hpp"
  38 #include "memory/barrierSet.hpp"
  39 #include "memory/cardTableModRefBS.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "vmreg_aarch64.inline.hpp"
  44 
  45 



  46 
  47 #ifndef PRODUCT
  48 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  49 #else
  50 #define COMMENT(x)
  51 #endif
  52 
  53 NEEDS_CLEANUP // remove these definitions?
  54 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  55 const Register SYNC_header = r0;   // synchronization header
  56 const Register SHIFT_count = r0;   // where count for shift operations must be
  57 
  58 #define __ _masm->
  59 
  60 
  61 static void select_different_registers(Register preserve,
  62                                        Register extra,
  63                                        Register &tmp1,
  64                                        Register &tmp2) {
  65   if (tmp1 == preserve) {


1595     __ mov(dst, 1);
1596     __ bind(done);
1597   } else {
1598     ShouldNotReachHere();
1599   }
1600 }
1601 
// Emit a 32-bit (word) compare-and-swap on *addr: if *addr == cmpval,
// store newval. Leaves rscratch1 == 1 if the CAS failed (flags NE after
// cmpxchg), 0 on success. The trailing AnyAny membar fences the exchange.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1607 
// Emit a 64-bit (xword) compare-and-swap on *addr: if *addr == cmpval,
// store newval. Leaves rscratch1 == 1 if the CAS failed (flags NE after
// cmpxchg), 0 on success. The trailing AnyAny membar fences the exchange.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1613 
1614 

1615 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1616   assert(VM_Version::supports_cx8(), "wrong machine");
1617   Register addr = as_reg(op->addr());
1618   Register newval = as_reg(op->new_value());
1619   Register cmpval = as_reg(op->cmp_value());
1620   Label succeed, fail, around;

1621 
1622   if (op->code() == lir_cas_obj) {


1623     if (UseCompressedOops) {
1624       Register t1 = op->tmp1()->as_register();
1625       assert(op->tmp1()->is_valid(), "must be");
1626       __ encode_heap_oop(t1, cmpval);
1627       cmpval = t1;
1628       __ encode_heap_oop(rscratch2, newval);
1629       newval = rscratch2;
1630       casw(addr, newval, cmpval);












1631     } else {
1632       casl(addr, newval, cmpval);








1633     }
1634   } else if (op->code() == lir_cas_int) {
1635     casw(addr, newval, cmpval);

1636   } else {
1637     casl(addr, newval, cmpval);

1638   }
1639 }
1640 
1641 
1642 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1643 
1644   Assembler::Condition acond, ncond;
1645   switch (condition) {
1646   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1647   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1648   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1649   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1650   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1651   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1652   case lir_cond_belowEqual:   Unimplemented(); break;
1653   case lir_cond_aboveEqual:   Unimplemented(); break;
1654   default:                    ShouldNotReachHere();
1655   }
1656 
1657   assert(result->is_single_cpu() || result->is_double_cpu(),


2862 
2863 
// Emit arithmetic negation of `left` into `dest`, dispatching on the
// operand kind: 32-bit GPR (negw), 64-bit GPR (neg), single-precision
// FPR (fnegs), or double-precision FPR (fnegd). Source and destination
// must be of matching kinds (checked by the asserts).
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fnegd(dest->as_double_reg(), left->as_double_reg());
  }
}
2880 
2881 
// Compute the effective address described by `addr` and load it into the
// low register of `dest` (lea — no memory access is performed).
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}
2885 
2886 
// Emit a call to the runtime entry `dest`. When the target lives inside
// the code cache a pc-relative far_call is used; otherwise the absolute
// address is materialized into rscratch1 and called via blr.
// `info`, if non-NULL, records debug info (oop map) at the call site.
// `result`/`args` are unused here (aarch64 runtime calling convention is
// set up by the caller); `tmp` must be invalid — no temporary is needed.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ mov(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();   // instruction-stream sync after a possible safepoint/patch
}




  26 
  27 #include "precompiled.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "gc_interface/collectedHeap.hpp"
  38 #include "memory/barrierSet.hpp"
  39 #include "memory/cardTableModRefBS.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "vmreg_aarch64.inline.hpp"
  44 
  45 
  46 #if INCLUDE_ALL_GCS
  47 #include "shenandoahBarrierSetAssembler_aarch64.hpp"
  48 #endif
  49 
  50 #ifndef PRODUCT
  51 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  52 #else
  53 #define COMMENT(x)
  54 #endif
  55 
  56 NEEDS_CLEANUP // remove these definitions?
  57 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  58 const Register SYNC_header = r0;   // synchronization header
  59 const Register SHIFT_count = r0;   // where count for shift operations must be
  60 
  61 #define __ _masm->
  62 
  63 
  64 static void select_different_registers(Register preserve,
  65                                        Register extra,
  66                                        Register &tmp1,
  67                                        Register &tmp2) {
  68   if (tmp1 == preserve) {


1598     __ mov(dst, 1);
1599     __ bind(done);
1600   } else {
1601     ShouldNotReachHere();
1602   }
1603 }
1604 
// Emit a 32-bit (word) compare-and-swap on *addr: if *addr == cmpval,
// store newval. Leaves rscratch1 == 1 if the CAS failed (flags NE after
// cmpxchg), 0 on success. The trailing AnyAny membar fences the exchange.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1610 
// Emit a 64-bit (xword) compare-and-swap on *addr: if *addr == cmpval,
// store newval. Leaves rscratch1 == 1 if the CAS failed (flags NE after
// cmpxchg), 0 on success. The trailing AnyAny membar fences the exchange.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1616 
1617 
1618 // Return 1 in rscratch1 if the CAS fails.
1619 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1620   assert(VM_Version::supports_cx8(), "wrong machine");
1621   Register addr = as_reg(op->addr());
1622   Register newval = as_reg(op->new_value());
1623   Register cmpval = as_reg(op->cmp_value());
1624   Label succeed, fail, around;
1625   Register res = op->result_opr()->as_register();
1626 
1627   if (op->code() == lir_cas_obj) {
1628     assert(op->tmp1()->is_valid(), "must be");
1629     Register t1 = op->tmp1()->as_register();
1630     if (UseCompressedOops) {
1631 #if INCLUDE_ALL_GCS
1632       if (UseShenandoahGC && ShenandoahCASBarrier) {
1633         __ encode_heap_oop(t1, cmpval);
1634         cmpval = t1;
1635         assert(op->tmp2()->is_valid(), "must be");
1636         Register t2 = op->tmp2()->as_register();
1637         __ encode_heap_oop(t2, newval);
1638         newval = t2;
1639         ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, res);
1640       } else
1641 #endif
1642       {
1643         __ encode_heap_oop(t1, cmpval);
1644         cmpval = t1;
1645         __ encode_heap_oop(rscratch2, newval);
1646         newval = rscratch2;
1647         casw(addr, newval, cmpval);
1648         __ eorw (res, r8, 1);
1649       }
1650     } else {
1651 #if INCLUDE_ALL_GCS
1652       if (UseShenandoahGC && ShenandoahCASBarrier) {
1653         ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, res);
1654       } else
1655 #endif
1656       {
1657         casl(addr, newval, cmpval);
1658         __ eorw (res, r8, 1);
1659       }
1660     }
1661   } else if (op->code() == lir_cas_int) {
1662     casw(addr, newval, cmpval);
1663     __ eorw (res, r8, 1);
1664   } else {
1665     casl(addr, newval, cmpval);
1666     __ eorw (res, r8, 1);
1667   }
1668 }
1669 
1670 
1671 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1672 
1673   Assembler::Condition acond, ncond;
1674   switch (condition) {
1675   case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
1676   case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
1677   case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
1678   case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
1679   case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
1680   case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
1681   case lir_cond_belowEqual:   Unimplemented(); break;
1682   case lir_cond_aboveEqual:   Unimplemented(); break;
1683   default:                    ShouldNotReachHere();
1684   }
1685 
1686   assert(result->is_single_cpu() || result->is_double_cpu(),


2891 
2892 
2893 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
2894   if (left->is_single_cpu()) {
2895     assert(dest->is_single_cpu(), "expect single result reg");
2896     __ negw(dest->as_register(), left->as_register());
2897   } else if (left->is_double_cpu()) {
2898     assert(dest->is_double_cpu(), "expect double result reg");
2899     __ neg(dest->as_register_lo(), left->as_register_lo());
2900   } else if (left->is_single_fpu()) {
2901     assert(dest->is_single_fpu(), "expect single float result reg");
2902     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2903   } else {
2904     assert(left->is_double_fpu(), "expect double float operand reg");
2905     assert(dest->is_double_fpu(), "expect double float result reg");
2906     __ fnegd(dest->as_double_reg(), left->as_double_reg());
2907   }
2908 }
2909 
2910 
// Compute the effective address described by `addr` and load it into the
// low register of `dest` (lea — no memory access is performed).
// NOTE(review): under Shenandoah, a leal that still needs patching is not
// emitted at all — it deoptimizes instead; presumably patching the address
// computation is unsafe with Shenandoah's barriers. Confirm against the
// Shenandoah C1 support notes.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC && patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }
#endif

  __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}
2921 
2922 
2923 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2924   assert(!tmp->is_valid(), "don't need temporary");
2925 
2926   CodeBlob *cb = CodeCache::find_blob(dest);
2927   if (cb) {
2928     __ far_call(RuntimeAddress(dest));
2929   } else {
2930     __ mov(rscratch1, RuntimeAddress(dest));
2931     __ blr(rscratch1);
2932   }
2933 
2934   if (info != NULL) {
2935     add_call_info_here(info);
2936   }
2937   __ maybe_isb();
2938 }


< prev index next >