< prev index next >

src/hotspot/share/c1/c1_LIRGenerator.cpp

Print this page




1660     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1661   } else {
1662     return _barrier_set->atomic_xchg_at(access, value);
1663   }
1664 }
1665 
1666 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1667                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1668   // Atomic operations are SEQ_CST by default
1669   decorators |= C1_READ_ACCESS;
1670   decorators |= C1_WRITE_ACCESS;
1671   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;  // apply the SEQ_CST default only when no explicit memory order was requested
1672   LIRAccess access(this, decorators, base, offset, type);
1673   if (access.is_raw()) {
1674     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);  // raw access: bypass the GC barrier set's override
1675   } else {
1676     return _barrier_set->atomic_add_at(access, value);
1677   }
1678 }
1679 












1680 void LIRGenerator::do_LoadField(LoadField* x) {
1681   bool needs_patching = x->needs_patching();
1682   bool is_volatile = x->field()->is_volatile();
1683   BasicType field_type = x->field_type();
1684 
1685   CodeEmitInfo* info = NULL;
1686   if (needs_patching) {
1687     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1688     info = state_for(x, x->state_before());
1689   } else if (x->needs_null_check()) {
1690     NullCheck* nc = x->explicit_null_check();
1691     if (nc == NULL) {
1692       info = state_for(x);
1693     } else {
1694       info = state_for(nc);
1695     }
1696   }
1697 
1698   LIRItem object(x->obj(), this);
1699 


1737 }
1738 
1739 
1740 //------------------------java.nio.Buffer.checkIndex------------------------
1741 
1742 // int java.nio.Buffer.checkIndex(int)
1743 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1744   // NOTE: by the time we are in checkIndex() we are guaranteed that
1745   // the buffer is non-null (because checkIndex is package-private and
1746   // only called from within other methods in the buffer).
1747   assert(x->number_of_arguments() == 2, "wrong type");
1748   LIRItem buf  (x->argument_at(0), this);
1749   LIRItem index(x->argument_at(1), this);
1750   buf.load_item();    // materialize the buffer oop in a register
1751   index.load_item();  // materialize the index in a register
1752 
1753   LIR_Opr result = rlock_result(x);
1754   if (GenerateRangeChecks) {
1755     CodeEmitInfo* info = state_for(x);
1756     CodeStub* stub = new RangeCheckStub(info, index.result());  // slow path taken when the range check fails

1757     if (index.result()->is_constant()) {
1758       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);  // in-memory limit vs. constant index
1759       __ branch(lir_cond_belowEqual, T_INT, stub);  // limit <= index (belowEqual) => out of range
1760     } else {
1761       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1762                   java_nio_Buffer::limit_offset(), T_INT, info);  // register index vs. in-memory limit
1763       __ branch(lir_cond_aboveEqual, T_INT, stub);  // index >= limit (aboveEqual) => out of range
1764     }
1765     __ move(index.result(), result);  // the result of checkIndex is the (checked) index itself
1766   } else {
1767     // Just load the index into the result register
1768     __ move(index.result(), result);
1769   }
1770 }
1771 
1772 
1773 //------------------------array access--------------------------------------
1774 
1775 
1776 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1777   LIRItem array(x->array(), this);
1778   array.load_item();
1779   LIR_Opr reg = rlock_result(x);
1780 
1781   CodeEmitInfo* info = NULL;




1660     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1661   } else {
1662     return _barrier_set->atomic_xchg_at(access, value);
1663   }
1664 }
1665 
1666 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1667                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1668   // Atomic operations are SEQ_CST by default
1669   decorators |= C1_READ_ACCESS;
1670   decorators |= C1_WRITE_ACCESS;
1671   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;  // apply the SEQ_CST default only when no explicit memory order was requested
1672   LIRAccess access(this, decorators, base, offset, type);
1673   if (access.is_raw()) {
1674     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);  // raw access: bypass the GC barrier set's override
1675   } else {
1676     return _barrier_set->atomic_add_at(access, value);
1677   }
1678 }
1679 
1680 LIR_Opr LIRGenerator::access_resolve_for_read(DecoratorSet decorators, LIR_Opr obj, CodeEmitInfo* info) {  // resolve obj via the barrier set for a read access
1681   decorators |= C1_READ_ACCESS;  // reads always carry the C1 read-access decorator
1682   LIRAccess access(this, decorators, obj, obj /* dummy */, T_OBJECT, NULL, info);  // offset operand is unused for a resolve; obj is passed as a dummy
1683   return _barrier_set->resolve_for_read(access);  // GC barrier set returns the (possibly forwarded) oop to read from
1684 }
1685 
1686 LIR_Opr LIRGenerator::access_resolve_for_write(DecoratorSet decorators, LIR_Opr obj, CodeEmitInfo* info) {  // resolve obj via the barrier set for a write access
1687   decorators |= C1_WRITE_ACCESS;  // writes always carry the C1 write-access decorator
1688   LIRAccess access(this, decorators, obj, obj /* dummy */, T_OBJECT, NULL, info);  // offset operand is unused for a resolve; obj is passed as a dummy
1689   return _barrier_set->resolve_for_write(access);  // GC barrier set returns the oop to write to
1690 }
1691 
1692 void LIRGenerator::do_LoadField(LoadField* x) {
1693   bool needs_patching = x->needs_patching();
1694   bool is_volatile = x->field()->is_volatile();
1695   BasicType field_type = x->field_type();
1696 
1697   CodeEmitInfo* info = NULL;
1698   if (needs_patching) {
1699     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1700     info = state_for(x, x->state_before());
1701   } else if (x->needs_null_check()) {
1702     NullCheck* nc = x->explicit_null_check();
1703     if (nc == NULL) {
1704       info = state_for(x);
1705     } else {
1706       info = state_for(nc);
1707     }
1708   }
1709 
1710   LIRItem object(x->obj(), this);
1711 


1749 }
1750 
1751 
1752 //------------------------java.nio.Buffer.checkIndex------------------------
1753 
1754 // int java.nio.Buffer.checkIndex(int)
1755 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1756   // NOTE: by the time we are in checkIndex() we are guaranteed that
1757   // the buffer is non-null (because checkIndex is package-private and
1758   // only called from within other methods in the buffer).
1759   assert(x->number_of_arguments() == 2, "wrong type");
1760   LIRItem buf  (x->argument_at(0), this);
1761   LIRItem index(x->argument_at(1), this);
1762   buf.load_item();    // materialize the buffer oop in a register
1763   index.load_item();  // materialize the index in a register
1764 
1765   LIR_Opr result = rlock_result(x);
1766   if (GenerateRangeChecks) {
1767     CodeEmitInfo* info = state_for(x);
1768     CodeStub* stub = new RangeCheckStub(info, index.result());  // slow path taken when the range check fails
1769     LIR_Opr buf_obj = access_resolve_for_read(IN_HEAP | IS_NOT_NULL, buf.result(), NULL);  // resolve the buffer oop through the GC barrier set before reading its limit field
1770     if (index.result()->is_constant()) {
1771       cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);  // in-memory limit vs. constant index
1772       __ branch(lir_cond_belowEqual, T_INT, stub);  // limit <= index (belowEqual) => out of range
1773     } else {
1774       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
1775                   java_nio_Buffer::limit_offset(), T_INT, info);  // register index vs. in-memory limit
1776       __ branch(lir_cond_aboveEqual, T_INT, stub);  // index >= limit (aboveEqual) => out of range
1777     }
1778     __ move(index.result(), result);  // the result of checkIndex is the (checked) index itself
1779   } else {
1780     // Just load the index into the result register
1781     __ move(index.result(), result);
1782   }
1783 }
1784 
1785 
1786 //------------------------array access--------------------------------------
1787 
1788 
1789 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1790   LIRItem array(x->array(), this);
1791   array.load_item();
1792   LIR_Opr reg = rlock_result(x);
1793 
1794   CodeEmitInfo* info = NULL;


< prev index next >