< prev index next >

src/hotspot/share/c1/c1_LIRGenerator.hpp

Print this page




 290 
 291   LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
 292                                    LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
 293 
 294   LIR_Opr access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
 295                                 LIRItem& base, LIRItem& offset, LIRItem& value);
 296 
 297   LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
 298                                LIRItem& base, LIRItem& offset, LIRItem& value);
 299 
 300   // These need to guarantee JMM volatile semantics are preserved on each platform
 301   // and require one implementation per architecture.
 302   LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
 303   LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
 304   LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
 305 
 306 #ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
 307   virtual void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
 308 #endif
 309 



 310   // specific implementations
 311   void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
 312 
 313   static LIR_Opr result_register_for(ValueType* type, bool callee = false);
 314 
 315   ciObject* get_jobject_constant(Value value);
 316 
 317   LIRItemList* invoke_visit_arguments(Invoke* x);
 318   void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
 319 
 320   void trace_block_entry(BlockBegin* block);
 321 
 322   // volatile field operations are never patchable because a klass
 323   // must be loaded to know it's volatile which means that the offset
 324   // is always known as well.
 325   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 326   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 327 
 328   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 329   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);




 290 
 291   LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
 292                                    LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
 293 
 294   LIR_Opr access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
 295                                 LIRItem& base, LIRItem& offset, LIRItem& value);
 296 
 297   LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
 298                                LIRItem& base, LIRItem& offset, LIRItem& value);
 299 
 300   // These need to guarantee JMM volatile semantics are preserved on each platform
 301   // and require one implementation per architecture.
 302   LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
 303   LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
 304   LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
 305 
 306 #ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
 307   virtual void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
 308 #endif
 309 
 310   LIR_Opr access_resolve_for_read(DecoratorSet decorators, LIR_Opr obj, CodeEmitInfo* info);
 311   LIR_Opr access_resolve_for_write(DecoratorSet decorators, LIR_Opr obj, CodeEmitInfo* info);
 312 
 313   // specific implementations
 314   void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
 315 
 316   static LIR_Opr result_register_for(ValueType* type, bool callee = false);
 317 
 318   ciObject* get_jobject_constant(Value value);
 319 
 320   LIRItemList* invoke_visit_arguments(Invoke* x);
 321   void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
 322 
 323   void trace_block_entry(BlockBegin* block);
 324 
 325   // volatile field operations are never patchable because a klass
 326   // must be loaded to know it's volatile which means that the offset
 327   // is always known as well.
 328   void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
 329   void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
 330 
 331   void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
 332   void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);


< prev index next >