< prev index next >

src/hotspot/share/opto/parseHelper.cpp

Print this page




 420   Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
 421   Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
 422   store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
 423 }
 424 
 425 //--------------------------test_for_osr_md_counter_at-------------------------
 426 void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
 427   Node* adr_node = method_data_addressing(md, data, counter_offset);
 428 
 429   const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
 430   Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
 431 
 432   test_counter_against_threshold(cnt, limit);
 433 }
 434 
 435 //-------------------------------set_md_flag_at--------------------------------
 436 void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
 437   Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());
 438 
 439   const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
 440   Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
 441   Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
 442   store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
 443 }
 444 
 445 //----------------------------profile_taken_branch-----------------------------
 446 void Parse::profile_taken_branch(int target_bci, bool force_update) {
 447   // This is a potential osr_site if we have a backedge.
 448   int cur_bci = bci();
 449   bool osr_site =
 450     (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;
 451 
 452   // If we are going to OSR, restart at the target bytecode.
 453   set_bci(target_bci);
 454 
 455   // To do: factor out the limit calculations below. These duplicate
 456   // the similar limit calculations in the interpreter.
 457 
 458   if (method_data_update() || force_update) {
 459     ciMethodData* md = method()->method_data();
 460     assert(md != NULL, "expected valid ciMethodData");
 461     ciProfileData* data = md->bci_to_data(cur_bci);
 462     assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");




 420   Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
 421   Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
 422   store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
 423 }
 424 
 425 //--------------------------test_for_osr_md_counter_at-------------------------
     // Emit IR that loads a profile counter out of the method's MDO at
     // counter_offset and tests it against 'limit' (OSR trigger check).
 426 void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
     // Compute the address of the counter cell inside the MDO.
 427   Node* adr_node = method_data_addressing(md, data, counter_offset);
 428 
 429   const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
     // Plain (unordered) int-sized load of the counter, matching the unordered
     // accesses used by the other MDO helpers in this file.
 430   Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
 431 
 432   test_counter_against_threshold(cnt, limit);
 433 }
 434 
 435 //-------------------------------set_md_flag_at--------------------------------
     // OR flag_constant into the flags field of the profile data's DataLayout
     // header, via an explicit load / or / store IR sequence.
 436 void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
 437   Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());
 438 
 439   const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
     // NOTE(review): the flags field is accessed with int width here (T_INT);
     // the prior version of this code used byte-wide (T_BYTE) accesses --
     // confirm that DataLayout's flags slot is int-sized in this version.
 440   Node* flags = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
 441   Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
     // Unordered store, matching the other MDO update helpers in this file.
 442   store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
 443 }
 444 
 445 //----------------------------profile_taken_branch-----------------------------
 446 void Parse::profile_taken_branch(int target_bci, bool force_update) {
 447   // This is a potential osr_site if we have a backedge.
 448   int cur_bci = bci();
 449   bool osr_site =
 450     (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;
 451 
 452   // If we are going to OSR, restart at the target bytecode.
 453   set_bci(target_bci);
 454 
 455   // To do: factor out the limit calculations below. These duplicate
 456   // the similar limit calculations in the interpreter.
 457 
 458   if (method_data_update() || force_update) {
 459     ciMethodData* md = method()->method_data();
 460     assert(md != NULL, "expected valid ciMethodData");
 461     ciProfileData* data = md->bci_to_data(cur_bci);
 462     assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");


< prev index next >