
src/share/vm/opto/lcm.cpp

  25 #include "precompiled.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "opto/block.hpp"
  28 #include "opto/c2compiler.hpp"
  29 #include "opto/callnode.hpp"
  30 #include "opto/cfgnode.hpp"
  31 #include "opto/machnode.hpp"
  32 #include "opto/runtime.hpp"
  33 #if defined AD_MD_HPP
  34 # include AD_MD_HPP
  35 #elif defined TARGET_ARCH_MODEL_x86_32
  36 # include "adfiles/ad_x86_32.hpp"
  37 #elif defined TARGET_ARCH_MODEL_x86_64
  38 # include "adfiles/ad_x86_64.hpp"
  39 #elif defined TARGET_ARCH_MODEL_sparc
  40 # include "adfiles/ad_sparc.hpp"
  41 #elif defined TARGET_ARCH_MODEL_zero
  42 # include "adfiles/ad_zero.hpp"
  43 #elif defined TARGET_ARCH_MODEL_ppc_64
  44 # include "adfiles/ad_ppc_64.hpp"
  45 #elif defined TARGET_ARCH_MODEL_aarch32
  46 # include "adfiles/ad_aarch32.hpp"
  47 #endif
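
The chain above selects the platform's generated architecture-description header; the aarch32 port adds its own arm at lines 45-46. The choice is driven purely by a target macro on the compile line. A rough sketch, assuming the usual -D define (the exact flags are produced by the HotSpot makefiles, not written by hand):

    // Assumed compile line (illustrative only):
    //   g++ -DTARGET_ARCH_MODEL_aarch32 ... lcm.cpp
    // makes the preprocessor resolve the #elif chain to:
    //   # include "adfiles/ad_aarch32.hpp"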
  48 
  49 // Optimization - Graph Style
  50 
  51 // Check whether val is a not-null-decoded compressed oop,
  52 // i.e. whether it will access the base zone of the heap if it represents NULL.
  53 static bool accesses_heap_base_zone(Node *val) {
  54   if (Universe::narrow_oop_base() != NULL) { // Implies UseCompressedOops.
  55     if (val && val->is_Mach()) {
  56       if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
  57         // This assumes all Decodes with TypePtr::NotNull are matched to nodes that
  58         // decode NULL to point to the heap base (Decode_NN).
  59         if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) {
  60           return true;
  61         }
  62       }
  63       // Must recognize a load operation with a Decode matched in its memory operand.
  64       // We should not reach here except on PPC/AIX, as os::zero_page_read_protected()
  65       // returns true everywhere else. On PPC, no such memory operands
  66       // exist, so a check for such operands has not yet been implemented.
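
With compressed oops and a non-NULL narrow_oop_base, decoding is essentially base + (narrow << shift), so a narrow value of 0 decodes to the heap base rather than to NULL; protecting the base zone therefore turns such an access into a trap. A minimal sketch of that arithmetic, assuming the usual base-plus-shifted-offset scheme (not HotSpot's actual decode path):

    #include <stdint.h>

    // Simplified model of DecodeN over a non-zero heap base.
    static inline char* decode_narrow_oop(char* heap_base, uint32_t narrow, int shift) {
      return heap_base + ((uintptr_t)narrow << shift);  // narrow == 0 -> heap_base
    }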


 446     Node* in = old_tst->in(i3);
 447     old_tst->set_req(i3, NULL);
 448     if (in->outcnt() == 0) {
 449       // Remove dead input node
 450       in->disconnect_inputs(NULL, C);
 451       block->find_remove(in);
 452     }
 453   }
 454 
 455   latency_from_uses(nul_chk);
 456   latency_from_uses(best);
 457 
 458   // insert anti-dependences to defs in this block
 459   if (! best->needs_anti_dependence_check()) {
 460     for (uint k = 1; k < block->number_of_nodes(); k++) {
 461       Node *n = block->get_node(k);
 462       if (n->needs_anti_dependence_check() &&
 463           n->in(LoadNode::Memory) == best->in(StoreNode::Memory)) {
 464         // Found anti-dependent load
 465         insert_anti_dependences(block, n);
 466       }
 467     }
 468   }
 469 }
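
The tail of this function detaches the old test's inputs (removing any that become dead), refreshes the latency of the null check and the hoisted node, and then, when best itself needs no anti-dependence check, scans the block for loads that do and that share best's memory input. In outline (a condensed paraphrase of the code above, not new logic):

    // detach each input of old_tst; delete inputs whose outcnt drops to 0
    // latency_from_uses(nul_chk); latency_from_uses(best);
    // if best is not itself subject to anti-dependence checks:
    //   for every load n in the block with n->in(Memory) == best->in(Memory):
    //     insert_anti_dependences(block, n);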
 470 
 471 
 472 //------------------------------select-----------------------------------------
 473 // Select a nice fellow from the worklist to schedule next. If there is only
 474 // one choice, then use it. Projections take top priority for correctness
 475 // reasons - if I see a projection, then it is next.  There are a number of
 476 // other special cases, for instructions that consume condition codes, etc.
 477 // These are chosen immediately. Some instructions are required to immediately
 478 // precede the last instruction in the block, and these are taken last. Of the
 479 // remaining cases (most), choose the instruction with the greatest latency
 480 // (that is, the greatest number of pseudo-cycles required to the end of the
 481 // routine). If there is a tie, choose the instruction with the most inputs.
 482 Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
 483 
 484   // If only a single entry on the stack, use it
 485   uint cnt = worklist.size();
 486   if (cnt == 1) {
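
The header comment fixes a strict priority order for the pick; condensed as a paraphrase (the tie-breaking details live in the body of select(), which continues beyond this excerpt):

    // 1. worklist.size() == 1           -> the only candidate
    // 2. Projections                    -> immediately (correctness)
    // 3. Condition-code consumers, etc. -> immediately
    // 4. Must-precede-block-end nodes   -> taken last
    // 5. Everything else                -> greatest latency; ties -> most inputs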

