
src/hotspot/share/opto/compile.cpp

  65 #include "opto/phaseX.hpp"
  66 #include "opto/rootnode.hpp"
  67 #include "opto/runtime.hpp"
  68 #include "opto/stringopts.hpp"
  69 #include "opto/type.hpp"
  70 #include "opto/vectornode.hpp"
  71 #include "runtime/arguments.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/timer.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/macros.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1ThreadLocalData.hpp"
  81 #endif // INCLUDE_G1GC
  82 #if INCLUDE_ZGC
  83 #include "gc/z/c2/zBarrierSetC2.hpp"
  84 #endif
  85 #if INCLUDE_SHENANDOAHGC
  86 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  87 #endif
  88 
  89 
  90 // -------------------- Compile::mach_constant_base_node -----------------------
  91 // Constant table base node singleton.
  92 MachConstantBaseNode* Compile::mach_constant_base_node() {
  93   if (_mach_constant_base_node == NULL) {
  94     _mach_constant_base_node = new MachConstantBaseNode();
  95     _mach_constant_base_node->add_req(C->root());
  96   }
  97   return _mach_constant_base_node;
  98 }
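The accessor above is a create-on-first-use singleton; the add_req(C->root()) edge anchors the new node in the graph so it can never be removed as dead. A rough standalone sketch of the same shape (Graph and ConstantBase are invented stand-ins, not HotSpot types):

#include <cstdio>

// Invented toy types; the point is only the create-on-first-use shape.
struct ConstantBase {};

struct Graph {
  ConstantBase* _base = nullptr;

  ConstantBase* constant_base() {
    if (_base == nullptr) {
      _base = new ConstantBase();   // toy: never freed
      // In the real code, add_req(C->root()) anchors the new node here so
      // dead-node elimination cannot reclaim it.
    }
    return _base;
  }
};

int main() {
  Graph g;
  std::printf("same instance: %d\n", g.constant_base() == g.constant_base());
  return 0;
}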


/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
class IntrinsicDescPair {
 private:
  ciMethod* _m;
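The comment describes an ordering keyed on the ciMethod's address with is_virtual as tie-breaker, so lookups can binary-search for an insertion point. A simplified, self-contained sketch of that search (Method, Desc, and find_insert_index are hypothetical stand-ins, not HotSpot API):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct Method {};                             // stand-in for ciMethod
using Desc = std::pair<const Method*, bool>;  // (method, is_virtual)

// Composite sort key: method address first, is_virtual as minor key.
static std::pair<uintptr_t, bool> key_of(const Method* m, bool is_virtual) {
  return std::make_pair(reinterpret_cast<uintptr_t>(m), is_virtual);
}

// Return the index at which (m, is_virtual) must be inserted (or already
// exists) to keep the table sorted: a plain lower-bound binary search.
static std::size_t find_insert_index(const std::vector<Desc>& table,
                                     const Method* m, bool is_virtual) {
  std::size_t lo = 0, hi = table.size();
  while (lo < hi) {
    std::size_t mid = lo + (hi - lo) / 2;
    if (key_of(table[mid].first, table[mid].second) < key_of(m, is_virtual)) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return lo;  // insertion point; table[lo] may already hold the key
}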


    // No more loop optimizations. Remove all range check dependent CastIINodes.
    C->remove_range_check_casts(igvn);
    igvn.optimize();
  }

#ifdef ASSERT
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->verify_gc_barriers(false);
#endif

  {
    TracePhase tp("macroExpand", &timers[_t_macroExpand]);
    PhaseMacroExpand  mex(igvn);
    print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
    if (mex.expand_macro_nodes()) {
      assert(failing(), "must bail out w/ explicit message");
      return;
    }
  }

  print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);

#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC && ((ShenandoahBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2())->expand_barriers(this, igvn)) {
    assert(failing(), "must bail out w/ explicit message");
    return;
  }
#endif

  if (opaque4_count() > 0) {
    C->remove_opaque4_nodes(igvn);
    igvn.optimize();
  }

  DEBUG_ONLY( _modified_nodes = NULL; )
 } // (End scope of igvn; run destructor if necessary for asserts.)

 process_print_inlining();
 // A method with only infinite loops has no edges entering loops from root
 {
   TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
   if (final_graph_reshaping()) {
     assert(failing(), "must bail out w/ explicit message");
     return;
   }
 }

 print_method(PHASE_OPTIMIZE_FINISHED, 2);
}
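Each expansion step above follows the same bail-out convention: a phase that returns true must already have recorded a failure reason, which the caller asserts before unwinding. A toy model of that contract (MiniCompile and its members are invented for illustration, not HotSpot API):

#include <cassert>
#include <cstdio>

// Invented mini-compiler: a phase signals bail-out by returning true, and
// the contract is that it recorded a human-readable reason first.
struct MiniCompile {
  const char* _failure_reason = nullptr;

  bool failing() const { return _failure_reason != nullptr; }
  void record_failure(const char* reason) { _failure_reason = reason; }

  bool expand_macro_nodes() {
    // ... expansion work; on an unrecoverable condition:
    record_failure("out of nodes during macro expansion");
    return true;                 // caller must unwind now
  }

  void optimize() {
    if (expand_macro_nodes()) {
      assert(failing() && "must bail out w/ explicit message");
      return;                    // stop compiling this method
    }
    // ... later phases would run here
  }
};

int main() {
  MiniCompile c;
  c.optimize();
  std::printf("compilation failed: %s\n", c._failure_reason);
  return 0;
}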


  // case Op_ConvD2L: // handled by leaf call
  case Op_ConD:
  case Op_CmpD:
  case Op_CmpD3:
    frc.inc_double_count();
    break;
  case Op_Opaque1:              // Remove Opaque Nodes before matching
  case Op_Opaque2:              // Remove Opaque Nodes before matching
  case Op_Opaque3:
    n->subsume_by(n->in(1), this);
    break;
  case Op_CallStaticJava:
  case Op_CallJava:
  case Op_CallDynamicJava:
    frc.inc_java_call_count(); // Count java call sites.
    // Fall through: Java calls are also counted as calls below.
  case Op_CallRuntime:
  case Op_CallLeaf:
  case Op_CallLeafNoFP: {
    assert(n->is_Call(), "expected a call node");
    CallNode *call = n->as_Call();
#if INCLUDE_SHENANDOAHGC
    if (UseShenandoahGC && ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
      uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
      if (call->req() > cnt) {
        assert(call->req() == cnt + 1, "only one extra input");
        Node* addp = call->in(cnt);
        assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
        call->del_req(cnt);
      }
    }
#endif
    // Count call sites where the FP mode bit would have to be flipped.
    // Do not count uncommon runtime calls:
    // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
    // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
    if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
      frc.inc_call_count();   // Count the call site
    } else {                  // See if uncommon argument is shared
      Node *n = call->in(TypeFunc::Parms);
      int nop = n->Opcode();
      // Clone shared simple arguments to uncommon calls, item (1).
      if (n->outcnt() > 1 &&
          !n->is_Proj() &&
          nop != Op_CreateEx &&
          nop != Op_CheckCastPP &&
          nop != Op_DecodeN &&
          nop != Op_DecodeNKlass &&
          !n->is_Mem() &&
          !n->is_Phi()) {
        Node *x = n->clone();
        call->set_req(TypeFunc::Parms, x);
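The "clone shared simple arguments" step gives the uncommon call a private copy of a shared argument, so the slow path no longer extends the live range of the shared value. A toy version of that unsharing step (Node here is a minimal invented stand-in, not C2's class):

#include <cstddef>
#include <vector>

// Minimal stand-in node: inputs plus a use count maintained by hand.
struct Node {
  std::vector<Node*> in;
  int outcnt = 0;
  Node* clone() const { return new Node(*this); }   // toy: never freed
};

// Give 'call' a private copy of a shared argument so the uncommon path no
// longer keeps the original value live across it.
void unshare_argument(Node* call, std::size_t arg_index) {
  Node* arg = call->in[arg_index];
  if (arg->outcnt > 1) {
    Node* copy = arg->clone();
    copy->outcnt = 1;            // used by this call only
    // A full implementation would also register 'copy' as a new user of
    // each of its inputs; the toy skips that bookkeeping.
    arg->outcnt--;
    call->in[arg_index] = copy;
  }
}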


      // register allocation can be confused.
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(n->in(MemBarNode::Precedent));
      n->set_req(MemBarNode::Precedent, top());
      while (wq.size() > 0) {
        Node* m = wq.pop();
        if (m->outcnt() == 0) {
          for (uint j = 0; j < m->req(); j++) {
            Node* in = m->in(j);
            if (in != NULL) {
              wq.push(in);
            }
          }
          m->disconnect_inputs(NULL, this);
        }
      }
    }
    break;
  }
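The Precedent-edge cleanup above is a standard worklist teardown: once a node loses its last use, its input edges are dropped and each input is revisited in case it has just become dead as well. Roughly, under invented toy types:

#include <vector>

// Toy graph node for the sketch (not C2's Node).
struct GNode {
  std::vector<GNode*> in;   // use-def inputs
  int outcnt = 0;           // number of users
};

// Once 'start' has lost its last user, drop its input edges and revisit
// each input, since it may have just become dead too.
void disconnect_dead(GNode* start) {
  std::vector<GNode*> wq;
  wq.push_back(start);
  while (!wq.empty()) {
    GNode* m = wq.back();
    wq.pop_back();
    if (m != nullptr && m->outcnt == 0) {
      for (GNode* input : m->in) {
        if (input != nullptr) {
          input->outcnt--;       // this edge goes away
          wq.push_back(input);   // may be dead now as well
        }
      }
      m->in.clear();
    }
  }
}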
#if INCLUDE_SHENANDOAHGC
  case Op_ShenandoahCompareAndSwapP:
  case Op_ShenandoahCompareAndSwapN:
  case Op_ShenandoahWeakCompareAndSwapN:
  case Op_ShenandoahWeakCompareAndSwapP:
  case Op_ShenandoahCompareAndExchangeP:
  case Op_ShenandoahCompareAndExchangeN:
#ifdef ASSERT
    if (VerifyOptoOopOffsets) {
      MemNode* mem = n->as_Mem();
      // Check to see if address types have grounded out somehow.
      const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
      // tp may be NULL if the address type has degenerated; test it before
      // dereferencing, so the assert can actually report the problem.
      if (tp != NULL) {
        ciInstanceKlass *k = tp->klass()->as_instance_klass();
        assert(k->contains_field_offset(tp->offset()), "oop offset is sane");
      }
    }
#endif
    break;
  case Op_ShenandoahLoadReferenceBarrier:
    assert(false, "should have been expanded already");
    break;
#endif
  case Op_RangeCheck: {
    RangeCheckNode* rc = n->as_RangeCheck();
    Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
    n->subsume_by(iff, this);
    frc._tests.push(iff);
    break;
  }
  case Op_ConvI2L: {
    if (!Matcher::convi2l_type_required) {
      // Code generation on some platforms doesn't need accurate
      // ConvI2L types. Widening the type can help remove redundant
      // address computations.
      n->as_Type()->set_type(TypeLong::INT);
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(n);
      for (uint next = 0; next < wq.size(); next++) {
        Node *m = wq.at(next);

        for (;;) {
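Why widening helps: nodes are commoned only when they agree on type, so two ConvI2L nodes over the same input but with different inferred ranges stay distinct until both are widened to the full int range. A toy value-numbering demo of that effect (all names invented; not C2's GVN):

#include <cstdint>
#include <cstdio>
#include <map>
#include <tuple>

int main() {
  enum { Op_ConvI2L = 1 };
  using Key = std::tuple<int, int, long, long>;  // (opcode, input id, type lo, type hi)
  std::map<Key, int> vn;                         // value-number table: key -> node id
  int next_id = 0;

  auto intern = [&](Key k) {
    auto it = vn.find(k);
    if (it == vn.end()) it = vn.emplace(k, next_id++).first;
    return it->second;
  };

  // Before widening: ranges [0,9] and [0,19] give distinct keys, two nodes.
  int a = intern(Key(Op_ConvI2L, 1, 0, 9));
  int b = intern(Key(Op_ConvI2L, 1, 0, 19));
  // After widening both to int's full range: one key, one shared node.
  const long lo = INT32_MIN, hi = INT32_MAX;
  int c = intern(Key(Op_ConvI2L, 1, lo, hi));
  int d = intern(Key(Op_ConvI2L, 1, lo, hi));
  std::printf("before: node %d vs node %d; after: node %d vs node %d\n", a, b, c, d);
  return 0;
}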


          if (use->is_Con())        continue;  // a dead ConNode is OK
          // At this point, we have found a dead node which is DU-reachable.
          if (!dead_nodes) {
            tty->print_cr("*** Dead nodes reachable via DU edges:");
            dead_nodes = true;
          }
          use->dump(2);
          tty->print_cr("---");
          checked.push(use);  // No repeats; pretend it is now checked.
        }
      }
      assert(!dead_nodes, "using nodes must be reachable from root");
    }
  }
}

// Verify GC barrier consistency
// Currently supported:
// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
// - Shenandoah pre-barriers (same SATB queue-active test)
void Compile::verify_barriers() {
#if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
  if (UseG1GC || UseShenandoahGC) {
    // Verify G1/Shenandoah pre-barriers

#if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
    const int marking_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
                                                : ShenandoahThreadLocalData::satb_mark_queue_active_offset());
#elif INCLUDE_G1GC
    const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
#else
    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
#endif

    ResourceArea *area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    Node_List worklist(area);
    // We're going to walk control flow backwards starting from the Root
    worklist.push(_root);
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (x == NULL || x == top()) continue;
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
