
src/hotspot/share/opto/parse2.cpp


   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileLog.hpp"
  29 #include "interpreter/linkResolver.hpp"
  30 #include "jvm_io.h"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/opaquenode.hpp"
  43 #include "opto/parse.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "runtime/deoptimization.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 
  48 #ifndef PRODUCT
  49 extern uint explicit_null_checks_inserted,
  50             explicit_null_checks_elided;
  51 #endif
  52 
  53 //---------------------------------array_load----------------------------------
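     // Parse one of the *aload bytecodes. array_addressing() has already
     // null-checked the array and range-checked the index; here we only
     // pop the operands and emit a control-dependent load.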
  54 void Parse::array_load(BasicType bt) {
  55   const Type* elemtype = Type::TOP;
  56   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  57   Node* adr = array_addressing(bt, 0, elemtype);
  58   if (stopped())  return;     // guaranteed null or range check
  59 
  60   pop();                      // index (already used)
  61   Node* array = pop();        // the array itself
  62 
  63   if (elemtype == TypeInt::BOOL) {
  64     bt = T_BOOLEAN;
  65   }
  66   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  67 
  68   Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
  69                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  70   if (big_val) {
  71     push_pair(ld);
  72   } else {
  73     push(ld);
  74   }
  75 }
  76 
  77 
  78 //--------------------------------array_store----------------------------------
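     // Parse one of the *astore bytecodes. For T_OBJECT an array store
     // check (the aastore subtype check) runs before value, index and
     // array are popped.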
  79 void Parse::array_store(BasicType bt) {
  80   const Type* elemtype = Type::TOP;
  81   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  82   Node* adr = array_addressing(bt, big_val ? 2 : 1, elemtype);
  83   if (stopped())  return;     // guaranteed null or range check
  84   if (bt == T_OBJECT) {
  85     array_store_check();
  86     if (stopped()) {
  87       return;
  88     }
  89   }
  90   Node* val;                  // Oop to store
  91   if (big_val) {
  92     val = pop_pair();
  93   } else {
  94     val = pop();
  95   }
  96   pop();                      // index (already used)
  97   Node* array = pop();        // the array itself
  98 
  99   if (elemtype == TypeInt::BOOL) {
 100     bt = T_BOOLEAN;
 101   }
 102   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 103 
 104   access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 105 }
 106 
 107 
 108 //------------------------------array_addressing-------------------------------
 109 // Pull array and index from the stack.  Compute pointer-to-element.
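     // 'vals' is the number of stack slots occupied by the value being
     // stored (0 for loads), so index and array can be peeked at the
     // correct depth without popping them.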
 110 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 111   Node *idx   = peek(0+vals);   // Get from stack without popping
 112   Node *ary   = peek(1+vals);   // in case of exception
 113 
 114   // Null check the array base, with correct stack contents
 115   ary = null_check(ary, T_ARRAY);
 116   // Compile-time detection of a null exception?
 117   if (stopped())  return top();
 118 
 119   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 120   const TypeInt*    sizetype = arytype->size();
 121   elemtype = arytype->elem();
 122 
 123   if (UseUniqueSubclasses) {
 124     const Type* el = elemtype->make_ptr();

 185       if (C->allow_range_check_smearing()) {
 186         // Do not use builtin_throw, since range checks are sometimes
 187         // made more stringent by an optimistic transformation.
 188         // This creates "tentative" range checks at this point,
 189         // which are not guaranteed to throw exceptions.
 190         // See IfNode::Ideal, is_range_check, adjust_check.
 191         uncommon_trap(Deoptimization::Reason_range_check,
 192                       Deoptimization::Action_make_not_entrant,
 193                       nullptr, "range_check");
 194       } else {
 195         // If we have already recompiled with the range-check-widening
 196         // heroic optimization turned off, then we must really be throwing
 197         // range check exceptions.
 198         builtin_throw(Deoptimization::Reason_range_check);
 199       }
 200     }
 201   }
 202   // Check whether we statically know a range-check exception is always thrown
 203   if (stopped())  return top();
 204 
 205   // Make array address computation control dependent to prevent it
 206   // from floating above the range check during loop optimizations.
 207   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 208   assert(ptr != top(), "top should go hand-in-hand with stopped");
 209 
 210   return ptr;
 211 }
 212 
 213 
 214 // returns IfNode
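     // Emits CmpI/Bool and an If for a two-way integer fork (used by the
     // switch lowering code); the caller creates the IfTrue/IfFalse
     // projections and sets control.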
 215 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 216   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 217   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 218   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 219   return iff;
 220 }
 221 
 222 
 223 // sentinel value for the target bci to mark never taken branches
 224 // (according to profiling)

1440       }
1441     }
1442   }
1443 
1444   // False branch
1445   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1446   set_control(iffalse);
1447 
1448   if (stopped()) {              // Path is dead?
1449     NOT_PRODUCT(explicit_null_checks_elided++);
1450     if (C->eliminate_boxing()) {
1451       // Mark the successor block as parsed
1452       next_block->next_path_num();
1453     }
1454   } else  {                     // Path is live.
1455     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1456   }
1457 }
1458 
1459 //------------------------------------do_if------------------------------------
1460 void Parse::do_if(BoolTest::mask btest, Node* c) {
1461   int target_bci = iter().get_dest();
1462 
1463   Block* branch_block = successor_for_bci(target_bci);
1464   Block* next_block   = successor_for_bci(iter().next_bci());
1465 
1466   float cnt;
1467   float prob = branch_prediction(cnt, btest, target_bci, c);
1468   float untaken_prob = 1.0 - prob;
1469 
1470   if (prob == PROB_UNKNOWN) {
1471     if (PrintOpto && Verbose) {
1472       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1473     }
1474     repush_if_args(); // to gather stats on loop
1475     uncommon_trap(Deoptimization::Reason_unreached,
1476                   Deoptimization::Action_reinterpret,
1477                   nullptr, "cold");
1478     if (C->eliminate_boxing()) {
1479       // Mark the successor blocks as parsed
1480       branch_block->next_path_num();

1524   }
1525 
1526   // Generate real control flow
1527   float true_prob = (taken_if_true ? prob : untaken_prob);
1528   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1529   assert(iff->_prob > 0.0f, "Optimizer made bad probability in parser");
1530   Node* taken_branch   = new IfTrueNode(iff);
1531   Node* untaken_branch = new IfFalseNode(iff);
1532   if (!taken_if_true) {  // Finish conversion to canonical form
1533     Node* tmp      = taken_branch;
1534     taken_branch   = untaken_branch;
1535     untaken_branch = tmp;
1536   }
1537 
1538   // Branch is taken:
1539   { PreserveJVMState pjvms(this);
1540     taken_branch = _gvn.transform(taken_branch);
1541     set_control(taken_branch);
1542 
1543     if (stopped()) {
1544       if (C->eliminate_boxing()) {
1545         // Mark the successor block as parsed
1546         branch_block->next_path_num();
1547       }
1548     } else {
1549       adjust_map_after_if(taken_btest, c, prob, branch_block);
1550       if (!stopped()) {
1551         merge(target_bci);
1552       }
1553     }
1554   }
1555 
1556   untaken_branch = _gvn.transform(untaken_branch);
1557   set_control(untaken_branch);
1558 
1559   // Branch not taken.
1560   if (stopped()) {
1561     if (C->eliminate_boxing()) {
1562       // Mark the successor block as parsed
1563       next_block->next_path_num();
1564     }
1565   } else {
1566     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1567   }
1568 }
1569 
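     // A branch qualifies for an unstable-if uncommon trap only if the
     // profile says it is never taken and the comparison looks stable.
     // With -Xcomp the interpreter never runs, so there is no profile
     // worth trusting.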
1570 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
1571   // Don't want to speculate on uncommon traps when running with -Xcomp
1572   if (!UseInterpreter) {
1573     return false;
1574   }
1575   return (seems_never_taken(prob) && seems_stable_comparison());
1576 }
1577 
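     // If 'path' enters a single-entry loop head whose predecessors have
     // not been parsed yet, add parse predicates at the bci of the
     // dominating if so traps are recorded on the if's profile data.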
1578 void Parse::maybe_add_predicate_after_if(Block* path) {
1579   if (path->is_SEL_head() && path->preds_parsed() == 0) {
1580     // Add predicates at the bci of the if dominating the loop so traps
1581     // can be recorded on the if's profile data
1582     int bc_depth = repush_if_args();
1583     add_parse_predicates();
1584     dec_sp(bc_depth);
1585     path->set_has_predicates();
1586   }
1587 }
1588 
1589 
1590 //----------------------------adjust_map_after_if------------------------------
1591 // Adjust the JVM state to reflect the result of taking this path.
1592 // Basically, it means inspecting the CmpNode controlling this
1593 // branch, seeing how it constrains a tested value, and then
1594 // deciding if it's worth our while to encode this constraint
1595 // as graph nodes in the current abstract interpretation map.
1596 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
1597   if (!c->is_Cmp()) {
1598     maybe_add_predicate_after_if(path);
1599     return;
1600   }
1601 
1602   if (stopped() || btest == BoolTest::illegal) {
1603     return;                             // nothing to do
1604   }
1605 
1606   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1607 
1608   if (path_is_suitable_for_uncommon_trap(prob)) {
1609     repush_if_args();
1610     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
1611                   Deoptimization::Action_reinterpret,
1612                   nullptr,
1613                   (is_fallthrough ? "taken always" : "taken never"));
1614 
1615     if (call != nullptr) {
1616       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
1617     }
1618     return;
1619   }
1620 
1621   Node* val = c->in(1);
1622   Node* con = c->in(2);
1623   const Type* tcon = _gvn.type(con);
1624   const Type* tval = _gvn.type(val);
1625   bool have_con = tcon->singleton();
1626   if (tval->singleton()) {
1627     if (!have_con) {
1628       // Swap, so constant is in con.

1685     if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
1686        // Found:
1687        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
1688        // or the narrowOop equivalent.
1689        const Type* obj_type = _gvn.type(obj);
1690        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
1691        if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
1692            tboth->higher_equal(obj_type)) {
1693           // obj has to be of the exact type Foo if the CmpP succeeds.
1694           int obj_in_map = map()->find_edge(obj);
1695           JVMState* jvms = this->jvms();
1696           if (obj_in_map >= 0 &&
1697               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
1698             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
1699             const Type* tcc = ccast->as_Type()->type();
1700             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
1701             // Delay transform() call to allow recovery of pre-cast value
1702             // at the control merge.
1703             _gvn.set_type_bottom(ccast);
1704             record_for_igvn(ccast);
1705             // Here's the payoff.
1706             replace_in_map(obj, ccast);
1707           }
1708        }
1709     }
1710   }
1711 
1712   int val_in_map = map()->find_edge(val);
1713   if (val_in_map < 0)  return;          // replace_in_map would be useless
1714   {
1715     JVMState* jvms = this->jvms();
1716     if (!(jvms->is_loc(val_in_map) ||
1717           jvms->is_stk(val_in_map)))
1718       return;                           // again, it would be useless
1719   }
1720 
1721   // Check for a comparison to a constant, and "know" that the compared
1722   // value is constrained on this path.
1723   assert(tcon->singleton(), "");
1724   ConstraintCastNode* ccast = nullptr;

1790   if (c->Opcode() == Op_CmpP &&
1791       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1792       c->in(2)->is_Con()) {
1793     Node* load_klass = nullptr;
1794     Node* decode = nullptr;
1795     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1796       decode = c->in(1);
1797       load_klass = c->in(1)->in(1);
1798     } else {
1799       load_klass = c->in(1);
1800     }
1801     if (load_klass->in(2)->is_AddP()) {
1802       Node* addp = load_klass->in(2);
1803       Node* obj = addp->in(AddPNode::Address);
1804       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1805       if (obj_type->speculative_type_not_null() != nullptr) {
1806         ciKlass* k = obj_type->speculative_type();
1807         inc_sp(2);
1808         obj = maybe_cast_profiled_obj(obj, k);
1809         dec_sp(2);
1810         // Make the CmpP use the casted obj
1811         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1812         load_klass = load_klass->clone();
1813         load_klass->set_req(2, addp);
1814         load_klass = _gvn.transform(load_klass);
1815         if (decode != nullptr) {
1816           decode = decode->clone();
1817           decode->set_req(1, load_klass);
1818           load_klass = _gvn.transform(decode);
1819         }
1820         c = c->clone();
1821         c->set_req(1, load_klass);
1822         c = _gvn.transform(c);
1823       }
1824     }
1825   }
1826   return c;
1827 }
1828 
1829 //------------------------------do_one_bytecode--------------------------------

2636     // See if we can get some profile data and hand it off to the next block
2637     Block *target_block = block()->successor_for_bci(target_bci);
2638     if (target_block->pred_count() != 1)  break;
2639     ciMethodData* methodData = method()->method_data();
2640     if (!methodData->is_mature())  break;
2641     ciProfileData* data = methodData->bci_to_data(bci());
2642     assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
2643     int taken = ((ciJumpData*)data)->taken();
2644     taken = method()->scale_count(taken);
2645     target_block->set_count(taken);
2646     break;
2647   }
2648 
2649   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
2650   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2651   handle_if_null:
2652     // If this is a backwards branch in the bytecodes, add Safepoint
2653     maybe_add_safepoint(iter().get_dest());
2654     a = null();
2655     b = pop();
2656     if (!_gvn.type(b)->speculative_maybe_null() &&
2657         !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2658       inc_sp(1);
2659       Node* null_ctl = top();
2660       b = null_check_oop(b, &null_ctl, true, true, true);
2661       assert(null_ctl->is_top(), "no null control here");
2662       dec_sp(1);
2663     } else if (_gvn.type(b)->speculative_always_null() &&
2664                !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2665       inc_sp(1);
2666       b = null_assert(b);
2667       dec_sp(1);
2668     }
2669     c = _gvn.transform( new CmpPNode(b, a) );
2670     do_ifnull(btest, c);
2671     break;
2672 
2673   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2674   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2675   handle_if_acmp:
2676     // If this is a backwards branch in the bytecodes, add Safepoint
2677     maybe_add_safepoint(iter().get_dest());
2678     a = pop();
2679     b = pop();
2680     c = _gvn.transform( new CmpPNode(b, a) );
2681     c = optimize_cmp_with_klass(c);
2682     do_if(btest, c);
2683     break;
2684 
2685   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2686   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2687   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2688   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2689   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2690   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2691   handle_ifxx:
2692     // If this is a backwards branch in the bytecodes, add Safepoint
2693     maybe_add_safepoint(iter().get_dest());
2694     a = _gvn.intcon(0);
2695     b = pop();
2696     c = _gvn.transform( new CmpINode(b, a) );
2697     do_if(btest, c);
2698     break;
2699 
2700   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2701   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2702   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;

2717     break;
2718 
2719   case Bytecodes::_lookupswitch:
2720     do_lookupswitch();
2721     break;
2722 
2723   case Bytecodes::_invokestatic:
2724   case Bytecodes::_invokedynamic:
2725   case Bytecodes::_invokespecial:
2726   case Bytecodes::_invokevirtual:
2727   case Bytecodes::_invokeinterface:
2728     do_call();
2729     break;
2730   case Bytecodes::_checkcast:
2731     do_checkcast();
2732     break;
2733   case Bytecodes::_instanceof:
2734     do_instanceof();
2735     break;
2736   case Bytecodes::_anewarray:
2737     do_anewarray();
2738     break;
2739   case Bytecodes::_newarray:
2740     do_newarray((BasicType)iter().get_index());
2741     break;
2742   case Bytecodes::_multianewarray:
2743     do_multianewarray();
2744     break;
2745   case Bytecodes::_new:
2746     do_new();
2747     break;
2748 
2749   case Bytecodes::_jsr:
2750   case Bytecodes::_jsr_w:
2751     do_jsr();
2752     break;
2753 
2754   case Bytecodes::_ret:
2755     do_ret();
2756     break;
2757 

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "ci/ciSymbols.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "interpreter/linkResolver.hpp"
  31 #include "jvm_io.h"
  32 #include "memory/resourceArea.hpp"
  33 #include "memory/universe.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "opto/addnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/convertnode.hpp"
  38 #include "opto/divnode.hpp"
  39 #include "opto/idealGraphPrinter.hpp"
  40 #include "opto/idealKit.hpp"
  41 #include "opto/inlinetypenode.hpp"
  42 #include "opto/matcher.hpp"
  43 #include "opto/memnode.hpp"
  44 #include "opto/mulnode.hpp"
  45 #include "opto/opaquenode.hpp"
  46 #include "opto/parse.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "runtime/deoptimization.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 
  51 #ifndef PRODUCT
  52 extern uint explicit_null_checks_inserted,
  53             explicit_null_checks_elided;
  54 #endif
  55 
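     // For aaload: if profiling recorded an element type or nullness at
     // this bci, attach it to the loaded value as a speculative type.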
  56 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  57   // Feed unused profile data to type speculation
  58   if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
  59     ciKlass* array_type = nullptr;
  60     ciKlass* element_type = nullptr;
  61     ProfilePtrKind element_ptr = ProfileMaybeNull;
  62     bool flat_array = true;
  63     bool null_free_array = true;
  64     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
  65     if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
  66       ld = record_profile_for_speculation(ld, element_type, element_ptr);
  67     }
  68   }
  69   return ld;
  70 }
  71 
  72 
  73 //---------------------------------array_load----------------------------------
  74 void Parse::array_load(BasicType bt) {
  75   const Type* elemtype = Type::TOP;
  76   Node* adr = array_addressing(bt, 0, elemtype);
  77   if (stopped())  return;     // guaranteed null or range check
  78 
  79   Node* idx = pop();
  80   Node* ary = pop();
  81 
  82   // Handle inline type arrays
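       // Three cases: the array is statically flat, statically null-free
       // (but not flat), or its flatness is only decidable at runtime, in
       // which case a flat_array_test is emitted below.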
  83   const TypeOopPtr* elemptr = elemtype->make_oopptr();
  84   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  85   if (ary_t->is_flat()) {
  86     // Load from flat inline type array
  87     Node* vt = InlineTypeNode::make_from_flat(this, elemtype->inline_klass(), ary, adr);
  88     push(vt);
  89     return;
  90   } else if (ary_t->is_null_free()) {
  91     // Load from non-flat inline type array (elements can never be null)
  92     bt = T_OBJECT;
  93   } else if (!ary_t->is_not_flat()) {
  94     // Cannot statically determine if array is a flat array, emit runtime check
  95     assert(UseFlatArray && is_reference_type(bt) && elemptr->can_be_inline_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() &&
  96            (!elemptr->is_inlinetypeptr() || elemptr->inline_klass()->flat_in_array()), "array can't be flat");
  97     IdealKit ideal(this);
  98     IdealVariable res(ideal);
  99     ideal.declarations_done();
 100     ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
 101       // non-flat array
 102       assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 103       sync_kit(ideal);
 104       const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 105       Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
 106                                 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 107       if (elemptr->is_inlinetypeptr()) {
 108         assert(elemptr->maybe_null(), "null free array should be handled above");
 109         ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), false);
 110       }
 111       ideal.sync_kit(this);
 112       ideal.set(res, ld);
 113     } ideal.else_(); {
 114       // flat array
 115       sync_kit(ideal);
 116       if (elemptr->is_inlinetypeptr()) {
 117         // Element type is known, cast and load from flat representation
 118         ciInlineKlass* vk = elemptr->inline_klass();
 119         assert(vk->flat_in_array() && elemptr->maybe_null(), "never/always flat - should be optimized");
 120         ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
 121         const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 122         Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 123         Node* casted_adr = array_element_address(cast, idx, T_OBJECT, ary_t->size(), control());
 124         // Re-execute flat array load if buffering triggers deoptimization
 125         PreserveReexecuteState preexecs(this);
 126         jvms()->set_should_reexecute(true);
 127         inc_sp(2);
 128         Node* vt = InlineTypeNode::make_from_flat(this, vk, cast, casted_adr)->buffer(this, false);
 129         ideal.set(res, vt);
 130         ideal.sync_kit(this);
 131       } else {
 132         // Element type is unknown, emit runtime call
 133 
 134         // The membars below keep this access to an unknown flat array
 135         // correctly ordered with other flat array accesses, known or unknown.
 136         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 137 
 138         Node* call = nullptr;
 139         {
 140           // Re-execute flat array load if runtime call triggers deoptimization
 141           PreserveReexecuteState preexecs(this);
 142           jvms()->set_bci(_bci);
 143           jvms()->set_should_reexecute(true);
 144           inc_sp(2);
 145           kill_dead_locals();
 146           call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 147                                    OptoRuntime::load_unknown_inline_type(),
 148                                    OptoRuntime::load_unknown_inline_Java(),
 149                                    nullptr, TypeRawPtr::BOTTOM,
 150                                    ary, idx);
 151         }
 152         make_slow_call_ex(call, env()->Throwable_klass(), false);
 153         Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
 154 
 155         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 156 
 157         // Keep track of the information that the inline type is in flat arrays
 158         const Type* unknown_value = elemptr->is_instptr()->cast_to_flat_in_array();
 159         buffer = _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
 160 
 161         ideal.sync_kit(this);
 162         ideal.set(res, buffer);
 163       }
 164     } ideal.end_if();
 165     sync_kit(ideal);
 166     Node* ld = _gvn.transform(ideal.value(res));
 167     ld = record_profile_for_speculation_at_array_load(ld);
 168     push_node(bt, ld);
 169     return;
 170   }
 171 
 172   if (elemtype == TypeInt::BOOL) {
 173     bt = T_BOOLEAN;
 174   }
 175   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 176   Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
 177                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 178   ld = record_profile_for_speculation_at_array_load(ld);
 179   // Loading an inline type from a non-flat array
 180   if (elemptr != nullptr && elemptr->is_inlinetypeptr()) {
 181     assert(!ary_t->is_null_free() || !elemptr->maybe_null(), "inline type array elements should never be null");
 182     ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), !elemptr->maybe_null());
 183   }
 184   push_node(bt, ld);
 185 }
 186 
 187 
 188 //--------------------------------array_store----------------------------------
 189 void Parse::array_store(BasicType bt) {
 190   const Type* elemtype = Type::TOP;
 191   Node* adr = array_addressing(bt, type2size[bt], elemtype);
 192   if (stopped())  return;     // guaranteed null or range check
 193   Node* cast_val = nullptr;
 194   if (bt == T_OBJECT) {
 195     cast_val = array_store_check(adr, elemtype);
 196     if (stopped()) return;
 197   }
 198   Node* val = pop_node(bt); // Value to store
 199   Node* idx = pop();        // Index in the array
 200   Node* ary = pop();        // The array itself
 201 
 202   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
 203   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 204 
 205   if (elemtype == TypeInt::BOOL) {
 206     bt = T_BOOLEAN;
 207   } else if (bt == T_OBJECT) {
 208     elemtype = elemtype->make_oopptr();
 209     const Type* tval = _gvn.type(cast_val);
 210     // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
 211     // This is only legal for non-null stores because the array_store_check always passes for null, even
 212     // if the array is null-free. Null stores are handled in GraphKit::gen_inline_array_null_guard().
 213     bool not_null_free = !tval->maybe_null() && !tval->is_oopptr()->can_be_inline_type();
 214     bool not_flat = not_null_free || (tval->is_inlinetypeptr() && !tval->inline_klass()->flat_in_array());
 215     if (!ary_t->is_not_null_free() && not_null_free) {
 216       // Storing a non-inline type, mark array as not null-free (-> not flat).
 217       ary_t = ary_t->cast_to_not_null_free();
 218       Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 219       replace_in_map(ary, cast);
 220       ary = cast;
 221     } else if (!ary_t->is_not_flat() && not_flat) {
 222       // Storing to a non-flat array, mark array as not flat.
 223       ary_t = ary_t->cast_to_not_flat();
 224       Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 225       replace_in_map(ary, cast);
 226       ary = cast;
 227     }
 228 
 229     if (ary_t->is_flat()) {
 230       // Store to flat inline type array
 231       assert(!tval->maybe_null(), "should be guaranteed by array store check");
 232       // Re-execute flat array store if buffering triggers deoptimization
 233       PreserveReexecuteState preexecs(this);
 234       inc_sp(3);
 235       jvms()->set_should_reexecute(true);
 236       cast_val->as_InlineType()->store_flat(this, ary, adr, nullptr, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 237       return;
 238     } else if (ary_t->is_null_free()) {
 239       // Store to non-flat inline type array (elements can never be null)
 240       assert(!tval->maybe_null(), "should be guaranteed by array store check");
 241       if (elemtype->inline_klass()->is_empty()) {
 242         // Ignore empty inline stores, array is already initialized.
 243         return;
 244       }
 245     } else if (!ary_t->is_not_flat() && (tval != TypePtr::NULL_PTR || StressReflectiveCode)) {
 246       // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
 247       assert(UseFlatArray && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
 248              !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be a flat array");
 249       IdealKit ideal(this);
 250       ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
 251         // non-flat array
 252         assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 253         sync_kit(ideal);
 254         Node* cast_ary = inline_array_null_guard(ary, cast_val, 3);
 255         inc_sp(3);
 256         access_store_at(cast_ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
 257         dec_sp(3);
 258         ideal.sync_kit(this);
 259       } ideal.else_(); {
 260         sync_kit(ideal);
 261         // flat array
 262         Node* null_ctl = top();
 263         Node* val = null_check_oop(cast_val, &null_ctl);
 264         if (null_ctl != top()) {
 265           PreserveJVMState pjvms(this);
 266           inc_sp(3);
 267           set_control(null_ctl);
 268           uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
 269           dec_sp(3);
 270         }
 271         // Try to determine the inline klass
 272         ciInlineKlass* vk = nullptr;
 273         if (tval->is_inlinetypeptr()) {
 274           vk = tval->inline_klass();
 275         } else if (elemtype->is_inlinetypeptr()) {
 276           vk = elemtype->inline_klass();
 277         }
 278         Node* casted_ary = ary;
 279         if (vk != nullptr && !stopped()) {
 280           // Element type is known, cast and store to flat representation
 281           assert(vk->flat_in_array() && elemtype->maybe_null(), "never/always flat - should be optimized");
 282           ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
 283           const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 284           casted_ary = _gvn.transform(new CheckCastPPNode(control(), casted_ary, arytype));
 285           Node* casted_adr = array_element_address(casted_ary, idx, T_OBJECT, arytype->size(), control());
 286           if (!val->is_InlineType()) {
 287             assert(!gvn().type(val)->maybe_null(), "inline type array elements should never be null");
 288             val = InlineTypeNode::make_from_oop(this, val, vk);
 289           }
 290           // Re-execute flat array store if buffering triggers deoptimization
 291           PreserveReexecuteState preexecs(this);
 292           inc_sp(3);
 293           jvms()->set_should_reexecute(true);
 294           val->as_InlineType()->store_flat(this, casted_ary, casted_adr, nullptr, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 295         } else if (!stopped()) {
 296           // Element type is unknown, emit runtime call
 297 
 298           // The membars below keep this access to an unknown flat array
 299           // correctly ordered with other flat array accesses, known or unknown.
 300           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 301 
 302           make_runtime_call(RC_LEAF,
 303                             OptoRuntime::store_unknown_inline_type(),
 304                             CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_inline),
 305                             "store_unknown_inline", TypeRawPtr::BOTTOM,
 306                             val, casted_ary, idx);
 307 
 308           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 309         }
 310         ideal.sync_kit(this);
 311       }
 312       ideal.end_if();
 313       sync_kit(ideal);
 314       return;
 315     } else if (!ary_t->is_not_null_free()) {
 316       // Array is not flat but may be null free
 317       assert(elemtype->is_oopptr()->can_be_inline_type() && !ary_t->klass_is_exact(), "array can't be null-free");
 318       ary = inline_array_null_guard(ary, cast_val, 3, true);
 319     }
 320   }
 321   inc_sp(3);
 322   access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 323   dec_sp(3);
 324 }
 325 
 326 
 327 //------------------------------array_addressing-------------------------------
 328 // Pull array and index from the stack.  Compute pointer-to-element.
 329 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 330   Node *idx   = peek(0+vals);   // Get from stack without popping
 331   Node *ary   = peek(1+vals);   // in case of exception
 332 
 333   // Null check the array base, with correct stack contents
 334   ary = null_check(ary, T_ARRAY);
 335   // Compile-time detection of a null exception?
 336   if (stopped())  return top();
 337 
 338   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 339   const TypeInt*    sizetype = arytype->size();
 340   elemtype = arytype->elem();
 341 
 342   if (UseUniqueSubclasses) {
 343     const Type* el = elemtype->make_ptr();

 404       if (C->allow_range_check_smearing()) {
 405         // Do not use builtin_throw, since range checks are sometimes
 406         // made more stringent by an optimistic transformation.
 407         // This creates "tentative" range checks at this point,
 408         // which are not guaranteed to throw exceptions.
 409         // See IfNode::Ideal, is_range_check, adjust_check.
 410         uncommon_trap(Deoptimization::Reason_range_check,
 411                       Deoptimization::Action_make_not_entrant,
 412                       nullptr, "range_check");
 413       } else {
 414         // If we have already recompiled with the range-check-widening
 415         // heroic optimization turned off, then we must really be throwing
 416         // range check exceptions.
 417         builtin_throw(Deoptimization::Reason_range_check);
 418       }
 419     }
 420   }
 421   // Check whether we statically know a range-check exception is always thrown
 422   if (stopped())  return top();
 423 
 424   // This could be an access to an inline type array. We can't tell if it's
 425   // flat or not. Knowing the exact type avoids runtime checks and leads to
 426   // a much simpler graph shape. Check profile information.
 427   if (!arytype->is_flat() && !arytype->is_not_flat()) {
 428     // First check the speculative type
 429     Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
 430     ciKlass* array_type = arytype->speculative_type();
 431     if (too_many_traps_or_recompiles(reason) || array_type == nullptr) {
 432       // No speculative type, check profile data at this bci
 433       array_type = nullptr;
 434       reason = Deoptimization::Reason_class_check;
 435       if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 436         ciKlass* element_type = nullptr;
 437         ProfilePtrKind element_ptr = ProfileMaybeNull;
 438         bool flat_array = true;
 439         bool null_free_array = true;
 440         method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 441       }
 442     }
 443     if (array_type != nullptr) {
 444       // Speculate that this array has the exact type reported by profile data
 445       Node* better_ary = nullptr;
 446       DEBUG_ONLY(Node* old_control = control();)
 447       Node* slow_ctl = type_check_receiver(ary, array_type, 1.0, &better_ary);
 448       if (stopped()) {
 449         // The check always fails and therefore profile information is incorrect. Don't use it.
 450         assert(old_control == slow_ctl, "type check should have been removed");
 451         set_control(slow_ctl);
 452       } else if (!slow_ctl->is_top()) {
 453         { PreserveJVMState pjvms(this);
 454           set_control(slow_ctl);
 455           uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 456         }
 457         replace_in_map(ary, better_ary);
 458         ary = better_ary;
 459         arytype  = _gvn.type(ary)->is_aryptr();
 460         elemtype = arytype->elem();
 461       }
 462     }
 463   } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
 464     // No need to speculate: feed profile data at this bci for the
 465     // array to type speculation
 466     ciKlass* array_type = nullptr;
 467     ciKlass* element_type = nullptr;
 468     ProfilePtrKind element_ptr = ProfileMaybeNull;
 469     bool flat_array = true;
 470     bool null_free_array = true;
 471     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 472     if (array_type != nullptr) {
 473       ary = record_profile_for_speculation(ary, array_type, ProfileMaybeNull);
 474     }
 475   }
 476 
 477   // We have no exact array type from profile data. Check profile data
 478   // for a non-null-free or non-flat array. Non-null-free implies
 479   // non-flat, so check that one first. Speculating on a non-null-free
 480   // array doesn't help aaload but could be profitable for a
 481   // subsequent aastore.
 482   if (!arytype->is_null_free() && !arytype->is_not_null_free()) {
 483     bool null_free_array = true;
 484     Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 485     if (arytype->speculative() != nullptr &&
 486         arytype->speculative()->is_aryptr()->is_not_null_free() &&
 487         !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 488       null_free_array = false;
 489       reason = Deoptimization::Reason_speculate_class_check;
 490     } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
 491       ciKlass* array_type = nullptr;
 492       ciKlass* element_type = nullptr;
 493       ProfilePtrKind element_ptr = ProfileMaybeNull;
 494       bool flat_array = true;
 495       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 496       reason = Deoptimization::Reason_class_check;
 497     }
 498     if (!null_free_array) {
 499       { // Deoptimize if null-free array
 500         BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
 501         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 502       }
 503       assert(!stopped(), "null-free array should have been caught earlier");
 504       Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free()));
 505       replace_in_map(ary, better_ary);
 506       ary = better_ary;
 507       arytype = _gvn.type(ary)->is_aryptr();
 508     }
 509   }
 510 
 511   if (!arytype->is_flat() && !arytype->is_not_flat()) {
 512     bool flat_array = true;
 513     Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 514     if (arytype->speculative() != nullptr &&
 515         arytype->speculative()->is_aryptr()->is_not_flat() &&
 516         !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 517       flat_array = false;
 518       reason = Deoptimization::Reason_speculate_class_check;
 519     } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 520       ciKlass* array_type = nullptr;
 521       ciKlass* element_type = nullptr;
 522       ProfilePtrKind element_ptr = ProfileMaybeNull;
 523       bool null_free_array = true;
 524       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 525       reason = Deoptimization::Reason_class_check;
 526     }
 527     if (!flat_array) {
 528       { // Deoptimize if flat array
 529         BuildCutout unless(this, flat_array_test(ary, /* flat = */ false), PROB_MAX);
 530         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 531       }
 532       assert(!stopped(), "flat array should have been caught earlier");
 533       Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_flat()));
 534       replace_in_map(ary, better_ary);
 535       ary = better_ary;
 536       arytype = _gvn.type(ary)->is_aryptr();
 537     }
 538   }
 539 
 540   // Make array address computation control dependent to prevent it
 541   // from floating above the range check during loop optimizations.
 542   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 543   assert(ptr != top(), "top should go hand-in-hand with stopped");
 544 
 545   return ptr;
 546 }
 547 
 548 
 549 // returns IfNode
 550 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 551   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 552   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 553   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 554   return iff;
 555 }
 556 
 557 
 558 // sentinel value for the target bci to mark never taken branches
 559 // (according to profiling)

1775       }
1776     }
1777   }
1778 
1779   // False branch
1780   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1781   set_control(iffalse);
1782 
1783   if (stopped()) {              // Path is dead?
1784     NOT_PRODUCT(explicit_null_checks_elided++);
1785     if (C->eliminate_boxing()) {
1786       // Mark the successor block as parsed
1787       next_block->next_path_num();
1788     }
1789   } else  {                     // Path is live.
1790     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1791   }
1792 }
1793 
1794 //------------------------------------do_if------------------------------------
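     // can_trap:   whether adjust_map_after_if may emit an unstable-if trap.
     // new_path:   merge the taken branch through a new path number.
     // ctrl_taken: if non-null, don't merge the taken branch; hand its
     //             control back to the caller (used by do_acmp).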
1795 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken) {
1796   int target_bci = iter().get_dest();
1797 
1798   Block* branch_block = successor_for_bci(target_bci);
1799   Block* next_block   = successor_for_bci(iter().next_bci());
1800 
1801   float cnt;
1802   float prob = branch_prediction(cnt, btest, target_bci, c);
1803   float untaken_prob = 1.0 - prob;
1804 
1805   if (prob == PROB_UNKNOWN) {
1806     if (PrintOpto && Verbose) {
1807       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1808     }
1809     repush_if_args(); // to gather stats on loop
1810     uncommon_trap(Deoptimization::Reason_unreached,
1811                   Deoptimization::Action_reinterpret,
1812                   nullptr, "cold");
1813     if (C->eliminate_boxing()) {
1814       // Mark the successor blocks as parsed
1815       branch_block->next_path_num();

1859   }
1860 
1861   // Generate real control flow
1862   float true_prob = (taken_if_true ? prob : untaken_prob);
1863   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1864   assert(iff->_prob > 0.0f, "Optimizer made bad probability in parser");
1865   Node* taken_branch   = new IfTrueNode(iff);
1866   Node* untaken_branch = new IfFalseNode(iff);
1867   if (!taken_if_true) {  // Finish conversion to canonical form
1868     Node* tmp      = taken_branch;
1869     taken_branch   = untaken_branch;
1870     untaken_branch = tmp;
1871   }
1872 
1873   // Branch is taken:
1874   { PreserveJVMState pjvms(this);
1875     taken_branch = _gvn.transform(taken_branch);
1876     set_control(taken_branch);
1877 
1878     if (stopped()) {
1879       if (C->eliminate_boxing() && !new_path) {
1880         // Mark the successor block as parsed (if we haven't created a new path)
1881         branch_block->next_path_num();
1882       }
1883     } else {
1884       adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1885       if (!stopped()) {
1886         if (new_path) {
1887           // Merge by using a new path
1888           merge_new_path(target_bci);
1889         } else if (ctrl_taken != nullptr) {
1890           // Don't merge but save taken branch to be wired by caller
1891           *ctrl_taken = control();
1892         } else {
1893           merge(target_bci);
1894         }
1895       }
1896     }
1897   }
1898 
1899   untaken_branch = _gvn.transform(untaken_branch);
1900   set_control(untaken_branch);
1901 
1902   // Branch not taken.
1903   if (stopped() && ctrl_taken == nullptr) {
1904     if (C->eliminate_boxing()) {
1905       // Mark the successor block as parsed (if caller does not re-wire control flow)
1906       next_block->next_path_num();
1907     }
1908   } else {
1909     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1910   }
1911 }
1912 
1913 
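     // Map the speculative part of an oop type onto the ProfilePtrKind
     // lattice used by the acmp handling below.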
1914 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1915   if (t->speculative() == nullptr) {
1916     return ProfileUnknownNull;
1917   }
1918   if (t->speculative_always_null()) {
1919     return ProfileAlwaysNull;
1920   }
1921   if (t->speculative_maybe_null()) {
1922     return ProfileMaybeNull;
1923   }
1924   return ProfileNeverNull;
1925 }
1926 
1927 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1928   inc_sp(2);
1929   Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1930                                  !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1931                                  speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1932   dec_sp(2);
1933   if (btest == BoolTest::ne) {
1934     {
1935       PreserveJVMState pjvms(this);
1936       replace_in_map(input, cast);
1937       int target_bci = iter().get_dest();
1938       merge(target_bci);
1939     }
1940     record_for_igvn(eq_region);
1941     set_control(_gvn.transform(eq_region));
1942   } else {
1943     replace_in_map(input, cast);
1944   }
1945 }
1946 
1947 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
1948   inc_sp(2);
1949   null_ctl = top();
1950   Node* cast = null_check_oop(input, &null_ctl,
1951                               input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
1952                               false,
1953                               speculative_ptr_kind(tinput) == ProfileNeverNull &&
1954                               !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
1955   dec_sp(2);
1956   assert(!stopped(), "null input should have been caught earlier");
1957   return cast;
1958 }
1959 
1960 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
1961   Node* ne_region = new RegionNode(1);
1962   Node* null_ctl;
1963   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
1964   ne_region->add_req(null_ctl);
1965 
1966   Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
1967   {
1968     PreserveJVMState pjvms(this);
1969     inc_sp(2);
1970     set_control(slow_ctl);
1971     Deoptimization::DeoptReason reason;
1972     if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
1973       reason = Deoptimization::Reason_speculate_class_check;
1974     } else {
1975       reason = Deoptimization::Reason_class_check;
1976     }
1977     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
1978   }
1979   ne_region->add_req(control());
1980 
1981   record_for_igvn(ne_region);
1982   set_control(_gvn.transform(ne_region));
1983   if (btest == BoolTest::ne) {
1984     {
1985       PreserveJVMState pjvms(this);
1986       if (null_ctl == top()) {
1987         replace_in_map(input, cast);
1988       }
1989       int target_bci = iter().get_dest();
1990       merge(target_bci);
1991     }
1992     record_for_igvn(eq_region);
1993     set_control(_gvn.transform(eq_region));
1994   } else {
1995     if (null_ctl == top()) {
1996       replace_in_map(input, cast);
1997     }
1998     set_control(_gvn.transform(ne_region));
1999   }
2000 }
2001 
2002 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2003   Node* ne_region = new RegionNode(1);
2004   Node* null_ctl;
2005   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2006   ne_region->add_req(null_ctl);
2007 
2008   {
2009     BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2010     inc_sp(2);
2011     uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2012   }
2013 
2014   ne_region->add_req(control());
2015 
2016   record_for_igvn(ne_region);
2017   set_control(_gvn.transform(ne_region));
2018   if (btest == BoolTest::ne) {
2019     {
2020       PreserveJVMState pjvms(this);
2021       if (null_ctl == top()) {
2022         replace_in_map(input, cast);
2023       }
2024       int target_bci = iter().get_dest();
2025       merge(target_bci);
2026     }
2027     record_for_igvn(eq_region);
2028     set_control(_gvn.transform(eq_region));
2029   } else {
2030     if (null_ctl == top()) {
2031       replace_in_map(input, cast);
2032     }
2033     set_control(_gvn.transform(ne_region));
2034   }
2035 }
2036 
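     // Valhalla acmp: a plain pointer comparison is conclusive only if at
     // least one operand cannot be an inline type. Otherwise pointer
     // inequality still requires a substitutability check, built from the
     // helpers above.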
2037 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2038   ciKlass* left_type = nullptr;
2039   ciKlass* right_type = nullptr;
2040   ProfilePtrKind left_ptr = ProfileUnknownNull;
2041   ProfilePtrKind right_ptr = ProfileUnknownNull;
2042   bool left_inline_type = true;
2043   bool right_inline_type = true;
2044 
2045   // Leverage profiling at acmp
2046   if (UseACmpProfile) {
2047     method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2048     if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2049       left_type = nullptr;
2050       right_type = nullptr;
2051       left_inline_type = true;
2052       right_inline_type = true;
2053     }
2054     if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2055       left_ptr = ProfileUnknownNull;
2056       right_ptr = ProfileUnknownNull;
2057     }
2058   }
2059 
2060   if (UseTypeSpeculation) {
2061     record_profile_for_speculation(left, left_type, left_ptr);
2062     record_profile_for_speculation(right, right_type, right_ptr);
2063   }
2064 
2065   if (!EnableValhalla) {
2066     Node* cmp = CmpP(left, right);
2067     cmp = optimize_cmp_with_klass(cmp);
2068     do_if(btest, cmp);
2069     return;
2070   }
2071 
2072   // Check for equality before potentially allocating
2073   if (left == right) {
2074     do_if(btest, makecon(TypeInt::CC_EQ));
2075     return;
2076   }
2077 
2078   // Allocate inline type operands and re-execute on deoptimization
2079   if (left->is_InlineType()) {
2080     if (_gvn.type(right)->is_zero_type() ||
2081         (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2082       // Null checking a scalarized but nullable inline type. Check the IsInit
2083       // input instead of the oop input to avoid keeping buffer allocations alive.
2084       Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2085       do_if(btest, cmp);
2086       return;
2087     } else {
2088       PreserveReexecuteState preexecs(this);
2089       inc_sp(2);
2090       jvms()->set_should_reexecute(true);
2091       left = left->as_InlineType()->buffer(this)->get_oop();
2092     }
2093   }
2094   if (right->is_InlineType()) {
2095     PreserveReexecuteState preexecs(this);
2096     inc_sp(2);
2097     jvms()->set_should_reexecute(true);
2098     right = right->as_InlineType()->buffer(this)->get_oop();
2099   }
2100 
2101   // First, do a normal pointer comparison
2102   const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2103   const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2104   Node* cmp = CmpP(left, right);
2105   cmp = optimize_cmp_with_klass(cmp);
2106   if (tleft == nullptr || !tleft->can_be_inline_type() ||
2107       tright == nullptr || !tright->can_be_inline_type()) {
2108     // This is sufficient if one of the operands can't be an inline type
2109     do_if(btest, cmp);
2110     return;
2111   }
2112 
2113   // Don't add traps to unstable if branches because additional checks are required to
2114   // decide if the operands are equal/substitutable and we therefore shouldn't prune
2115   // branches for one if based on the profiling of the acmp branches.
2116   // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2117   // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2118   // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2119   const bool can_trap = true;
2120 
2121   Node* eq_region = nullptr;
2122   if (btest == BoolTest::eq) {
2123     do_if(btest, cmp, !can_trap, true);
2124     if (stopped()) {
2125       // Pointers are equal, operands must be equal
2126       return;
2127     }
2128   } else {
2129     assert(btest == BoolTest::ne, "only eq or ne");
2130     Node* is_not_equal = nullptr;
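         // eq_region merges the control paths on which the operands turn out to be equal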
2131     eq_region = new RegionNode(3);
2132     {
2133       PreserveJVMState pjvms(this);
2134       // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2135       do_if(btest, cmp, !can_trap, false, &is_not_equal);
2136       if (!stopped()) {
2137         eq_region->init_req(1, control());
2138       }
2139     }
2140     if (is_not_equal == nullptr || is_not_equal->is_top()) {
2141       record_for_igvn(eq_region);
2142       set_control(_gvn.transform(eq_region));
2143       return;
2144     }
2145     set_control(is_not_equal);
2146   }
2147 
2148   // Prefer speculative types if available
2149   if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2150     if (tleft->speculative_type() != nullptr) {
2151       left_type = tleft->speculative_type();
2152     }
2153     if (tright->speculative_type() != nullptr) {
2154       right_type = tright->speculative_type();
2155     }
2156   }
2157 
2158   if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2159     ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2160     if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2161       left_ptr = speculative_left_ptr;
2162     } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2163       left_ptr = speculative_left_ptr;
2164     }
2165   }
2166   if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2167     ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2168     if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2169       right_ptr = speculative_right_ptr;
2170     } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2171       right_ptr = speculative_right_ptr;
2172     }
2173   }
2174 
2175   if (left_ptr == ProfileAlwaysNull) {
2176     // Comparison with null. Assert the input is indeed null and we're done.
2177     acmp_always_null_input(left, tleft, btest, eq_region);
2178     return;
2179   }
2180   if (right_ptr == ProfileAlwaysNull) {
2181     // Comparison with null. Assert the input is indeed null and we're done.
2182     acmp_always_null_input(right, tright, btest, eq_region);
2183     return;
2184   }
2185   if (left_type != nullptr && !left_type->is_inlinetype()) {
2186     // Comparison with an object of known type
2187     acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2188     return;
2189   }
2190   if (right_type != nullptr && !right_type->is_inlinetype()) {
2191     // Comparison with an object of known type
2192     acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2193     return;
2194   }
2195   if (!left_inline_type) {
2196     // Comparison with an object known not to be an inline type
2197     acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2198     return;
2199   }
2200   if (!right_inline_type) {
2201     // Comparison with an object known not to be an inline type
2202     acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2203     return;
2204   }
2205 
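       // The region below merges every control path on which the operands are determined to be not equal.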
2206   // Pointers are not equal, check if the right operand is non-null
2207   Node* ne_region = new RegionNode(6);
2208   Node* null_ctl;
2209   Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2210   ne_region->init_req(1, null_ctl);
2211 
2212   // The right operand is non-null, check if it is an inline type
2213   Node* is_value = inline_type_test(not_null_right);
2214   IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2215   Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2216   ne_region->init_req(2, not_value);
2217   set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2218 
2219   // The right operand is an inline type, check if the left operand is non-null
2220   Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2221   ne_region->init_req(3, null_ctl);
2222 
2223   // Check if both operands are of the same class.
2224   Node* kls_left = load_object_klass(not_null_left);
2225   Node* kls_right = load_object_klass(not_null_right);
2226   Node* kls_cmp = CmpP(kls_left, kls_right);
2227   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2228   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2229   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2230   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2231   ne_region->init_req(4, kls_ne);
2232 
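       // If the same-klass path is dead, the klasses statically differ and the operands
       // cannot be substitutable: merge the not-equal paths and we are done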
2233   if (stopped()) {
2234     record_for_igvn(ne_region);
2235     set_control(_gvn.transform(ne_region));
2236     if (btest == BoolTest::ne) {
2237       {
2238         PreserveJVMState pjvms(this);
2239         int target_bci = iter().get_dest();
2240         merge(target_bci);
2241       }
2242       record_for_igvn(eq_region);
2243       set_control(_gvn.transform(eq_region));
2244     }
2245     return;
2246   }
2247 
2248   // Both operands are value types of the same class, so we need to perform a
2249   // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2250   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2251   Node* mem = reset_memory();
2252   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2253 
2254   Node* eq_io_phi = nullptr;
2255   Node* eq_mem_phi = nullptr;
2256   if (eq_region != nullptr) {
2257     eq_io_phi = PhiNode::make(eq_region, i_o());
2258     eq_mem_phi = PhiNode::make(eq_region, mem);
2259   }
2260 
2261   set_all_memory(mem);
2262 
2263   kill_dead_locals();
2264   ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2265   CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2266   call->set_override_symbolic_info(true);
2267   call->init_req(TypeFunc::Parms, not_null_left);
2268   call->init_req(TypeFunc::Parms+1, not_null_right);
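       // Keep the two acmp operands in the stack state across the call so that the
       // debug info allows re-executing the acmp on deoptimization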
2269   inc_sp(2);
2270   set_edges_for_java_call(call, false, false);
2271   Node* ret = set_results_for_java_call(call, false, true);
2272   dec_sp(2);
2273 
2274   // Test the return value of ValueObjectMethods::isSubstitutable()
2275   // This is the last check, do_if can emit traps now.
2276   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2277   Node* ctl = C->top();
2278   if (btest == BoolTest::eq) {
2279     PreserveJVMState pjvms(this);
2280     do_if(btest, subst_cmp, can_trap);
2281     if (!stopped()) {
2282       ctl = control();
2283     }
2284   } else {
2285     assert(btest == BoolTest::ne, "only eq or ne");
2286     PreserveJVMState pjvms(this);
2287     do_if(btest, subst_cmp, can_trap, false, &ctl);
2288     if (!stopped()) {
2289       eq_region->init_req(2, control());
2290       eq_io_phi->init_req(2, i_o());
2291       eq_mem_phi->init_req(2, reset_memory());
2292     }
2293   }
2294   ne_region->init_req(5, ctl);
2295   ne_io_phi->init_req(5, i_o());
2296   ne_mem_phi->init_req(5, reset_memory());
2297 
2298   record_for_igvn(ne_region);
2299   set_control(_gvn.transform(ne_region));
2300   set_i_o(_gvn.transform(ne_io_phi));
2301   set_all_memory(_gvn.transform(ne_mem_phi));
2302 
2303   if (btest == BoolTest::ne) {
2304     {
2305       PreserveJVMState pjvms(this);
2306       int target_bci = iter().get_dest();
2307       merge(target_bci);
2308     }
2309 
2310     record_for_igvn(eq_region);
2311     set_control(_gvn.transform(eq_region));
2312     set_i_o(_gvn.transform(eq_io_phi));
2313     set_all_memory(_gvn.transform(eq_mem_phi));
2314   }
2315 }
2316 
2317 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2318   // Don't want to speculate on uncommon traps when running with -Xcomp
2319   if (!UseInterpreter) {
2320     return false;
2321   }
2322   return (seems_never_taken(prob) && seems_stable_comparison());
2323 }
2324 
2325 void Parse::maybe_add_predicate_after_if(Block* path) {
2326   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2327     // Add predicates at the bci of the if dominating the loop so that traps
2328     // can be recorded on the if's profile data
2329     int bc_depth = repush_if_args();
2330     add_parse_predicates();
2331     dec_sp(bc_depth);
2332     path->set_has_predicates();
2333   }
2334 }
2335 
2336 
2337 //----------------------------adjust_map_after_if------------------------------
2338 // Adjust the JVM state to reflect the result of taking this path.
2339 // Basically, it means inspecting the CmpNode controlling this
2340 // branch, seeing how it constrains a tested value, and then
2341 // deciding if it's worth our while to encode this constraint
2342 // as graph nodes in the current abstract interpretation map.
2343 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2344   if (!c->is_Cmp()) {
2345     maybe_add_predicate_after_if(path);
2346     return;
2347   }
2348 
2349   if (stopped() || btest == BoolTest::illegal) {
2350     return;                             // nothing to do
2351   }
2352 
2353   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2354 
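       // A stable, never-taken branch: replace the dead path with an uncommon trap;
       // if it is ever taken we deoptimize and reinterpret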
2355   if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2356     repush_if_args();
2357     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2358                   Deoptimization::Action_reinterpret,
2359                   nullptr,
2360                   (is_fallthrough ? "taken always" : "taken never"));
2361 
2362     if (call != nullptr) {
2363       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2364     }
2365     return;
2366   }
2367 
2368   Node* val = c->in(1);
2369   Node* con = c->in(2);
2370   const Type* tcon = _gvn.type(con);
2371   const Type* tval = _gvn.type(val);
2372   bool have_con = tcon->singleton();
2373   if (tval->singleton()) {
2374     if (!have_con) {
2375       // Swap, so constant is in con.

2432     if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2433        // Found:
2434        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2435        // or the narrowOop equivalent.
2436        const Type* obj_type = _gvn.type(obj);
2437        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2438        if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
2439            tboth->higher_equal(obj_type)) {
2440           // obj has to be of the exact type Foo if the CmpP succeeds.
2441           int obj_in_map = map()->find_edge(obj);
2442           JVMState* jvms = this->jvms();
2443           if (obj_in_map >= 0 &&
2444               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2445             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2446             const Type* tcc = ccast->as_Type()->type();
2447             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2448             // Delay transform() call to allow recovery of pre-cast value
2449             // at the control merge.
2450             _gvn.set_type_bottom(ccast);
2451             record_for_igvn(ccast);
2452             if (tboth->is_inlinetypeptr()) {
2453               ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2454             }
2455             // Here's the payoff.
2456             replace_in_map(obj, ccast);
2457           }
2458        }
2459     }
2460   }
2461 
2462   int val_in_map = map()->find_edge(val);
2463   if (val_in_map < 0)  return;          // replace_in_map would be useless
2464   {
2465     JVMState* jvms = this->jvms();
2466     if (!(jvms->is_loc(val_in_map) ||
2467           jvms->is_stk(val_in_map)))
2468       return;                           // again, it would be useless
2469   }
2470 
2471   // Check for a comparison to a constant, and "know" that the compared
2472   // value is constrained on this path.
2473   assert(tcon->singleton(), "");
2474   ConstraintCastNode* ccast = nullptr;

2540   if (c->Opcode() == Op_CmpP &&
2541       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2542       c->in(2)->is_Con()) {
2543     Node* load_klass = nullptr;
2544     Node* decode = nullptr;
2545     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2546       decode = c->in(1);
2547       load_klass = c->in(1)->in(1);
2548     } else {
2549       load_klass = c->in(1);
2550     }
2551     if (load_klass->in(2)->is_AddP()) {
2552       Node* addp = load_klass->in(2);
2553       Node* obj = addp->in(AddPNode::Address);
2554       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
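           // If profiling recorded a speculative type for obj, cast obj to that type
           // so that the klass load (and with it the CmpP) can constant fold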
2555       if (obj_type->speculative_type_not_null() != nullptr) {
2556         ciKlass* k = obj_type->speculative_type();
2557         inc_sp(2);
2558         obj = maybe_cast_profiled_obj(obj, k);
2559         dec_sp(2);
2560         if (obj->is_InlineType()) {
2561           assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2562           obj = obj->as_InlineType()->get_oop();
2563         }
2564         // Make the CmpP use the casted obj
2565         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2566         load_klass = load_klass->clone();
2567         load_klass->set_req(2, addp);
2568         load_klass = _gvn.transform(load_klass);
2569         if (decode != nullptr) {
2570           decode = decode->clone();
2571           decode->set_req(1, load_klass);
2572           load_klass = _gvn.transform(decode);
2573         }
2574         c = c->clone();
2575         c->set_req(1, load_klass);
2576         c = _gvn.transform(c);
2577       }
2578     }
2579   }
2580   return c;
2581 }
2582 
2583 //------------------------------do_one_bytecode--------------------------------

3390     // See if we can get some profile data and hand it off to the next block
3391     Block *target_block = block()->successor_for_bci(target_bci);
3392     if (target_block->pred_count() != 1)  break;
3393     ciMethodData* methodData = method()->method_data();
3394     if (!methodData->is_mature())  break;
3395     ciProfileData* data = methodData->bci_to_data(bci());
3396     assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3397     int taken = ((ciJumpData*)data)->taken();
3398     taken = method()->scale_count(taken);
3399     target_block->set_count(taken);
3400     break;
3401   }
3402 
3403   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3404   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3405   handle_if_null:
3406     // If this is a backwards branch in the bytecodes, add Safepoint
3407     maybe_add_safepoint(iter().get_dest());
3408     a = null();
3409     b = pop();
3410     if (b->is_InlineType()) {
3411       // Null checking a scalarized but nullable inline type. Check the IsInit
3412       // input instead of the oop input to avoid keeping buffer allocations alive.
3413       c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3414     } else {
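           // Speculate on the profiled nullness of b: assert that it is never null
           // (or always null) and trap if the speculation turns out to be wrong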
3415       if (!_gvn.type(b)->speculative_maybe_null() &&
3416           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3417         inc_sp(1);
3418         Node* null_ctl = top();
3419         b = null_check_oop(b, &null_ctl, true, true, true);
3420         assert(null_ctl->is_top(), "no null control here");
3421         dec_sp(1);
3422       } else if (_gvn.type(b)->speculative_always_null() &&
3423                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3424         inc_sp(1);
3425         b = null_assert(b);
3426         dec_sp(1);
3427       }
3428       c = _gvn.transform( new CmpPNode(b, a) );
3429     }
3430     do_ifnull(btest, c);
3431     break;
3432 
3433   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3434   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3435   handle_if_acmp:
3436     // If this is a backwards branch in the bytecodes, add Safepoint
3437     maybe_add_safepoint(iter().get_dest());
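         // The top of stack (a) is the second operand, so it becomes the right input of the acmp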
3438     a = pop();
3439     b = pop();
3440     do_acmp(btest, b, a);


3441     break;
3442 
3443   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3444   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3445   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3446   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3447   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3448   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3449   handle_ifxx:
3450     // If this is a backwards branch in the bytecodes, add Safepoint
3451     maybe_add_safepoint(iter().get_dest());
3452     a = _gvn.intcon(0);
3453     b = pop();
3454     c = _gvn.transform( new CmpINode(b, a) );
3455     do_if(btest, c);
3456     break;
3457 
3458   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3459   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3460   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;

3475     break;
3476 
3477   case Bytecodes::_lookupswitch:
3478     do_lookupswitch();
3479     break;
3480 
3481   case Bytecodes::_invokestatic:
3482   case Bytecodes::_invokedynamic:
3483   case Bytecodes::_invokespecial:
3484   case Bytecodes::_invokevirtual:
3485   case Bytecodes::_invokeinterface:
3486     do_call();
3487     break;
3488   case Bytecodes::_checkcast:
3489     do_checkcast();
3490     break;
3491   case Bytecodes::_instanceof:
3492     do_instanceof();
3493     break;
3494   case Bytecodes::_anewarray:
3495     do_newarray();
3496     break;
3497   case Bytecodes::_newarray:
3498     do_newarray((BasicType)iter().get_index());
3499     break;
3500   case Bytecodes::_multianewarray:
3501     do_multianewarray();
3502     break;
3503   case Bytecodes::_new:
3504     do_new();
3505     break;
3506 
3507   case Bytecodes::_jsr:
3508   case Bytecodes::_jsr_w:
3509     do_jsr();
3510     break;
3511 
3512   case Bytecodes::_ret:
3513     do_ret();
3514     break;
3515 