
src/hotspot/share/opto/parse2.cpp


   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciMethodData.hpp"

  26 #include "classfile/vmSymbols.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "interpreter/linkResolver.hpp"
  29 #include "jvm_io.h"
  30 #include "memory/resourceArea.hpp"
  31 #include "memory/universe.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/divnode.hpp"
  37 #include "opto/idealGraphPrinter.hpp"


  38 #include "opto/matcher.hpp"
  39 #include "opto/memnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/parse.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 
  47 #ifndef PRODUCT
  48 extern uint explicit_null_checks_inserted,
  49             explicit_null_checks_elided;
  50 #endif
  51 
  52 //---------------------------------array_load----------------------------------
  53 void Parse::array_load(BasicType bt) {
  54   const Type* elemtype = Type::TOP;
  55   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  56   Node* adr = array_addressing(bt, 0, elemtype);
  57   if (stopped())  return;     // guaranteed null or range check
  58 
  59   pop();                      // index (already used)
  60   Node* array = pop();        // the array itself
  61 
  62   if (elemtype == TypeInt::BOOL) {
  63     bt = T_BOOLEAN;
  64   }
  65   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  66 
  67   Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
  68                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  69   if (big_val) {
  70     push_pair(ld);
  71   } else {
  72     push(ld);

  73   }

  74 }
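
In array_load above, push_pair is used for T_LONG and T_DOUBLE because category-2 values occupy two operand-stack slots in the JVM stack model, while every other result takes a single slot (push). The following standalone sketch models that slot accounting; the names (MiniStack, push_one, push_pair) are invented for illustration and are not HotSpot code.

// Illustrative sketch only: models why long/double results take two
// operand-stack slots while int/object results take one. The names
// (MiniStack, push_one, push_pair) are invented and are not HotSpot APIs.
#include <cassert>
#include <vector>

struct MiniStack {
  std::vector<long long> slots;               // one entry per JVM stack slot
  void push_one(long long v) { slots.push_back(v); }
  void push_pair(long long v) {               // category-2 value (long/double)
    slots.push_back(v);                       // value...
    slots.push_back(0);                       // ...plus a dummy second slot
  }
};

int main() {
  MiniStack s;
  s.push_one(42);      // iaload result: one slot
  s.push_pair(42LL);   // laload/daload result: two slots
  assert(s.slots.size() == 3);
  return 0;
}
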
  75 
  76 
  77 //--------------------------------array_store----------------------------------
  78 void Parse::array_store(BasicType bt) {
  79   const Type* elemtype = Type::TOP;
  80   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  81   Node* adr = array_addressing(bt, big_val ? 2 : 1, elemtype);
  82   if (stopped())  return;     // guaranteed null or range check

  83   if (bt == T_OBJECT) {
  84     array_store_check();
  85     if (stopped()) {
  86       return;
  87     }
  88   }
  89   Node* val;                  // Oop to store
  90   if (big_val) {
  91     val = pop_pair();
  92   } else {
  93     val = pop();
  94   }
  95   pop();                      // index (already used)
  96   Node* array = pop();        // the array itself
  97 
  98   if (elemtype == TypeInt::BOOL) {
  99     bt = T_BOOLEAN;
 100   }
 101   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 102 
 103   access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 104 }
 105 
 106 
 107 //------------------------------array_addressing-------------------------------
 108 // Pull array and index from the stack.  Compute pointer-to-element.
 109 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 110   Node *idx   = peek(0+vals);   // Get from stack without popping
 111   Node *ary   = peek(1+vals);   // in case of exception
 112 
 113   // Null check the array base, with correct stack contents
 114   ary = null_check(ary, T_ARRAY);
 115   // Compile-time detect of null-exception?
 116   if (stopped())  return top();
 117 
 118   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 119   const TypeInt*    sizetype = arytype->size();
 120   elemtype = arytype->elem();
 121 
 122   if (UseUniqueSubclasses) {
 123     const Type* el = elemtype->make_ptr();
 124     if (el && el->isa_instptr()) {
 125       const TypeInstPtr* toop = el->is_instptr();
 126       if (toop->instance_klass()->unique_concrete_subklass()) {
 127         // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
 128         const Type* subklass = Type::get_const_type(toop->instance_klass());
 129         elemtype = subklass->join_speculative(el);
 130       }
 131     }
 132   }
 133 
 134   // Check for big class initializers with all constant offsets
 135   // feeding into a known-size array.
 136   const TypeInt* idxtype = _gvn.type(idx)->is_int();
 137   // See if the highest idx value is less than the lowest array bound,
 138   // and if the idx value cannot be negative:
 139   bool need_range_check = true;
 140   if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
 141     need_range_check = false;
 142     if (C->log() != nullptr)   C->log()->elem("observe that='!need_range_check'");
 143   }
 144 
 145   if (!arytype->is_loaded()) {
 146     // Only fails for some -Xcomp runs
 147     // The class is unloaded.  We have to run this bytecode in the interpreter.
 148     ciKlass* klass = arytype->unloaded_klass();
 149 
 150     uncommon_trap(Deoptimization::Reason_unloaded,
 151                   Deoptimization::Action_reinterpret,
 152                   klass, "!loaded array");
 153     return top();
 154   }
 155 
 156   // Do the range check
 157   if (need_range_check) {
 158     Node* tst;
 159     if (sizetype->_hi <= 0) {
 160       // The greatest array bound is negative, so we can conclude that we're
 161       // compiling unreachable code, but the unsigned compare trick used below
 162       // only works with non-negative lengths.  Instead, hack "tst" to be zero so
 163       // the uncommon_trap path will always be taken.
 164       tst = _gvn.intcon(0);
 165     } else {
 166       // Range is constant in array-oop, so we can use the original state of mem
 167       Node* len = load_array_length(ary);
 168 
 169       // Test length vs index (standard trick using unsigned compare)
 170       Node* chk = _gvn.transform( new CmpUNode(idx, len) );
 171       BoolTest::mask btest = BoolTest::lt;
 172       tst = _gvn.transform( new BoolNode(chk, btest) );
 173     }
 174     RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 175     _gvn.set_type(rc, rc->Value(&_gvn));
 176     if (!tst->is_Con()) {
 177       record_for_igvn(rc);
 178     }
 179     set_control(_gvn.transform(new IfTrueNode(rc)));
 180     // Branch to failure if out of bounds
 181     {
 182       PreserveJVMState pjvms(this);
 183       set_control(_gvn.transform(new IfFalseNode(rc)));
 184       if (C->allow_range_check_smearing()) {
 185         // Do not use builtin_throw, since range checks are sometimes
 186         // made more stringent by an optimistic transformation.
 187         // This creates "tentative" range checks at this point,
 188         // which are not guaranteed to throw exceptions.
 189         // See IfNode::Ideal, is_range_check, adjust_check.
 190         uncommon_trap(Deoptimization::Reason_range_check,
 191                       Deoptimization::Action_make_not_entrant,
 192                       nullptr, "range_check");
 193       } else {
 194         // If we have already recompiled with the range-check-widening
 195         // heroic optimization turned off, then we must really be throwing
 196         // range check exceptions.
 197         builtin_throw(Deoptimization::Reason_range_check);
 198       }
 199     }
 200   }

 201   // Check for always knowing you are throwing a range-check exception
 202   if (stopped())  return top();
 203 
 204   // Make array address computation control dependent to prevent it
 205   // from floating above the range check during loop optimizations.
 206   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 207   assert(ptr != top(), "top should go hand-in-hand with stopped");
 208 
 209   return ptr;
 210 }
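
The "standard trick using unsigned compare" mentioned in the range-check code above collapses the two-sided test 0 <= idx && idx < len into a single unsigned comparison: for a non-negative length, a negative index reinterpreted as unsigned becomes a huge value and fails the same test. A minimal standalone demonstration (names are invented; this is not HotSpot code):

// Sketch of the unsigned-compare bounds check behind the RangeCheckNode:
// for a non-negative length len, (unsigned)idx < (unsigned)len is
// equivalent to (0 <= idx && idx < len), because a negative idx wraps to a
// huge unsigned value. Names here are illustrative only.
#include <cassert>
#include <cstdint>

static bool in_bounds_two_tests(int32_t idx, int32_t len) {
  return idx >= 0 && idx < len;
}

static bool in_bounds_unsigned(int32_t idx, int32_t len) {
  return (uint32_t)idx < (uint32_t)len;       // single compare, single branch
}

int main() {
  for (int32_t idx : {-2147483647 - 1, -1, 0, 5, 9, 10, 2147483647}) {
    assert(in_bounds_two_tests(idx, 10) == in_bounds_unsigned(idx, 10));
  }
  return 0;
}

In the graph above, this single comparison is what the CmpUNode plus BoolTest::lt pair expresses.
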
 211 
 212 
 213 // returns IfNode
 214 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 215   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 216   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 217   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 218   return iff;
 219 }
 220 
 221 
 222 // sentinel value for the target bci to mark never taken branches
 223 // (according to profiling)
 224 static const int never_reached = INT_MAX;
 225 
 226 //------------------------------helper for tableswitch-------------------------
 227 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
 228   // True branch, use existing map info
 229   { PreserveJVMState pjvms(this);
 230     Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
 231     set_control( iftrue );

1428   // False branch
1429   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1430   set_control(iffalse);
1431 
1432   if (stopped()) {              // Path is dead?
1433     NOT_PRODUCT(explicit_null_checks_elided++);
1434     if (C->eliminate_boxing()) {
1435       // Mark the successor block as parsed
1436       next_block->next_path_num();
1437     }
1438   } else  {                     // Path is live.
1439     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1440   }
1441 
1442   if (do_stress_trap) {
1443     stress_trap(iff, counter, incr_store);
1444   }
1445 }
1446 
1447 //------------------------------------do_if------------------------------------
1448 void Parse::do_if(BoolTest::mask btest, Node* c) {
1449   int target_bci = iter().get_dest();
1450 
1451   Block* branch_block = successor_for_bci(target_bci);
1452   Block* next_block   = successor_for_bci(iter().next_bci());
1453 
1454   float cnt;
1455   float prob = branch_prediction(cnt, btest, target_bci, c);
1456   float untaken_prob = 1.0 - prob;
1457 
1458   if (prob == PROB_UNKNOWN) {
1459     if (PrintOpto && Verbose) {
1460       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1461     }
1462     repush_if_args(); // to gather stats on loop
1463     uncommon_trap(Deoptimization::Reason_unreached,
1464                   Deoptimization::Action_reinterpret,
1465                   nullptr, "cold");
1466     if (C->eliminate_boxing()) {
1467       // Mark the successor blocks as parsed
1468       branch_block->next_path_num();

1519   }
1520 
1521   // Generate real control flow
1522   float true_prob = (taken_if_true ? prob : untaken_prob);
1523   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1524   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1525   Node* taken_branch   = new IfTrueNode(iff);
1526   Node* untaken_branch = new IfFalseNode(iff);
1527   if (!taken_if_true) {  // Finish conversion to canonical form
1528     Node* tmp      = taken_branch;
1529     taken_branch   = untaken_branch;
1530     untaken_branch = tmp;
1531   }
1532 
1533   // Branch is taken:
1534   { PreserveJVMState pjvms(this);
1535     taken_branch = _gvn.transform(taken_branch);
1536     set_control(taken_branch);
1537 
1538     if (stopped()) {
1539       if (C->eliminate_boxing()) {
1540         // Mark the successor block as parsed
1541         branch_block->next_path_num();
1542       }
1543     } else {
1544       adjust_map_after_if(taken_btest, c, prob, branch_block);
1545       if (!stopped()) {
1546         merge(target_bci);
1547       }
1548     }
1549   }
1550 
1551   untaken_branch = _gvn.transform(untaken_branch);
1552   set_control(untaken_branch);
1553 
1554   // Branch not taken.
1555   if (stopped()) {
1556     if (C->eliminate_boxing()) {
1557       // Mark the successor block as parsed
1558       next_block->next_path_num();
1559     }
1560   } else {
1561     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1562   }
1563 
1564   if (do_stress_trap) {
1565     stress_trap(iff, counter, incr_store);
1566   }
1567 }
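
When branch_prediction returns PROB_UNKNOWN, do_if gives up on compiling the branch and emits a Reason_unreached uncommon trap instead, betting that a path profiling never saw stays cold and can fall back to the interpreter if it is ever reached. The sketch below is a deliberately simplified model of that profile-guided decision; the types and thresholds are invented and do not reflect the actual heuristics in branch_prediction.

// Illustrative sketch (not HotSpot code) of the idea behind the
// Reason_unreached/"cold" trap: when profiling says an edge was never
// taken, the compiler plants a deoptimization stub instead of compiling
// the path. All names below are invented.
#include <cstdio>

struct BranchProfile { long taken; long not_taken; };

enum class Plan { CompileBothPaths, TrapOnTaken, TrapOnNotTaken };

static Plan plan_branch(const BranchProfile& p) {
  if (p.taken == 0 && p.not_taken > 0)  return Plan::TrapOnTaken;     // never-taken edge
  if (p.not_taken == 0 && p.taken > 0)  return Plan::TrapOnNotTaken;
  return Plan::CompileBothPaths;                                      // both paths are warm
}

int main() {
  printf("%d\n", (int)plan_branch({0, 10000}));   // trap replaces the cold path
  printf("%d\n", (int)plan_branch({600, 400}));   // compile both paths
  return 0;
}
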
1568 
1569 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
1570 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
1571 // then either takes the trap or executes the original, unstable if.
1572 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
1573   // Search for an unstable if trap
1574   CallStaticJavaNode* trap = nullptr;
1575   assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
1576   ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
1577   if (trap == nullptr || !trap->jvms()->should_reexecute()) {
1578     // No suitable trap found. Remove unused counter load and increment.
1579     C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
1580     return;
1581   }
1582 
1583   // Remove trap from optimization list since we add another path to the trap.
1584   bool success = C->remove_unstable_if_trap(trap, true);
1585   assert(success, "Trap already modified");
1586 
1587   // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
1588   int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]

1621 }
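
stress_trap picks a random logarithmic frequency in [1, 31]; the elided lines above then emit a cheap shared-counter check so the trap fires only on a small fraction of executions. The sketch below shows one conventional way such a check can be expressed as a mask test that fires roughly once every 2^freq_log executions; it is an assumption-laden illustration, not the code that was elided.

// Sketch (not the actual HotSpot implementation, which is elided above) of
// a counter-based check that fires about once every 2^freq_log executions.
// A logarithmic frequency in [1, 31] maps to a mask test, which is cheap
// enough to plant in compiled code. Names are illustrative.
#include <cstdint>
#include <cstdio>

static uint32_t shared_counter = 0;

static bool should_take_stress_trap(int freq_log) {
  uint32_t mask = (freq_log >= 32) ? ~0u : ((1u << freq_log) - 1u);
  return (++shared_counter & mask) == 0;   // true once per 2^freq_log increments
}

int main() {
  int fires = 0;
  for (int i = 0; i < 1 << 16; i++) {
    if (should_take_stress_trap(/* freq_log = */ 8)) fires++;
  }
  printf("fired %d times out of 65536 (expected 256)\n", fires);
  return 0;
}
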
1622 
1623 void Parse::maybe_add_predicate_after_if(Block* path) {
1624   if (path->is_SEL_head() && path->preds_parsed() == 0) {
1625     // Add predicates at bci of if dominating the loop so traps can be
1626     // recorded on the if's profile data
1627     int bc_depth = repush_if_args();
1628     add_parse_predicates();
1629     dec_sp(bc_depth);
1630     path->set_has_predicates();
1631   }
1632 }
1633 
1634 
1635 //----------------------------adjust_map_after_if------------------------------
1636 // Adjust the JVM state to reflect the result of taking this path.
1637 // Basically, it means inspecting the CmpNode controlling this
1638 // branch, seeing how it constrains a tested value, and then
1639 // deciding if it's worth our while to encode this constraint
1640 // as graph nodes in the current abstract interpretation map.
1641 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
1642   if (!c->is_Cmp()) {
1643     maybe_add_predicate_after_if(path);
1644     return;
1645   }
1646 
1647   if (stopped() || btest == BoolTest::illegal) {
1648     return;                             // nothing to do
1649   }
1650 
1651   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1652 
1653   if (path_is_suitable_for_uncommon_trap(prob)) {
1654     repush_if_args();
1655     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
1656                   Deoptimization::Action_reinterpret,
1657                   nullptr,
1658                   (is_fallthrough ? "taken always" : "taken never"));
1659 
1660     if (call != nullptr) {
1661       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
1662     }
1663     return;
1664   }
1665 
1666   Node* val = c->in(1);
1667   Node* con = c->in(2);
1668   const Type* tcon = _gvn.type(con);
1669   const Type* tval = _gvn.type(val);
1670   bool have_con = tcon->singleton();
1671   if (tval->singleton()) {
1672     if (!have_con) {
1673       // Swap, so constant is in con.

1730     if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
1731        // Found:
1732        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
1733        // or the narrowOop equivalent.
1734        const Type* obj_type = _gvn.type(obj);
1735        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
1736        if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
1737            tboth->higher_equal(obj_type)) {
1738           // obj has to be of the exact type Foo if the CmpP succeeds.
1739           int obj_in_map = map()->find_edge(obj);
1740           JVMState* jvms = this->jvms();
1741           if (obj_in_map >= 0 &&
1742               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
1743             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
1744             const Type* tcc = ccast->as_Type()->type();
1745             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
1746             // Delay transform() call to allow recovery of pre-cast value
1747             // at the control merge.
1748             _gvn.set_type_bottom(ccast);
1749             record_for_igvn(ccast);



1750             // Here's the payoff.
1751             replace_in_map(obj, ccast);
1752           }
1753        }
1754     }
1755   }
1756 
1757   int val_in_map = map()->find_edge(val);
1758   if (val_in_map < 0)  return;          // replace_in_map would be useless
1759   {
1760     JVMState* jvms = this->jvms();
1761     if (!(jvms->is_loc(val_in_map) ||
1762           jvms->is_stk(val_in_map)))
1763       return;                           // again, it would be useless
1764   }
1765 
1766   // Check for a comparison to a constant, and "know" that the compared
1767   // value is constrained on this path.
1768   assert(tcon->singleton(), "");
1769   ConstraintCastNode* ccast = nullptr;

1834   if (c->Opcode() == Op_CmpP &&
1835       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1836       c->in(2)->is_Con()) {
1837     Node* load_klass = nullptr;
1838     Node* decode = nullptr;
1839     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1840       decode = c->in(1);
1841       load_klass = c->in(1)->in(1);
1842     } else {
1843       load_klass = c->in(1);
1844     }
1845     if (load_klass->in(2)->is_AddP()) {
1846       Node* addp = load_klass->in(2);
1847       Node* obj = addp->in(AddPNode::Address);
1848       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1849       if (obj_type->speculative_type_not_null() != nullptr) {
1850         ciKlass* k = obj_type->speculative_type();
1851         inc_sp(2);
1852         obj = maybe_cast_profiled_obj(obj, k);
1853         dec_sp(2);
1854         // Make the CmpP use the casted obj
1855         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1856         load_klass = load_klass->clone();
1857         load_klass->set_req(2, addp);
1858         load_klass = _gvn.transform(load_klass);
1859         if (decode != nullptr) {
1860           decode = decode->clone();
1861           decode->set_req(1, load_klass);
1862           load_klass = _gvn.transform(decode);
1863         }
1864         c = c->clone();
1865         c->set_req(1, load_klass);
1866         c = _gvn.transform(c);
1867       }
1868     }
1869   }
1870   return c;
1871 }
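
optimize_cmp_with_klass recognizes the IR shape produced by obj.getClass() == SomeClass.class, namely CmpP(LoadKlass(AddP(obj, klass_offset)), ConP), optionally behind a DecodeNKlass for compressed class pointers, and then re-points the compare at a profile-casted obj so the klass load can often fold. The standalone sketch below only models the pattern-matching part with a toy node structure; the input positions are simplified and all names are invented.

// Illustrative sketch (not HotSpot code) of the shape this function looks
// for: CmpP(LoadKlass(AddP(obj, klass_offset)), ConP(SomeClass)). Once the
// pattern is recognized, a profiled (speculative) type for obj lets the
// klass load, and hence the compare, fold. All types/names are invented.
#include <cstdio>

enum Op { CmpP, LoadKlass, DecodeNKlass, AddP, ConP, Param };

struct MiniNode { Op op; const MiniNode* in1; const MiniNode* in2; };

// Returns the object input if 'n' matches the getClass()-compare shape.
static const MiniNode* match_klass_compare(const MiniNode* n) {
  if (n->op != CmpP || n->in2->op != ConP) return nullptr;
  const MiniNode* load = n->in1;
  if (load->op == DecodeNKlass) load = load->in1;      // compressed klass case
  if (load->op != LoadKlass || load->in2->op != AddP) return nullptr;
  return load->in2->in1;                               // AddP address = the object
}

int main() {
  MiniNode obj{Param, nullptr, nullptr}, offs{ConP, nullptr, nullptr};
  MiniNode addp{AddP, &obj, &offs};
  MiniNode lk{LoadKlass, nullptr, &addp}, con{ConP, nullptr, nullptr};
  MiniNode cmp{CmpP, &lk, &con};
  printf("matched object: %p\n", (const void*)match_klass_compare(&cmp));
  return 0;
}
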
1872 
1873 //------------------------------do_one_bytecode--------------------------------

2649     // See if we can get some profile data and hand it off to the next block
2650     Block *target_block = block()->successor_for_bci(target_bci);
2651     if (target_block->pred_count() != 1)  break;
2652     ciMethodData* methodData = method()->method_data();
2653     if (!methodData->is_mature())  break;
2654     ciProfileData* data = methodData->bci_to_data(bci());
2655     assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
2656     int taken = ((ciJumpData*)data)->taken();
2657     taken = method()->scale_count(taken);
2658     target_block->set_count(taken);
2659     break;
2660   }
2661 
2662   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
2663   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2664   handle_if_null:
2665     // If this is a backwards branch in the bytecodes, add Safepoint
2666     maybe_add_safepoint(iter().get_dest());
2667     a = null();
2668     b = pop();
2669     if (!_gvn.type(b)->speculative_maybe_null() &&
2670         !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2671       inc_sp(1);
2672       Node* null_ctl = top();
2673       b = null_check_oop(b, &null_ctl, true, true, true);
2674       assert(null_ctl->is_top(), "no null control here");
2675       dec_sp(1);
2676     } else if (_gvn.type(b)->speculative_always_null() &&
2677                !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2678       inc_sp(1);
2679       b = null_assert(b);
2680       dec_sp(1);
2681     }
2682     c = _gvn.transform( new CmpPNode(b, a) );
2683     do_ifnull(btest, c);
2684     break;
2685 
2686   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2687   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2688   handle_if_acmp:
2689     // If this is a backwards branch in the bytecodes, add Safepoint
2690     maybe_add_safepoint(iter().get_dest());
2691     a = pop();
2692     b = pop();
2693     c = _gvn.transform( new CmpPNode(b, a) );
2694     c = optimize_cmp_with_klass(c);
2695     do_if(btest, c);
2696     break;
2697 
2698   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2699   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2700   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2701   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2702   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2703   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2704   handle_ifxx:
2705     // If this is a backwards branch in the bytecodes, add Safepoint
2706     maybe_add_safepoint(iter().get_dest());
2707     a = _gvn.intcon(0);
2708     b = pop();
2709     c = _gvn.transform( new CmpINode(b, a) );
2710     do_if(btest, c);
2711     break;
2712 
2713   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2714   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2715   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;

2730     break;
2731 
2732   case Bytecodes::_lookupswitch:
2733     do_lookupswitch();
2734     break;
2735 
2736   case Bytecodes::_invokestatic:
2737   case Bytecodes::_invokedynamic:
2738   case Bytecodes::_invokespecial:
2739   case Bytecodes::_invokevirtual:
2740   case Bytecodes::_invokeinterface:
2741     do_call();
2742     break;
2743   case Bytecodes::_checkcast:
2744     do_checkcast();
2745     break;
2746   case Bytecodes::_instanceof:
2747     do_instanceof();
2748     break;
2749   case Bytecodes::_anewarray:
2750     do_anewarray();
2751     break;
2752   case Bytecodes::_newarray:
2753     do_newarray((BasicType)iter().get_index());
2754     break;
2755   case Bytecodes::_multianewarray:
2756     do_multianewarray();
2757     break;
2758   case Bytecodes::_new:
2759     do_new();
2760     break;
2761 
2762   case Bytecodes::_jsr:
2763   case Bytecodes::_jsr_w:
2764     do_jsr();
2765     break;
2766 
2767   case Bytecodes::_ret:
2768     do_ret();
2769     break;
2770 

src/hotspot/share/opto/parse2.cpp (patched version)

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciMethodData.hpp"
  26 #include "ci/ciSymbols.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileLog.hpp"
  29 #include "interpreter/linkResolver.hpp"
  30 #include "jvm_io.h"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/idealKit.hpp"
  40 #include "opto/inlinetypenode.hpp"
  41 #include "opto/matcher.hpp"
  42 #include "opto/memnode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/opaquenode.hpp"
  45 #include "opto/parse.hpp"
  46 #include "opto/runtime.hpp"
  47 #include "runtime/deoptimization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 
  50 #ifndef PRODUCT
  51 extern uint explicit_null_checks_inserted,
  52             explicit_null_checks_elided;
  53 #endif
  54 
  55 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  56   // Feed unused profile data to type speculation
  57   if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
  58     ciKlass* array_type = nullptr;
  59     ciKlass* element_type = nullptr;
  60     ProfilePtrKind element_ptr = ProfileMaybeNull;
  61     bool flat_array = true;
  62     bool null_free_array = true;
  63     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
  64     if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
  65       ld = record_profile_for_speculation(ld, element_type, element_ptr);
  66     }
  67   }
  68   return ld;
  69 }
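
record_profile_for_speculation_at_array_load only attaches profile data to the loaded value when the profile adds information beyond the static type, i.e. a concrete element class or the fact that the element pointer was never null at this bci. A small sketch of that usefulness test, with invented names:

// Illustrative sketch (invented names) of the decision made above: profile
// data for this bytecode is worth attaching to the loaded value only when
// it reports a single element class or a non-null element pointer.
#include <cstdio>

enum class PtrKind { MaybeNull, NeverNull, AlwaysNull };

struct ArrayAccessProfile {
  const char* element_class;   // nullptr if the profile saw multiple classes
  PtrKind     element_ptr;
};

static bool profile_is_useful(const ArrayAccessProfile& p) {
  return p.element_class != nullptr || p.element_ptr != PtrKind::MaybeNull;
}

int main() {
  printf("%d\n", profile_is_useful({"MyValue", PtrKind::NeverNull})); // 1: speculate
  printf("%d\n", profile_is_useful({nullptr, PtrKind::MaybeNull}));   // 0: nothing to gain
  return 0;
}
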
  70 
  71 
  72 //---------------------------------array_load----------------------------------
  73 void Parse::array_load(BasicType bt) {
  74   const Type* elemtype = Type::TOP;

  75   Node* adr = array_addressing(bt, 0, elemtype);
  76   if (stopped())  return;     // guaranteed null or range check
  77 
  78   Node* array_index = pop();
  79   Node* array = pop();
  80 
  81   // Handle inline type arrays
  82   const TypeOopPtr* element_ptr = elemtype->make_oopptr();
  83   const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
  84 
  85   if (!array_type->is_not_flat()) {
  86     // Cannot statically determine if array is a flat array, emit runtime check
  87     assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
  88            (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->flat_in_array()), "array can't be flat");
  89     IdealKit ideal(this);
  90     IdealVariable res(ideal);
  91     ideal.declarations_done();
  92     ideal.if_then(flat_array_test(array, /* flat = */ false)); {
  93       // Non-flat array
  94       sync_kit(ideal);
  95       if (!array_type->is_flat()) {
  96         assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
  97         const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  98         DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
  99         if (needs_range_check(array_type->size(), array_index)) {
 100           // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
 101           // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
 102           // possibly float above the range check at any point.
 103           decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
 104         }
 105         Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
 106         if (element_ptr->is_inlinetypeptr()) {
 107           ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass(), !element_ptr->maybe_null());
 108         }
 109         ideal.set(res, ld);
 110       }
 111       ideal.sync_kit(this);
 112     } ideal.else_(); {
 113       // Flat array
 114       sync_kit(ideal);
 115       if (!array_type->is_not_flat()) {
 116         if (element_ptr->is_inlinetypeptr()) {
 117           // Element type is known, cast and load from flat array layout.
 118           ciInlineKlass* vk = element_ptr->inline_klass();
 119           bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
 120           bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
 121           if (is_null_free) {
 122             // TODO 8350865 Impossible type
 123             is_not_null_free = false;
 124           }
 125           bool is_naturally_atomic = vk->is_empty() || (is_null_free && vk->nof_declared_nonstatic_fields() == 1);
 126           bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));
 127 
 128           adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
 129           int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
 130           Node* vt = InlineTypeNode::make_from_flat(this, vk, array, adr, array_index, nullptr, 0, may_need_atomicity, nm_offset);
 131           ideal.set(res, vt);
 132         } else {
 133           // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
 134           // runtime call to correctly load the inline type element from the flat array.
 135           Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
 136           ideal.set(res, inline_type);
 137         }
 138       }
 139       ideal.sync_kit(this);
 140     } ideal.end_if();
 141     sync_kit(ideal);
 142     Node* ld = _gvn.transform(ideal.value(res));
 143     ld = record_profile_for_speculation_at_array_load(ld);
 144     push_node(bt, ld);
 145     return;
 146   }
 147 
 148   if (elemtype == TypeInt::BOOL) {
 149     bt = T_BOOLEAN;
 150   }
 151   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

 152   Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
 153                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 154   ld = record_profile_for_speculation_at_array_load(ld);
 155   // Loading an inline type from a non-flat array
 156   if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
 157     assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
 158     ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass(), !element_ptr->maybe_null());
 159   }
 160   push_node(bt, ld);
 161 }
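
The runtime flat_array_test in array_load exists because, when the array type is not statically known to be flat or not flat, the same load bytecode may hit either a regular array of references or a flat array whose elements store their fields inline. The sketch below is a deliberately simplified model of those two layouts and of the run-time dispatch between them; it is not HotSpot or Valhalla API, and all names are invented.

// Simplified model of the two layouts an element load has to cope with when
// flatness is only known at run time: a reference array holds pointers to
// boxed values, a flat array stores the fields of each element inline.
// Everything below is invented for illustration.
#include <cstdio>
#include <vector>

struct Point { int x; int y; };

struct MaybeFlatArray {
  bool flat;
  std::vector<Point>  flat_payload;     // fields stored inline, no per-element object
  std::vector<Point*> refs;             // conventional array of references
};

static Point load_element(const MaybeFlatArray& a, size_t idx) {
  if (a.flat) {
    return a.flat_payload[idx];         // "flat" path: copy fields out of the payload
  }
  return *a.refs[idx];                  // non-flat path: ordinary dereference
}

int main() {
  Point boxed{1, 2};
  MaybeFlatArray ref_array{false, {}, {&boxed}};
  MaybeFlatArray flat_array{true, {{3, 4}}, {}};
  printf("%d %d\n", load_element(ref_array, 0).x, load_element(flat_array, 0).y);
  return 0;
}
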
 162 
 163 Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
 164   // Below membars keep this access to an unknown flat array correctly
 165   // ordered with other unknown and known flat array accesses.
 166   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 167 
 168   Node* call = nullptr;
 169   {
 170     // Re-execute flat array load if runtime call triggers deoptimization
 171     PreserveReexecuteState preexecs(this);
 172     jvms()->set_bci(_bci);
 173     jvms()->set_should_reexecute(true);
 174     inc_sp(2);
 175     kill_dead_locals();
 176     call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 177                              OptoRuntime::load_unknown_inline_Type(),
 178                              OptoRuntime::load_unknown_inline_Java(),
 179                              nullptr, TypeRawPtr::BOTTOM,
 180                              array, array_index);
 181   }
 182   make_slow_call_ex(call, env()->Throwable_klass(), false);
 183   Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
 184 
 185   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 186 
 187   // Keep track of the information that the inline type is in flat arrays
 188   const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
 189   return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
 190 }
 191 
 192 //--------------------------------array_store----------------------------------
 193 void Parse::array_store(BasicType bt) {
 194   const Type* elemtype = Type::TOP;
 195   Node* adr = array_addressing(bt, type2size[bt], elemtype);

 196   if (stopped())  return;     // guaranteed null or range check
 197   Node* stored_value_casted = nullptr;
 198   if (bt == T_OBJECT) {
 199     stored_value_casted = array_store_check(adr, elemtype);
 200     if (stopped()) {
 201       return;
 202     }
 203   }
 204   Node* const stored_value = pop_node(bt); // Value to store
 205   Node* const array_index = pop();         // Index in the array
 206   Node* array = pop();                     // The array itself
 207 
 208   const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
 209   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);


 210 
 211   if (elemtype == TypeInt::BOOL) {
 212     bt = T_BOOLEAN;
 213   } else if (bt == T_OBJECT) {
 214     elemtype = elemtype->make_oopptr();
 215     const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
 216     // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
 217     // This is only legal for non-null stores because the array_store_check always passes for null, even
 218     // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
 219     bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
 220     bool not_null_free = not_inline;
 221     bool not_flat = not_inline || ( stored_value_casted_type->is_inlinetypeptr() &&
 222                                    !stored_value_casted_type->inline_klass()->flat_in_array());
 223     if (!array_type->is_not_null_free() && not_null_free) {
 224       // Storing a non-inline type, mark array as not null-free.
 225       array_type = array_type->cast_to_not_null_free();
 226       Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
 227       replace_in_map(array, cast);
 228       array = cast;
 229     }
 230     if (!array_type->is_not_flat() && not_flat) {
 231       // Storing to a non-flat array, mark array as not flat.
 232       array_type = array_type->cast_to_not_flat();
 233       Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
 234       replace_in_map(array, cast);
 235       array = cast;
 236     }
 237 
 238     if (!array_type->is_flat() && array_type->is_null_free()) {
 239       // Store to non-flat null-free inline type array (elements can never be null)
 240       assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
 241       if (elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
 242         // Ignore empty inline stores, array is already initialized.
 243         return;
 244       }
 245     } else if (!array_type->is_not_flat()) {
 246       // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
 247       assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
 248              (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
 249       // TODO 8350865 Depending on the available layouts, we can avoid this check in below flat/not-flat branches. Also the safe_for_replace arg is now always true.
 250       array = inline_array_null_guard(array, stored_value_casted, 3, true);
 251       IdealKit ideal(this);
 252       ideal.if_then(flat_array_test(array, /* flat = */ false)); {
 253         // Non-flat array
 254         if (!array_type->is_flat()) {
 255           sync_kit(ideal);
 256           assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 257           inc_sp(3);
 258           access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
 259           dec_sp(3);
 260           ideal.sync_kit(this);
 261         }
 262       } ideal.else_(); {
 263         // Flat array
 264         sync_kit(ideal);
 265         if (!array_type->is_not_flat()) {
 266           // Try to determine the inline klass type of the stored value
 267           ciInlineKlass* vk = nullptr;
 268           if (stored_value_casted_type->is_inlinetypeptr()) {
 269             vk = stored_value_casted_type->inline_klass();
 270           } else if (elemtype->is_inlinetypeptr()) {
 271             vk = elemtype->inline_klass();
 272           }
 273 
 274           if (vk != nullptr) {
 275             // Element type is known, cast and store to flat array layout.
 276             bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
 277             bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
 278             if (is_null_free) {
 279               // TODO 8350865 Impossible type
 280               is_not_null_free = false;
 281             }
 282             bool is_naturally_atomic = vk->is_empty() || (is_null_free && vk->nof_declared_nonstatic_fields() == 1);
 283             bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));
 284 
 285             // Re-execute flat array store if buffering triggers deoptimization
 286             PreserveReexecuteState preexecs(this);
 287             jvms()->set_should_reexecute(true);
 288             inc_sp(3);
 289 
 290             if (!stored_value_casted->is_InlineType()) {
 291               assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
 292               stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
 293             }
 294             adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
 295             int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
 296             stored_value_casted->as_InlineType()->store_flat(this, array, adr, array_index, nullptr, 0, may_need_atomicity, nm_offset, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 297           } else {
 298             // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
 299             store_to_unknown_flat_array(array, array_index, stored_value_casted);
 300           }
 301         }
 302         ideal.sync_kit(this);
 303       }
 304       ideal.end_if();
 305       sync_kit(ideal);
 306       return;
 307     } else if (!array_type->is_not_null_free()) {
 308       // Array is not flat but may be null free
 309       assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
 310       array = inline_array_null_guard(array, stored_value_casted, 3, true);
 311     }
 312   }
 313   inc_sp(3);
 314   access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 315   dec_sp(3);
 316 }
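
The T_OBJECT branch of array_store narrows the array type from the value being stored: a value that is known non-null and cannot be an inline type could never legally sit in a null-free or flat array, and an inline-type value whose class is never flattened in arrays rules out a flat destination. (Null stores carry no such information, which is why they are handled separately by inline_array_null_guard.) A small standalone restatement of that derivation, with invented names:

// Sketch of the derivation used above (names invented): from properties of
// a non-null stored value we conclude properties of the destination array.
//  - value cannot be an inline type => array is not null-free and not flat
//  - value is an inline type whose class is never flattened in arrays
//                                   => array is not flat
#include <cstdio>

struct StoredValueType {
  bool maybe_null;
  bool can_be_inline_type;
  bool is_inline_type_never_flat_in_array;
};

struct ArrayFacts { bool not_null_free; bool not_flat; };

static ArrayFacts narrow_array(const StoredValueType& v) {
  bool not_inline = !v.maybe_null && !v.can_be_inline_type;
  ArrayFacts f;
  f.not_null_free = not_inline;
  f.not_flat      = not_inline || v.is_inline_type_never_flat_in_array;
  return f;
}

int main() {
  ArrayFacts f = narrow_array({/*maybe_null*/ false, /*can_be_inline*/ false, false});
  printf("not_null_free=%d not_flat=%d\n", f.not_null_free, f.not_flat);
  return 0;
}
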
 317 
 318 // Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
 319 // array layout) or not exact (could have different flat array layouts at runtime).
 320 void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
 321   // Below membars keep this access to an unknown flat array correctly
 322   // ordered with other unknown and known flat array accesses.
 323   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 324 
 325   Node* call = nullptr;
 326   {
 327     // Re-execute flat array store if runtime call triggers deoptimization
 328     PreserveReexecuteState preexecs(this);
 329     jvms()->set_bci(_bci);
 330     jvms()->set_should_reexecute(true);
 331     inc_sp(3);
 332     kill_dead_locals();
 333     call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 334                       OptoRuntime::store_unknown_inline_Type(),
 335                       OptoRuntime::store_unknown_inline_Java(),
 336                       nullptr, TypeRawPtr::BOTTOM,
 337                       non_null_stored_value, array, idx);
 338   }
 339   make_slow_call_ex(call, env()->Throwable_klass(), false);
 340 
 341   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 342 }
 343 
 344 //------------------------------array_addressing-------------------------------
 345 // Pull array and index from the stack.  Compute pointer-to-element.
 346 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 347   Node *idx   = peek(0+vals);   // Get from stack without popping
 348   Node *ary   = peek(1+vals);   // in case of exception
 349 
 350   // Null check the array base, with correct stack contents
 351   ary = null_check(ary, T_ARRAY);
 352   // Compile-time detect of null-exception?
 353   if (stopped())  return top();
 354 
 355   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 356   const TypeInt*    sizetype = arytype->size();
 357   elemtype = arytype->elem();
 358 
 359   if (UseUniqueSubclasses) {
 360     const Type* el = elemtype->make_ptr();
 361     if (el && el->isa_instptr()) {
 362       const TypeInstPtr* toop = el->is_instptr();
 363       if (toop->instance_klass()->unique_concrete_subklass()) {
 364         // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
 365         const Type* subklass = Type::get_const_type(toop->instance_klass());
 366         elemtype = subklass->join_speculative(el);
 367       }
 368     }
 369   }
 370 
 371   if (!arytype->is_loaded()) {
 372     // Only fails for some -Xcomp runs
 373     // The class is unloaded.  We have to run this bytecode in the interpreter.
 374     ciKlass* klass = arytype->unloaded_klass();
 375 
 376     uncommon_trap(Deoptimization::Reason_unloaded,
 377                   Deoptimization::Action_reinterpret,
 378                   klass, "!loaded array");
 379     return top();
 380   }
 381 
 382   ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);
 383 
 384   if (needs_range_check(sizetype, idx)) {
 385     create_range_check(idx, ary, sizetype);
 386   } else if (C->log() != nullptr) {
 387     C->log()->elem("observe that='!need_range_check'");
 388   }
 389 
 390   // Check for always knowing you are throwing a range-check exception
 391   if (stopped())  return top();
 392 
 393   // Make array address computation control dependent to prevent it
 394   // from floating above the range check during loop optimizations.
 395   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 396   assert(ptr != top(), "top should go hand-in-hand with stopped");
 397 
 398   return ptr;
 399 }
 400 
 401 // Check if we need a range check for an array access. This is the case if the index is either negative or if it could
 402 // be greater or equal the smallest possible array size (i.e. out-of-bounds).
 403 bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
 404   const TypeInt* index_type = _gvn.type(index)->is_int();
 405   return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
 406 }
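
needs_range_check compares the index interval against the array-size interval: the check can only be dropped when every possible index is non-negative and strictly below the smallest possible length. A standalone copy of the same comparison with a few worked cases (type names invented):

// Standalone version of the interval test above (types invented): the range
// check is needed unless hi(index) < lo(size) and lo(index) >= 0.
#include <cassert>

struct IntInterval { int lo; int hi; };

static bool needs_range_check(IntInterval size, IntInterval index) {
  return index.hi >= size.lo || index.lo < 0;
}

int main() {
  // Index in [0,9], array length known to be at least 10: check can be removed.
  assert(!needs_range_check({10, 2147483647}, {0, 9}));
  // Index may reach 10: keep the check.
  assert(needs_range_check({10, 2147483647}, {0, 10}));
  // Index may be negative: keep the check.
  assert(needs_range_check({10, 2147483647}, {-1, 5}));
  return 0;
}

For a plain, unconstrained array the size interval starts at zero, so the check is only removable when the array length has been narrowed, for example by a constant-length allocation or an earlier dominating check.
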
 407 
 408 void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
 409   Node* tst;
 410   if (sizetype->_hi <= 0) {
 411     // The greatest array bound is negative, so we can conclude that we're
 412     // compiling unreachable code, but the unsigned compare trick used below
 413     // only works with non-negative lengths.  Instead, hack "tst" to be zero so
 414     // the uncommon_trap path will always be taken.
 415     tst = _gvn.intcon(0);
 416   } else {
 417     // Range is constant in array-oop, so we can use the original state of mem
 418     Node* len = load_array_length(ary);
 419 
 420     // Test length vs index (standard trick using unsigned compare)
 421     Node* chk = _gvn.transform(new CmpUNode(idx, len) );
 422     BoolTest::mask btest = BoolTest::lt;
 423     tst = _gvn.transform(new BoolNode(chk, btest) );
 424   }
 425   RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 426   _gvn.set_type(rc, rc->Value(&_gvn));
 427   if (!tst->is_Con()) {
 428     record_for_igvn(rc);
 429   }
 430   set_control(_gvn.transform(new IfTrueNode(rc)));
 431   // Branch to failure if out of bounds
 432   {
 433     PreserveJVMState pjvms(this);
 434     set_control(_gvn.transform(new IfFalseNode(rc)));
 435     if (C->allow_range_check_smearing()) {
 436       // Do not use builtin_throw, since range checks are sometimes
 437       // made more stringent by an optimistic transformation.
 438       // This creates "tentative" range checks at this point,
 439       // which are not guaranteed to throw exceptions.
 440       // See IfNode::Ideal, is_range_check, adjust_check.
 441       uncommon_trap(Deoptimization::Reason_range_check,
 442                     Deoptimization::Action_make_not_entrant,
 443                     nullptr, "range_check");
 444     } else {
 445       // If we have already recompiled with the range-check-widening
 446       // heroic optimization turned off, then we must really be throwing
 447       // range check exceptions.
 448       builtin_throw(Deoptimization::Reason_range_check);
 449     }
 450   }
 451 }
 452 
 453 // For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
 454 // and null-freeness. We can either prepare the speculative type for later uses or emit explicit speculative checks with
 455 // traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
 456 // non-null-free implies non-flat which allows us to remove flatness checks). This makes the graph simpler.
 457 Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
 458                                                          const Type*& element_type) {
 459   if (!array_type->is_flat() && !array_type->is_not_flat()) {
 460     // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
 461     // we can rely on a fixed memory layout (i.e. either a flat layout or not).
 462     array = cast_to_speculative_array_type(array, array_type, element_type);
 463   } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
 464     // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
 465     // at this bci.
 466     array = cast_to_profiled_array_type(array);
 467   }
 468 
 469   // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile data
 470   // whether we have a non-null-free or non-flat array. Speculating on a non-null-free array doesn't help aaload but could
 471   // be profitable for a subsequent aastore.
 472   if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
 473     array = speculate_non_null_free_array(array, array_type);
 474   }
 475   if (!array_type->is_flat() && !array_type->is_not_flat()) {
 476     array = speculate_non_flat_array(array, array_type);
 477   }
 478   return array;
 479 }
 480 
 481 // Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
 482 // wrong. On the fast path, we add a CheckCastPP to use the exact type.
 483 Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
 484   Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
 485   ciKlass* speculative_array_type = array_type->speculative_type();
 486   if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
 487     // No speculative type, check profile data at this bci
 488     speculative_array_type = nullptr;
 489     reason = Deoptimization::Reason_class_check;
 490     if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 491       ciKlass* profiled_element_type = nullptr;
 492       ProfilePtrKind element_ptr = ProfileMaybeNull;
 493       bool flat_array = true;
 494       bool null_free_array = true;
 495       method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
 496                                            null_free_array);
 497     }
 498   }
 499   if (speculative_array_type != nullptr) {
 500     // Speculate that this array has the exact type reported by profile data
 501     Node* casted_array = nullptr;
 502     DEBUG_ONLY(Node* old_control = control();)
 503     Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
 504     if (stopped()) {
 505       // The check always fails and therefore profile information is incorrect. Don't use it.
 506       assert(old_control == slow_ctl, "type check should have been removed");
 507       set_control(slow_ctl);
 508     } else if (!slow_ctl->is_top()) {
 509       { PreserveJVMState pjvms(this);
 510         set_control(slow_ctl);
 511         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 512       }
 513       replace_in_map(array, casted_array);
 514       array_type = _gvn.type(casted_array)->is_aryptr();
 515       element_type = array_type->elem();
 516       return casted_array;
 517     }
 518   }
 519   return array;
 520 }
 521 
 522 // Create a CheckCastPP when the speculative type can improve the current type.
 523 Node* Parse::cast_to_profiled_array_type(Node* const array) {
 524   ciKlass* array_type = nullptr;
 525   ciKlass* element_type = nullptr;
 526   ProfilePtrKind element_ptr = ProfileMaybeNull;
 527   bool flat_array = true;
 528   bool null_free_array = true;
 529   method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 530   if (array_type != nullptr) {
 531     return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
 532   }
 533   return array;
 534 }
 535 
 536 // Speculate that the array is non-null-free. We emit a trap when this turns out to be
 537 // wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
 538 Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
 539   bool null_free_array = true;
 540   Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 541   if (array_type->speculative() != nullptr &&
 542       array_type->speculative()->is_aryptr()->is_not_null_free() &&
 543       !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 544     null_free_array = false;
 545     reason = Deoptimization::Reason_speculate_class_check;
 546   } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
 547     ciKlass* profiled_array_type = nullptr;
 548     ciKlass* profiled_element_type = nullptr;
 549     ProfilePtrKind element_ptr = ProfileMaybeNull;
 550     bool flat_array = true;
 551     method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
 552                                          null_free_array);
 553     reason = Deoptimization::Reason_class_check;
 554   }
 555   if (!null_free_array) {
 556     { // Deoptimize if null-free array
 557       BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
 558       uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 559     }
 560     assert(!stopped(), "null-free array should have been caught earlier");
 561     Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
 562     replace_in_map(array, casted_array);
 563     array_type = _gvn.type(casted_array)->is_aryptr();
 564     return casted_array;
 565   }
 566   return array;
 567 }
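
speculate_non_null_free_array and speculate_non_flat_array below share one structure: choose a deoptimization reason depending on whether the hint comes from a speculative type or from per-bci profile data, guard the speculation with a cheap runtime test whose failing arm traps, and install a CheckCastPP on the surviving path so later code can assume the stronger type. The sketch below models only that guard-then-assume shape, with deoptimization stood in for by an exception; all names are invented and this is not HotSpot code.

// Stripped-down model of the "speculate, guard with a trap, then assume"
// structure used here and in speculate_non_flat_array. Deoptimization is
// modeled as an exception; in the real compiler the slow path re-enters the
// interpreter and records the trap. All names below are invented.
#include <cstdio>
#include <stdexcept>

struct ArrayView {
  bool null_free;          // property only known for sure at run time
  int  length;
};

// Guard: if the speculation "array is not null-free" is wrong, "deoptimize".
static const ArrayView& assume_not_null_free(const ArrayView& a) {
  if (a.null_free) throw std::runtime_error("deoptimize: speculation failed");
  return a;                // from here on, callers may rely on !null_free
}

int main() {
  ArrayView plain{false, 4};
  const ArrayView& trusted = assume_not_null_free(plain);
  printf("fast path, length %d\n", trusted.length);

  ArrayView nf{true, 4};
  try {
    assume_not_null_free(nf);
  } catch (const std::exception& e) {
    printf("%s\n", e.what());   // would fall back to the interpreter
  }
  return 0;
}
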
 568 
 569 // Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
 570 // On the fast path, we add a CheckCastPP to use the non-flat type.
 571 Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
 572   bool flat_array = true;
 573   Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 574   if (array_type->speculative() != nullptr &&
 575       array_type->speculative()->is_aryptr()->is_not_flat() &&
 576       !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 577     flat_array = false;
 578     reason = Deoptimization::Reason_speculate_class_check;
 579   } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 580     ciKlass* profiled_array_type = nullptr;
 581     ciKlass* profiled_element_type = nullptr;
 582     ProfilePtrKind element_ptr = ProfileMaybeNull;
 583     bool null_free_array = true;
 584     method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
 585                                          null_free_array);
 586     reason = Deoptimization::Reason_class_check;
 587   }
 588   if (!flat_array) {
 589     { // Deoptimize if flat array
 590       BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
 591       uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 592     }
 593     assert(!stopped(), "flat array should have been caught earlier");
 594     Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
 595     replace_in_map(array, casted_array);
 596     return casted_array;
 597   }
 598   return array;
 599 }
 600 
 601 // returns IfNode
 602 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 603   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 604   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 605   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 606   return iff;
 607 }
 608 
 609 
 610 // sentinel value for the target bci to mark never taken branches
 611 // (according to profiling)
 612 static const int never_reached = INT_MAX;
 613 
 614 //------------------------------helper for tableswitch-------------------------
 615 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
 616   // True branch, use existing map info
 617   { PreserveJVMState pjvms(this);
 618     Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
 619     set_control( iftrue );

1816   // False branch
1817   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1818   set_control(iffalse);
1819 
1820   if (stopped()) {              // Path is dead?
1821     NOT_PRODUCT(explicit_null_checks_elided++);
1822     if (C->eliminate_boxing()) {
1823       // Mark the successor block as parsed
1824       next_block->next_path_num();
1825     }
1826   } else  {                     // Path is live.
1827     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1828   }
1829 
1830   if (do_stress_trap) {
1831     stress_trap(iff, counter, incr_store);
1832   }
1833 }
1834 
1835 //------------------------------------do_if------------------------------------
1836 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken) {
1837   int target_bci = iter().get_dest();
1838 
1839   Block* branch_block = successor_for_bci(target_bci);
1840   Block* next_block   = successor_for_bci(iter().next_bci());
1841 
1842   float cnt;
1843   float prob = branch_prediction(cnt, btest, target_bci, c);
1844   float untaken_prob = 1.0 - prob;
1845 
1846   if (prob == PROB_UNKNOWN) {
1847     if (PrintOpto && Verbose) {
1848       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1849     }
1850     repush_if_args(); // to gather stats on loop
1851     uncommon_trap(Deoptimization::Reason_unreached,
1852                   Deoptimization::Action_reinterpret,
1853                   nullptr, "cold");
1854     if (C->eliminate_boxing()) {
1855       // Mark the successor blocks as parsed
1856       branch_block->next_path_num();

1907   }
1908 
1909   // Generate real control flow
1910   float true_prob = (taken_if_true ? prob : untaken_prob);
1911   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1912   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1913   Node* taken_branch   = new IfTrueNode(iff);
1914   Node* untaken_branch = new IfFalseNode(iff);
1915   if (!taken_if_true) {  // Finish conversion to canonical form
1916     Node* tmp      = taken_branch;
1917     taken_branch   = untaken_branch;
1918     untaken_branch = tmp;
1919   }
1920 
1921   // Branch is taken:
1922   { PreserveJVMState pjvms(this);
1923     taken_branch = _gvn.transform(taken_branch);
1924     set_control(taken_branch);
1925 
1926     if (stopped()) {
1927       if (C->eliminate_boxing() && !new_path) {
1928         // Mark the successor block as parsed (if we haven't created a new path)
1929         branch_block->next_path_num();
1930       }
1931     } else {
1932       adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1933       if (!stopped()) {
1934         if (new_path) {
1935           // Merge by using a new path
1936           merge_new_path(target_bci);
1937         } else if (ctrl_taken != nullptr) {
1938           // Don't merge but save taken branch to be wired by caller
1939           *ctrl_taken = control();
1940         } else {
1941           merge(target_bci);
1942         }
1943       }
1944     }
1945   }
1946 
1947   untaken_branch = _gvn.transform(untaken_branch);
1948   set_control(untaken_branch);
1949 
1950   // Branch not taken.
1951   if (stopped() && ctrl_taken == nullptr) {
1952     if (C->eliminate_boxing()) {
1953       // Mark the successor block as parsed (if caller does not re-wire control flow)
1954       next_block->next_path_num();
1955     }
1956   } else {
1957     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1958   }
1959 
1960   if (do_stress_trap) {
1961     stress_trap(iff, counter, incr_store);
1962   }
1963 }
1964 
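      // Note on the two optional parameters of do_if() above: do_acmp() below passes
      // new_path == true to merge the taken branch through a new path, and uses ctrl_taken
      // to capture the taken-branch control so it can be wired into the caller's own region
      // instead of being merged at the target bci.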
1965 
1966 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1967   if (t->speculative() == nullptr) {
1968     return ProfileUnknownNull;
1969   }
1970   if (t->speculative_always_null()) {
1971     return ProfileAlwaysNull;
1972   }
1973   if (t->speculative_maybe_null()) {
1974     return ProfileMaybeNull;
1975   }
1976   return ProfileNeverNull;
1977 }
1978 
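      // The acmp_* helpers below are only reached from do_acmp() on the path where the raw
      // pointer comparison found the operands to be different. They use profiling
      // (ProfilePtrKind and profiled klasses) to prove that at least one operand is null or
      // cannot be an inline type; in that case pointer inequality already decides the acmp
      // and the substitutability call can be skipped.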
1979 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1980   inc_sp(2);
1981   Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1982                                  !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1983                                  speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1984   dec_sp(2);
1985   if (btest == BoolTest::ne) {
1986     {
1987       PreserveJVMState pjvms(this);
1988       replace_in_map(input, cast);
1989       int target_bci = iter().get_dest();
1990       merge(target_bci);
1991     }
1992     record_for_igvn(eq_region);
1993     set_control(_gvn.transform(eq_region));
1994   } else {
1995     replace_in_map(input, cast);
1996   }
1997 }
1998 
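      // acmp_null_check() returns the null-checked input and reports any null path through
      // null_ctl, so callers can wire that path into their own region (see the
      // ne_region->add_req(null_ctl) uses below).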
1999 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
2000   inc_sp(2);
2001   null_ctl = top();
2002   Node* cast = null_check_oop(input, &null_ctl,
2003                               input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
2004                               false,
2005                               speculative_ptr_kind(tinput) == ProfileNeverNull &&
2006                               !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
2007   dec_sp(2);
2008   assert(!stopped(), "null input should have been caught earlier");
2009   return cast;
2010 }
2011 
2012 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2013   Node* ne_region = new RegionNode(1);
2014   Node* null_ctl;
2015   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2016   ne_region->add_req(null_ctl);
2017 
2018   Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
2019   {
2020     PreserveJVMState pjvms(this);
2021     inc_sp(2);
2022     set_control(slow_ctl);
2023     Deoptimization::DeoptReason reason;
2024     if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2025       reason = Deoptimization::Reason_speculate_class_check;
2026     } else {
2027       reason = Deoptimization::Reason_class_check;
2028     }
2029     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2030   }
2031   ne_region->add_req(control());
2032 
2033   record_for_igvn(ne_region);
2034   set_control(_gvn.transform(ne_region));
2035   if (btest == BoolTest::ne) {
2036     {
2037       PreserveJVMState pjvms(this);
2038       if (null_ctl == top()) {
2039         replace_in_map(input, cast);
2040       }
2041       int target_bci = iter().get_dest();
2042       merge(target_bci);
2043     }
2044     record_for_igvn(eq_region);
2045     set_control(_gvn.transform(eq_region));
2046   } else {
2047     if (null_ctl == top()) {
2048       replace_in_map(input, cast);
2049     }
2050     set_control(_gvn.transform(ne_region));
2051   }
2052 }
2053 
2054 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2055   Node* ne_region = new RegionNode(1);
2056   Node* null_ctl;
2057   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2058   ne_region->add_req(null_ctl);
2059 
2060   {
2061     BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2062     inc_sp(2);
2063     uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2064   }
2065 
2066   ne_region->add_req(control());
2067 
2068   record_for_igvn(ne_region);
2069   set_control(_gvn.transform(ne_region));
2070   if (btest == BoolTest::ne) {
2071     {
2072       PreserveJVMState pjvms(this);
2073       if (null_ctl == top()) {
2074         replace_in_map(input, cast);
2075       }
2076       int target_bci = iter().get_dest();
2077       merge(target_bci);
2078     }
2079     record_for_igvn(eq_region);
2080     set_control(_gvn.transform(eq_region));
2081   } else {
2082     if (null_ctl == top()) {
2083       replace_in_map(input, cast);
2084     }
2085     set_control(_gvn.transform(ne_region));
2086   }
2087 }
2088 
2089 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2090   ciKlass* left_type = nullptr;
2091   ciKlass* right_type = nullptr;
2092   ProfilePtrKind left_ptr = ProfileUnknownNull;
2093   ProfilePtrKind right_ptr = ProfileUnknownNull;
2094   bool left_inline_type = true;
2095   bool right_inline_type = true;
2096 
2097   // Leverage profiling at acmp
2098   if (UseACmpProfile) {
2099     method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2100     if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2101       left_type = nullptr;
2102       right_type = nullptr;
2103       left_inline_type = true;
2104       right_inline_type = true;
2105     }
2106     if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2107       left_ptr = ProfileUnknownNull;
2108       right_ptr = ProfileUnknownNull;
2109     }
2110   }
2111 
2112   if (UseTypeSpeculation) {
2113     record_profile_for_speculation(left, left_type, left_ptr);
2114     record_profile_for_speculation(right, right_type, right_ptr);
2115   }
2116 
2117   if (!EnableValhalla) {
2118     Node* cmp = CmpP(left, right);
2119     cmp = optimize_cmp_with_klass(cmp);
2120     do_if(btest, cmp);
2121     return;
2122   }
2123 
2124   // Check for equality before potentially allocating
2125   if (left == right) {
2126     do_if(btest, makecon(TypeInt::CC_EQ));
2127     return;
2128   }
2129 
2130   // Allocate inline type operands and re-execute on deoptimization
2131   if (left->is_InlineType()) {
2132     if (_gvn.type(right)->is_zero_type() ||
2133         (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2134       // Null checking a scalarized but nullable inline type. Check the IsInit
2135       // input instead of the oop input to avoid keeping buffer allocations alive.
2136       Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2137       do_if(btest, cmp);
2138       return;
2139     } else {
2140       PreserveReexecuteState preexecs(this);
2141       inc_sp(2);
2142       jvms()->set_should_reexecute(true);
2143       left = left->as_InlineType()->buffer(this)->get_oop();
2144     }
2145   }
2146   if (right->is_InlineType()) {
2147     PreserveReexecuteState preexecs(this);
2148     inc_sp(2);
2149     jvms()->set_should_reexecute(true);
2150     right = right->as_InlineType()->buffer(this)->get_oop();
2151   }
2152 
2153   // First, do a normal pointer comparison
2154   const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2155   const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2156   Node* cmp = CmpP(left, right);
2157   cmp = optimize_cmp_with_klass(cmp);
2158   if (tleft == nullptr || !tleft->can_be_inline_type() ||
2159       tright == nullptr || !tright->can_be_inline_type()) {
2160     // This is sufficient, if one of the operands can't be an inline type
2161     do_if(btest, cmp);
2162     return;
2163   }
2164 
2165   // Don't add traps to unstable if branches because additional checks are required to
 2166   // decide if the operands are equal/substitutable, so we shouldn't prune the branches
 2167   // of an individual if based on the profiling of the acmp branches.
 2168   // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2169   // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2170   // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2171   const bool can_trap = true;
2172 
2173   Node* eq_region = nullptr;
2174   if (btest == BoolTest::eq) {
2175     do_if(btest, cmp, !can_trap, true);
2176     if (stopped()) {
2177       // Pointers are equal, operands must be equal
2178       return;
2179     }
2180   } else {
2181     assert(btest == BoolTest::ne, "only eq or ne");
2182     Node* is_not_equal = nullptr;
2183     eq_region = new RegionNode(3);
2184     {
2185       PreserveJVMState pjvms(this);
2186       // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2187       do_if(btest, cmp, !can_trap, false, &is_not_equal);
2188       if (!stopped()) {
2189         eq_region->init_req(1, control());
2190       }
2191     }
2192     if (is_not_equal == nullptr || is_not_equal->is_top()) {
2193       record_for_igvn(eq_region);
2194       set_control(_gvn.transform(eq_region));
2195       return;
2196     }
2197     set_control(is_not_equal);
2198   }
2199 
2200   // Prefer speculative types if available
2201   if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2202     if (tleft->speculative_type() != nullptr) {
2203       left_type = tleft->speculative_type();
2204     }
2205     if (tright->speculative_type() != nullptr) {
2206       right_type = tright->speculative_type();
2207     }
2208   }
2209 
2210   if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2211     ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2212     if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2213       left_ptr = speculative_left_ptr;
2214     } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2215       left_ptr = speculative_left_ptr;
2216     }
2217   }
2218   if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2219     ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2220     if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2221       right_ptr = speculative_right_ptr;
2222     } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2223       right_ptr = speculative_right_ptr;
2224     }
2225   }
2226 
2227   if (left_ptr == ProfileAlwaysNull) {
2228     // Comparison with null. Assert the input is indeed null and we're done.
2229     acmp_always_null_input(left, tleft, btest, eq_region);
2230     return;
2231   }
2232   if (right_ptr == ProfileAlwaysNull) {
2233     // Comparison with null. Assert the input is indeed null and we're done.
2234     acmp_always_null_input(right, tright, btest, eq_region);
2235     return;
2236   }
2237   if (left_type != nullptr && !left_type->is_inlinetype()) {
2238     // Comparison with an object of known type
2239     acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2240     return;
2241   }
2242   if (right_type != nullptr && !right_type->is_inlinetype()) {
2243     // Comparison with an object of known type
2244     acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2245     return;
2246   }
2247   if (!left_inline_type) {
2248     // Comparison with an object known not to be an inline type
2249     acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2250     return;
2251   }
2252   if (!right_inline_type) {
2253     // Comparison with an object known not to be an inline type
2254     acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2255     return;
2256   }
2257 
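        // Neither operand could be ruled out as an inline type, so fall through to the full
        // check. ne_region collects the five ways the operands can still turn out not to be
        // substitutable: the right operand is null (req 1) or not an inline type (req 2),
        // the left operand is null (req 3), the klasses differ (req 4), or the runtime
        // substitutability call returns false (req 5).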
 2258   // Pointers are not equal, check if the right operand is non-null
2259   Node* ne_region = new RegionNode(6);
2260   Node* null_ctl;
2261   Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2262   ne_region->init_req(1, null_ctl);
2263 
 2264   // The right operand is non-null, check if it is an inline type
2265   Node* is_value = inline_type_test(not_null_right);
2266   IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2267   Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2268   ne_region->init_req(2, not_value);
2269   set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2270 
 2271   // The right operand is an inline type, check if the left operand is non-null
2272   Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2273   ne_region->init_req(3, null_ctl);
2274 
2275   // Check if both operands are of the same class.
2276   Node* kls_left = load_object_klass(not_null_left);
2277   Node* kls_right = load_object_klass(not_null_right);
2278   Node* kls_cmp = CmpP(kls_left, kls_right);
2279   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2280   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2281   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2282   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2283   ne_region->init_req(4, kls_ne);
2284 
2285   if (stopped()) {
2286     record_for_igvn(ne_region);
2287     set_control(_gvn.transform(ne_region));
2288     if (btest == BoolTest::ne) {
2289       {
2290         PreserveJVMState pjvms(this);
2291         int target_bci = iter().get_dest();
2292         merge(target_bci);
2293       }
2294       record_for_igvn(eq_region);
2295       set_control(_gvn.transform(eq_region));
2296     }
2297     return;
2298   }
2299 
 2300   // Both operands are value types of the same class, so we need to perform a
2301   // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2302   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2303   Node* mem = reset_memory();
2304   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2305 
2306   Node* eq_io_phi = nullptr;
2307   Node* eq_mem_phi = nullptr;
2308   if (eq_region != nullptr) {
2309     eq_io_phi = PhiNode::make(eq_region, i_o());
2310     eq_mem_phi = PhiNode::make(eq_region, mem);
2311   }
2312 
2313   set_all_memory(mem);
2314 
2315   kill_dead_locals();
2316   ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2317   CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2318   call->set_override_symbolic_info(true);
2319   call->init_req(TypeFunc::Parms, not_null_left);
2320   call->init_req(TypeFunc::Parms+1, not_null_right);
2321   inc_sp(2);
2322   set_edges_for_java_call(call, false, false);
2323   Node* ret = set_results_for_java_call(call, false, true);
2324   dec_sp(2);
2325 
2326   // Test the return value of ValueObjectMethods::isSubstitutable()
2327   // This is the last check, do_if can emit traps now.
2328   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2329   Node* ctl = C->top();
2330   if (btest == BoolTest::eq) {
2331     PreserveJVMState pjvms(this);
2332     do_if(btest, subst_cmp, can_trap);
2333     if (!stopped()) {
2334       ctl = control();
2335     }
2336   } else {
2337     assert(btest == BoolTest::ne, "only eq or ne");
2338     PreserveJVMState pjvms(this);
2339     do_if(btest, subst_cmp, can_trap, false, &ctl);
2340     if (!stopped()) {
2341       eq_region->init_req(2, control());
2342       eq_io_phi->init_req(2, i_o());
2343       eq_mem_phi->init_req(2, reset_memory());
2344     }
2345   }
2346   ne_region->init_req(5, ctl);
2347   ne_io_phi->init_req(5, i_o());
2348   ne_mem_phi->init_req(5, reset_memory());
2349 
2350   record_for_igvn(ne_region);
2351   set_control(_gvn.transform(ne_region));
2352   set_i_o(_gvn.transform(ne_io_phi));
2353   set_all_memory(_gvn.transform(ne_mem_phi));
2354 
2355   if (btest == BoolTest::ne) {
2356     {
2357       PreserveJVMState pjvms(this);
2358       int target_bci = iter().get_dest();
2359       merge(target_bci);
2360     }
2361 
2362     record_for_igvn(eq_region);
2363     set_control(_gvn.transform(eq_region));
2364     set_i_o(_gvn.transform(eq_io_phi));
2365     set_all_memory(_gvn.transform(eq_mem_phi));
2366   }
2367 }
2368 
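      // For orientation (roughly the Valhalla acmp semantics implemented above): two
      // references are acmp-equal if they are the same pointer, or if both are non-null
      // value objects of the same class that the ValueObjectMethods::isSubstitutable()
      // runtime call deems substitutable. Everything before that call tries to prove one
      // of the cheap outcomes statically or from profiling.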
2369 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2370 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2371 // then either takes the trap or executes the original, unstable if.
2372 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2373   // Search for an unstable if trap
2374   CallStaticJavaNode* trap = nullptr;
2375   assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2376   ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2377   if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2378     // No suitable trap found. Remove unused counter load and increment.
2379     C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2380     return;
2381   }
2382 
2383   // Remove trap from optimization list since we add another path to the trap.
2384   bool success = C->remove_unstable_if_trap(trap, true);
2385   assert(success, "Trap already modified");
2386 
2387   // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2388   int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]

2421 }
2422 
2423 void Parse::maybe_add_predicate_after_if(Block* path) {
2424   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2425     // Add predicates at bci of if dominating the loop so traps can be
2426     // recorded on the if's profile data
2427     int bc_depth = repush_if_args();
2428     add_parse_predicates();
2429     dec_sp(bc_depth);
2430     path->set_has_predicates();
2431   }
2432 }
2433 
2434 
2435 //----------------------------adjust_map_after_if------------------------------
2436 // Adjust the JVM state to reflect the result of taking this path.
2437 // Basically, it means inspecting the CmpNode controlling this
2438 // branch, seeing how it constrains a tested value, and then
2439 // deciding if it's worth our while to encode this constraint
2440 // as graph nodes in the current abstract interpretation map.
2441 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2442   if (!c->is_Cmp()) {
2443     maybe_add_predicate_after_if(path);
2444     return;
2445   }
2446 
2447   if (stopped() || btest == BoolTest::illegal) {
2448     return;                             // nothing to do
2449   }
2450 
2451   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2452 
2453   if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2454     repush_if_args();
2455     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2456                   Deoptimization::Action_reinterpret,
2457                   nullptr,
2458                   (is_fallthrough ? "taken always" : "taken never"));
2459 
2460     if (call != nullptr) {
2461       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2462     }
2463     return;
2464   }
2465 
2466   Node* val = c->in(1);
2467   Node* con = c->in(2);
2468   const Type* tcon = _gvn.type(con);
2469   const Type* tval = _gvn.type(val);
2470   bool have_con = tcon->singleton();
2471   if (tval->singleton()) {
2472     if (!have_con) {
2473       // Swap, so constant is in con.

2530     if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2531        // Found:
2532        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2533        // or the narrowOop equivalent.
2534        const Type* obj_type = _gvn.type(obj);
2535        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2536        if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
2537            tboth->higher_equal(obj_type)) {
2538           // obj has to be of the exact type Foo if the CmpP succeeds.
2539           int obj_in_map = map()->find_edge(obj);
2540           JVMState* jvms = this->jvms();
2541           if (obj_in_map >= 0 &&
2542               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2543             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2544             const Type* tcc = ccast->as_Type()->type();
2545             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2546             // Delay transform() call to allow recovery of pre-cast value
2547             // at the control merge.
2548             _gvn.set_type_bottom(ccast);
2549             record_for_igvn(ccast);
2550             if (tboth->is_inlinetypeptr()) {
2551               ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2552             }
2553             // Here's the payoff.
2554             replace_in_map(obj, ccast);
2555           }
2556        }
2557     }
2558   }
2559 
2560   int val_in_map = map()->find_edge(val);
2561   if (val_in_map < 0)  return;          // replace_in_map would be useless
2562   {
2563     JVMState* jvms = this->jvms();
2564     if (!(jvms->is_loc(val_in_map) ||
2565           jvms->is_stk(val_in_map)))
2566       return;                           // again, it would be useless
2567   }
2568 
2569   // Check for a comparison to a constant, and "know" that the compared
2570   // value is constrained on this path.
2571   assert(tcon->singleton(), "");
2572   ConstraintCastNode* ccast = nullptr;

2637   if (c->Opcode() == Op_CmpP &&
2638       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2639       c->in(2)->is_Con()) {
2640     Node* load_klass = nullptr;
2641     Node* decode = nullptr;
2642     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2643       decode = c->in(1);
2644       load_klass = c->in(1)->in(1);
2645     } else {
2646       load_klass = c->in(1);
2647     }
2648     if (load_klass->in(2)->is_AddP()) {
2649       Node* addp = load_klass->in(2);
2650       Node* obj = addp->in(AddPNode::Address);
2651       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2652       if (obj_type->speculative_type_not_null() != nullptr) {
2653         ciKlass* k = obj_type->speculative_type();
2654         inc_sp(2);
2655         obj = maybe_cast_profiled_obj(obj, k);
2656         dec_sp(2);
2657         if (obj->is_InlineType()) {
2658           assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2659           obj = obj->as_InlineType()->get_oop();
2660         }
2661         // Make the CmpP use the casted obj
2662         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2663         load_klass = load_klass->clone();
2664         load_klass->set_req(2, addp);
2665         load_klass = _gvn.transform(load_klass);
2666         if (decode != nullptr) {
2667           decode = decode->clone();
2668           decode->set_req(1, load_klass);
2669           load_klass = _gvn.transform(decode);
2670         }
2671         c = c->clone();
2672         c->set_req(1, load_klass);
2673         c = _gvn.transform(c);
2674       }
2675     }
2676   }
2677   return c;
2678 }
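      // The pattern handled above is CmpP(LoadKlass(obj._klass), ConP) where the profile
      // records a speculative type for obj: maybe_cast_profiled_obj() inserts the
      // speculative cast, and the LoadKlass/DecodeNKlass/CmpP chain is cloned to use the
      // casted obj so the comparison sees the sharper, profile-based type.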
2679 
2680 //------------------------------do_one_bytecode--------------------------------

3456     // See if we can get some profile data and hand it off to the next block
3457     Block *target_block = block()->successor_for_bci(target_bci);
3458     if (target_block->pred_count() != 1)  break;
3459     ciMethodData* methodData = method()->method_data();
3460     if (!methodData->is_mature())  break;
3461     ciProfileData* data = methodData->bci_to_data(bci());
3462     assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3463     int taken = ((ciJumpData*)data)->taken();
3464     taken = method()->scale_count(taken);
3465     target_block->set_count(taken);
3466     break;
3467   }
3468 
3469   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3470   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3471   handle_if_null:
3472     // If this is a backwards branch in the bytecodes, add Safepoint
3473     maybe_add_safepoint(iter().get_dest());
3474     a = null();
3475     b = pop();
3476     if (b->is_InlineType()) {
3477       // Null checking a scalarized but nullable inline type. Check the IsInit
3478       // input instead of the oop input to avoid keeping buffer allocations alive
3479       c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3480     } else {
3481       if (!_gvn.type(b)->speculative_maybe_null() &&
3482           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3483         inc_sp(1);
3484         Node* null_ctl = top();
3485         b = null_check_oop(b, &null_ctl, true, true, true);
3486         assert(null_ctl->is_top(), "no null control here");
3487         dec_sp(1);
3488       } else if (_gvn.type(b)->speculative_always_null() &&
3489                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3490         inc_sp(1);
3491         b = null_assert(b);
3492         dec_sp(1);
3493       }
3494       c = _gvn.transform( new CmpPNode(b, a) );
3495     }
3496     do_ifnull(btest, c);
3497     break;
3498 
3499   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3500   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3501   handle_if_acmp:
3502     // If this is a backwards branch in the bytecodes, add Safepoint
3503     maybe_add_safepoint(iter().get_dest());
3504     a = pop();
3505     b = pop();
3506     do_acmp(btest, b, a);
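          // Stack layout: 'a' is value2 (top of stack, the right operand) and 'b' is
          // value1, so they are passed to do_acmp() as (left, right) = (b, a).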


3507     break;
3508 
3509   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3510   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3511   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3512   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3513   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3514   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3515   handle_ifxx:
3516     // If this is a backwards branch in the bytecodes, add Safepoint
3517     maybe_add_safepoint(iter().get_dest());
3518     a = _gvn.intcon(0);
3519     b = pop();
3520     c = _gvn.transform( new CmpINode(b, a) );
3521     do_if(btest, c);
3522     break;
3523 
3524   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3525   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3526   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;

3541     break;
3542 
3543   case Bytecodes::_lookupswitch:
3544     do_lookupswitch();
3545     break;
3546 
3547   case Bytecodes::_invokestatic:
3548   case Bytecodes::_invokedynamic:
3549   case Bytecodes::_invokespecial:
3550   case Bytecodes::_invokevirtual:
3551   case Bytecodes::_invokeinterface:
3552     do_call();
3553     break;
3554   case Bytecodes::_checkcast:
3555     do_checkcast();
3556     break;
3557   case Bytecodes::_instanceof:
3558     do_instanceof();
3559     break;
3560   case Bytecodes::_anewarray:
3561     do_newarray();
3562     break;
3563   case Bytecodes::_newarray:
3564     do_newarray((BasicType)iter().get_index());
3565     break;
3566   case Bytecodes::_multianewarray:
3567     do_multianewarray();
3568     break;
3569   case Bytecodes::_new:
3570     do_new();
3571     break;
3572 
3573   case Bytecodes::_jsr:
3574   case Bytecodes::_jsr_w:
3575     do_jsr();
3576     break;
3577 
3578   case Bytecodes::_ret:
3579     do_ret();
3580     break;
3581 