src/hotspot/share/opto/parse2.cpp

   1 /*
   2  * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "interpreter/linkResolver.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/opaquenode.hpp"
  43 #include "opto/parse.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "runtime/deoptimization.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 
  48 #ifndef PRODUCT
  49 extern int explicit_null_checks_inserted,
  50            explicit_null_checks_elided;
  51 #endif
  52 
  53 //---------------------------------array_load----------------------------------
  54 void Parse::array_load(BasicType bt) {
  55   const Type* elemtype = Type::TOP;
  56   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  57   Node* adr = array_addressing(bt, 0, &elemtype);
  58   if (stopped())  return;     // guaranteed null or range check
  59 
  60   pop();                      // index (already used)
  61   Node* array = pop();        // the array itself
  62 
  63   if (elemtype == TypeInt::BOOL) {
  64     bt = T_BOOLEAN;
  65   } else if (bt == T_OBJECT) {
  66     elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
  67   }
  68 
  69   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  70 
  71   Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
  72                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  73   if (big_val) {
  74     push_pair(ld);
  75   } else {
  76     push(ld);
  77   }
  78 }
  79 
  80 
  81 //--------------------------------array_store----------------------------------
  82 void Parse::array_store(BasicType bt) {
  83   const Type* elemtype = Type::TOP;
  84   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  85   Node* adr = array_addressing(bt, big_val ? 2 : 1, &elemtype);
  86   if (stopped())  return;     // guaranteed null or range check
  87   if (bt == T_OBJECT) {
  88     array_store_check();
  89   }
  90   Node* val;                  // Oop to store
  91   if (big_val) {
  92     val = pop_pair();
  93   } else {
  94     val = pop();
  95   }
  96   pop();                      // index (already used)
  97   Node* array = pop();        // the array itself
  98 
  99   if (elemtype == TypeInt::BOOL) {
 100     bt = T_BOOLEAN;
 101   } else if (bt == T_OBJECT) {
 102     elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
 103   }
 104 
 105   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 106 
 107   access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 108 }
 109 
 110 
 111 //------------------------------array_addressing-------------------------------
 112 // Pull array and index from the stack.  Compute pointer-to-element.
 113 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
 114   Node *idx   = peek(0+vals);   // Get from stack without popping
 115   Node *ary   = peek(1+vals);   // in case of exception
 116 
 117   // Null check the array base, with correct stack contents
 118   ary = null_check(ary, T_ARRAY);
  119   // Compile-time detection of a null exception?
 120   if (stopped())  return top();
 121 
 122   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 123   const TypeInt*    sizetype = arytype->size();
 124   const Type*       elemtype = arytype->elem();
 125 
 126   if (UseUniqueSubclasses && result2 != NULL) {
 127     const Type* el = elemtype->make_ptr();
 187       if (C->allow_range_check_smearing()) {
 188         // Do not use builtin_throw, since range checks are sometimes
 189         // made more stringent by an optimistic transformation.
 190         // This creates "tentative" range checks at this point,
 191         // which are not guaranteed to throw exceptions.
 192         // See IfNode::Ideal, is_range_check, adjust_check.
 193         uncommon_trap(Deoptimization::Reason_range_check,
 194                       Deoptimization::Action_make_not_entrant,
 195                       NULL, "range_check");
 196       } else {
 197         // If we have already recompiled with the range-check-widening
 198         // heroic optimization turned off, then we must really be throwing
 199         // range check exceptions.
 200         builtin_throw(Deoptimization::Reason_range_check, idx);
 201       }
 202     }
 203   }
 204   // Check for always knowing you are throwing a range-check exception
 205   if (stopped())  return top();
 206 
 207   // Make array address computation control dependent to prevent it
 208   // from floating above the range check during loop optimizations.
 209   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 210 
 211   if (result2 != NULL)  *result2 = elemtype;
 212 
 213   assert(ptr != top(), "top should go hand-in-hand with stopped");
 214 
 215   return ptr;
 216 }
 217 
 218 
 219 // returns IfNode
 220 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 221   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 222   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 223   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 224   return iff;
 225 }
 226 
1478 
1479   // Sanity check the probability value
1480   assert(prob > 0.0f,"Bad probability in Parser");
1481  // Need xform to put node in hash table
1482   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1483   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1484   // True branch
1485   { PreserveJVMState pjvms(this);
1486     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1487     set_control(iftrue);
1488 
1489     if (stopped()) {            // Path is dead?
1490       NOT_PRODUCT(explicit_null_checks_elided++);
1491       if (C->eliminate_boxing()) {
1492         // Mark the successor block as parsed
1493         branch_block->next_path_num();
1494       }
1495     } else {                    // Path is live.
1496       // Update method data
1497       profile_taken_branch(target_bci);
1498       adjust_map_after_if(btest, c, prob, branch_block, next_block);
1499       if (!stopped()) {
1500         merge(target_bci);
1501       }
1502     }
1503   }
1504 
1505   // False branch
1506   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1507   set_control(iffalse);
1508 
1509   if (stopped()) {              // Path is dead?
1510     NOT_PRODUCT(explicit_null_checks_elided++);
1511     if (C->eliminate_boxing()) {
1512       // Mark the successor block as parsed
1513       next_block->next_path_num();
1514     }
1515   } else  {                     // Path is live.
1516     // Update method data
1517     profile_not_taken_branch();
1518     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
1519                         next_block, branch_block);
1520   }
1521 }
1522 
1523 //------------------------------------do_if------------------------------------
1524 void Parse::do_if(BoolTest::mask btest, Node* c) {
1525   int target_bci = iter().get_dest();
1526 
1527   Block* branch_block = successor_for_bci(target_bci);
1528   Block* next_block   = successor_for_bci(iter().next_bci());
1529 
1530   float cnt;
1531   float prob = branch_prediction(cnt, btest, target_bci, c);
1532   float untaken_prob = 1.0 - prob;
1533 
1534   if (prob == PROB_UNKNOWN) {
1535     if (PrintOpto && Verbose) {
1536       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1537     }
1538     repush_if_args(); // to gather stats on loop
1539     // We need to mark this branch as taken so that if we recompile we will
1540     // see that it is possible. In the tiered system the interpreter doesn't
 1541     // do profiling, and by the time we get to the lower tier from the interpreter
 1542     // the path may be cold again. Make sure it doesn't look untaken.
1543     profile_taken_branch(target_bci, !ProfileInterpreter);
1544     uncommon_trap(Deoptimization::Reason_unreached,
1593   }
1594 
1595   // Generate real control flow
1596   float true_prob = (taken_if_true ? prob : untaken_prob);
1597   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1598   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1599   Node* taken_branch   = new IfTrueNode(iff);
1600   Node* untaken_branch = new IfFalseNode(iff);
1601   if (!taken_if_true) {  // Finish conversion to canonical form
1602     Node* tmp      = taken_branch;
1603     taken_branch   = untaken_branch;
1604     untaken_branch = tmp;
1605   }
1606 
1607   // Branch is taken:
1608   { PreserveJVMState pjvms(this);
1609     taken_branch = _gvn.transform(taken_branch);
1610     set_control(taken_branch);
1611 
1612     if (stopped()) {
1613       if (C->eliminate_boxing()) {
1614         // Mark the successor block as parsed
1615         branch_block->next_path_num();
1616       }
1617     } else {
1618       // Update method data
1619       profile_taken_branch(target_bci);
1620       adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1621       if (!stopped()) {
1622         merge(target_bci);
1623       }
1624     }
1625   }
1626 
1627   untaken_branch = _gvn.transform(untaken_branch);
1628   set_control(untaken_branch);
1629 
1630   // Branch not taken.
1631   if (stopped()) {
1632     if (C->eliminate_boxing()) {
1633       // Mark the successor block as parsed
1634       next_block->next_path_num();
1635     }
1636   } else {
1637     // Update method data
1638     profile_not_taken_branch();
1639     adjust_map_after_if(untaken_btest, c, untaken_prob,
1640                         next_block, branch_block);
1641   }
1642 }
1643 
1644 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
1645   // Don't want to speculate on uncommon traps when running with -Xcomp
1646   if (!UseInterpreter) {
1647     return false;
1648   }
1649   return (seems_never_taken(prob) && seems_stable_comparison());
1650 }
1651 
1652 void Parse::maybe_add_predicate_after_if(Block* path) {
1653   if (path->is_SEL_head() && path->preds_parsed() == 0) {
1654     // Add predicates at bci of if dominating the loop so traps can be
1655     // recorded on the if's profile data
1656     int bc_depth = repush_if_args();
1657     add_predicate();
1658     dec_sp(bc_depth);
1659     path->set_has_predicates();
1660   }
1661 }
1662 
1663 
1664 //----------------------------adjust_map_after_if------------------------------
1665 // Adjust the JVM state to reflect the result of taking this path.
1666 // Basically, it means inspecting the CmpNode controlling this
1667 // branch, seeing how it constrains a tested value, and then
1668 // deciding if it's worth our while to encode this constraint
1669 // as graph nodes in the current abstract interpretation map.
1670 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1671                                 Block* path, Block* other_path) {
1672   if (!c->is_Cmp()) {
1673     maybe_add_predicate_after_if(path);
1674     return;
1675   }
1676 
1677   if (stopped() || btest == BoolTest::illegal) {
1678     return;                             // nothing to do
1679   }
1680 
1681   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1682 
1683   if (path_is_suitable_for_uncommon_trap(prob)) {
1684     repush_if_args();
1685     uncommon_trap(Deoptimization::Reason_unstable_if,
1686                   Deoptimization::Action_reinterpret,
1687                   NULL,
1688                   (is_fallthrough ? "taken always" : "taken never"));
1689     return;
1690   }
1691 
1861   if (c->Opcode() == Op_CmpP &&
1862       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1863       c->in(2)->is_Con()) {
1864     Node* load_klass = NULL;
1865     Node* decode = NULL;
1866     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1867       decode = c->in(1);
1868       load_klass = c->in(1)->in(1);
1869     } else {
1870       load_klass = c->in(1);
1871     }
1872     if (load_klass->in(2)->is_AddP()) {
1873       Node* addp = load_klass->in(2);
1874       Node* obj = addp->in(AddPNode::Address);
1875       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1876       if (obj_type->speculative_type_not_null() != NULL) {
1877         ciKlass* k = obj_type->speculative_type();
1878         inc_sp(2);
1879         obj = maybe_cast_profiled_obj(obj, k);
1880         dec_sp(2);
1881         // Make the CmpP use the casted obj
1882         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1883         load_klass = load_klass->clone();
1884         load_klass->set_req(2, addp);
1885         load_klass = _gvn.transform(load_klass);
1886         if (decode != NULL) {
1887           decode = decode->clone();
1888           decode->set_req(1, load_klass);
1889           load_klass = _gvn.transform(decode);
1890         }
1891         c = c->clone();
1892         c->set_req(1, load_klass);
1893         c = _gvn.transform(c);
1894       }
1895     }
1896   }
1897   return c;
1898 }
1899 
1900 //------------------------------do_one_bytecode--------------------------------
2708     // See if we can get some profile data and hand it off to the next block
2709     Block *target_block = block()->successor_for_bci(target_bci);
2710     if (target_block->pred_count() != 1)  break;
2711     ciMethodData* methodData = method()->method_data();
2712     if (!methodData->is_mature())  break;
2713     ciProfileData* data = methodData->bci_to_data(bci());
2714     assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
2715     int taken = ((ciJumpData*)data)->taken();
2716     taken = method()->scale_count(taken);
2717     target_block->set_count(taken);
2718     break;
2719   }
2720 
2721   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
2722   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2723   handle_if_null:
2724     // If this is a backwards branch in the bytecodes, add Safepoint
2725     maybe_add_safepoint(iter().get_dest());
2726     a = null();
2727     b = pop();
2728     if (!_gvn.type(b)->speculative_maybe_null() &&
2729         !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2730       inc_sp(1);
2731       Node* null_ctl = top();
2732       b = null_check_oop(b, &null_ctl, true, true, true);
2733       assert(null_ctl->is_top(), "no null control here");
2734       dec_sp(1);
2735     } else if (_gvn.type(b)->speculative_always_null() &&
2736                !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2737       inc_sp(1);
2738       b = null_assert(b);
2739       dec_sp(1);
2740     }
2741     c = _gvn.transform( new CmpPNode(b, a) );
2742     do_ifnull(btest, c);
2743     break;
2744 
2745   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2746   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2747   handle_if_acmp:
2748     // If this is a backwards branch in the bytecodes, add Safepoint
2749     maybe_add_safepoint(iter().get_dest());
2750     a = access_resolve(pop(), 0);
2751     b = access_resolve(pop(), 0);
2752     c = _gvn.transform( new CmpPNode(b, a) );
2753     c = optimize_cmp_with_klass(c);
2754     do_if(btest, c);
2755     break;
2756 
2757   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2758   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2759   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2760   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2761   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2762   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2763   handle_ifxx:
2764     // If this is a backwards branch in the bytecodes, add Safepoint
2765     maybe_add_safepoint(iter().get_dest());
2766     a = _gvn.intcon(0);
2767     b = pop();
2768     c = _gvn.transform( new CmpINode(b, a) );
2769     do_if(btest, c);
2770     break;
2771 
2772   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2773   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2774   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2789     break;
2790 
2791   case Bytecodes::_lookupswitch:
2792     do_lookupswitch();
2793     break;
2794 
2795   case Bytecodes::_invokestatic:
2796   case Bytecodes::_invokedynamic:
2797   case Bytecodes::_invokespecial:
2798   case Bytecodes::_invokevirtual:
2799   case Bytecodes::_invokeinterface:
2800     do_call();
2801     break;
2802   case Bytecodes::_checkcast:
2803     do_checkcast();
2804     break;
2805   case Bytecodes::_instanceof:
2806     do_instanceof();
2807     break;
2808   case Bytecodes::_anewarray:
2809     do_anewarray();
2810     break;
2811   case Bytecodes::_newarray:
2812     do_newarray((BasicType)iter().get_index());
2813     break;
2814   case Bytecodes::_multianewarray:
2815     do_multianewarray();
2816     break;
2817   case Bytecodes::_new:
2818     do_new();
2819     break;
2820 
2821   case Bytecodes::_jsr:
2822   case Bytecodes::_jsr_w:
2823     do_jsr();
2824     break;
2825 
2826   case Bytecodes::_ret:
2827     do_ret();
2828     break;
2829 
2830 
2831   case Bytecodes::_monitorenter:
2832     do_monitor_enter();
2833     break;
2834 
2835   case Bytecodes::_monitorexit:
2836     do_monitor_exit();
2837     break;
2838 


   1 /*
   2  * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "interpreter/linkResolver.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/idealKit.hpp"
  40 #include "opto/matcher.hpp"
  41 #include "opto/memnode.hpp"
  42 #include "opto/mulnode.hpp"
  43 #include "opto/opaquenode.hpp"
  44 #include "opto/parse.hpp"
  45 #include "opto/runtime.hpp"
  46 #include "opto/valuetypenode.hpp"
  47 #include "runtime/deoptimization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 
  50 #ifndef PRODUCT
  51 extern int explicit_null_checks_inserted,
  52            explicit_null_checks_elided;
  53 #endif
  54 
  55 //---------------------------------array_load----------------------------------
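      // Rough shape of the value-type handling added below: a statically known
      // flattened element type is loaded directly from the flattened array; if
      // the array is known not to be flat we fall through to the plain array
      // load at the bottom; otherwise an IdealKit runtime check on the array's
      // layout tag selects between a regular load, a flattened load with a
      // known element klass, and a buffered copy (arraycopy or runtime call).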
  56 void Parse::array_load(BasicType bt) {
  57   const Type* elemtype = Type::TOP;
  58   Node* adr = array_addressing(bt, 0, &elemtype);
  59   if (stopped())  return;     // guaranteed null or range check
  60 
  61   Node* idx = pop();
  62   Node* ary = pop();
  63 
  64   // Handle value type arrays
  65   const TypeOopPtr* elemptr = elemtype->make_oopptr();
  66   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  67   if (elemtype->isa_valuetype() != NULL) {
  68     C->set_flattened_accesses();
  69     // Load from flattened value type array
  70     Node* vt = ValueTypeNode::make_from_flattened(this, elemtype->value_klass(), ary, adr);
  71     push(vt);
  72     return;
  73   } else if (elemptr != NULL && elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
  74     // Load from non-flattened but flattenable value type array (elements can never be null)
  75     bt = T_VALUETYPE;
  76   } else if (!ary_t->is_not_flat()) {
  77     // Cannot statically determine if array is flattened, emit runtime check
  78     assert(ValueArrayFlatten && elemptr->can_be_value_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() &&
  79            (!elemptr->is_valuetypeptr() || elemptr->value_klass()->flatten_array()), "array can't be flattened");
  80     Node* ctl = control();
  81     IdealKit ideal(this);
  82     IdealVariable res(ideal);
  83     ideal.declarations_done();
  84     Node* kls = load_object_klass(ary);
  85     Node* tag = load_lh_array_tag(kls);
  86     ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
  87       // non-flattened
  88       sync_kit(ideal);
  89       const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  90       Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
  91                                 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD, ctl);
  92       ideal.sync_kit(this);
  93       ideal.set(res, ld);
  94     } ideal.else_(); {
  95       // flattened
  96       sync_kit(ideal);
  97       if (elemptr->is_valuetypeptr()) {
  98         // Element type is known, cast and load from flattened representation
  99         ciValueKlass* vk = elemptr->value_klass();
 100         assert(vk->flatten_array() && elemptr->maybe_null(), "must be a flattenable and nullable array");
 101         ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
 102         const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 103         Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 104         adr = array_element_address(cast, idx, T_VALUETYPE, ary_t->size(), control());
 105         Node* vt = ValueTypeNode::make_from_flattened(this, vk, cast, adr)->allocate(this, false, false)->get_oop();
 106         ideal.set(res, vt);
 107         ideal.sync_kit(this);
 108       } else {
 109         // Element type is unknown, emit runtime call
 110         Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
 111         Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
 112         Node* obj_size  = NULL;
 113         kill_dead_locals();
 114         inc_sp(2);
 115         Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
 116         dec_sp(2);
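              // alloc_obj is a fresh buffer instance of the runtime-determined
              // element klass; the flattened element's payload is copied into it
              // below, either by cloning the payload words directly or via the
              // load_unknown_value runtime call.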
 117 
 118         AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
 119         assert(alloc->maybe_set_complete(&_gvn), "");
 120         alloc->initialization()->set_complete_with_arraycopy();
 121 
 122         // This membar keeps this access to an unknown flattened array
 123         // correctly ordered with other unknown and known flattened
 124         // array accesses.
 125         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 126 
 127         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 128         // Unknown value type might contain reference fields
 129         if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) {
 130           int base_off = sizeof(instanceOopDesc);
 131           Node* dst_base = basic_plus_adr(alloc_obj, base_off);
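                // Payload size: instance size in bytes minus the object header,
                // shifted by LogBytesPerLong to give the number of 8-byte words
                // to copy.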
 132           Node* countx = obj_size;
 133           countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
 134           countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
 135 
 136           assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
 137           Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
 138           Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
 139           uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE);
 140           Node* base  = basic_plus_adr(ary, header);
 141           idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control());
 142           Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift));
 143           Node* adr = basic_plus_adr(ary, base, scale);
 144 
 145           access_clone(adr, dst_base, countx, false);
 146         } else {
 147           ideal.sync_kit(this);
 148           ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(),
 149                                CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value),
 150                                "load_unknown_value",
 151                                ary, idx, alloc_obj);
 152           sync_kit(ideal);
 153         }
 154 
 155         // This makes sure no other thread sees a partially initialized buffered value
 156         insert_mem_bar_volatile(Op_MemBarStoreStore, Compile::AliasIdxRaw, alloc->proj_out_or_null(AllocateNode::RawAddress));
 157 
 158         // Same as MemBarCPUOrder above: keep this unknown flattened
 159         // array access correctly ordered with other flattened array
  160         // accesses
 161         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 162 
 163         // Prevent any use of the newly allocated value before it is
 164         // fully initialized
 165         alloc_obj = new CastPPNode(alloc_obj, _gvn.type(alloc_obj), true);
 166         alloc_obj->set_req(0, control());
 167         alloc_obj = _gvn.transform(alloc_obj);
 168 
 169         ideal.sync_kit(this);
 170 
 171         ideal.set(res, alloc_obj);
 172       }
 173     } ideal.end_if();
 174     sync_kit(ideal);
 175     push_node(bt, _gvn.transform(ideal.value(res)));
 176     return;
 177   }
 178 
 179   if (elemtype == TypeInt::BOOL) {
 180     bt = T_BOOLEAN;
 181   } else if (bt == T_OBJECT) {
 182     elemtype = ary_t->elem()->make_oopptr();
 183   }
 184 
 185   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 186   Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
 187                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 188   if (bt == T_VALUETYPE) {
 189     // Loading a non-flattened (but flattenable) value type from an array
 190     assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null");
 191     if (elemptr->value_klass()->is_scalarizable()) {
 192       ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass());
 193     }
 194   }
 195 
 196   push_node(bt, ld);
 197 }
 198 
 199 
 200 //--------------------------------array_store----------------------------------
 201 void Parse::array_store(BasicType bt) {
 202   const Type* elemtype = Type::TOP;
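        // type2size[bt] is the number of stack slots the stored value occupies
        // (2 for T_LONG/T_DOUBLE, 1 otherwise), so array_addressing can peek at
        // the index and array beneath it.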
 203   Node* adr = array_addressing(bt, type2size[bt], &elemtype);
 204   if (stopped())  return;     // guaranteed null or range check
 205   Node* cast_val = NULL;
 206   if (bt == T_OBJECT) {
 207     cast_val = array_store_check();
 208     if (stopped()) return;
 209   }
 210   Node* val = pop_node(bt); // Value to store
 211   Node* idx = pop();        // Index in the array
 212   Node* ary = pop();        // The array itself
 213 
 214   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
 215   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 216 
 217   if (elemtype == TypeInt::BOOL) {
 218     bt = T_BOOLEAN;
 219   } else if (bt == T_OBJECT) {
 220     elemtype = elemtype->make_oopptr();
 221     const Type* tval = _gvn.type(cast_val);
 222     // We may have lost type information for 'val' here due to the casts
 223     // emitted by the array_store_check code (see JDK-6312651)
 224     // TODO Remove this code once JDK-6312651 is in.
 225     const Type* tval_init = _gvn.type(val);
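          // can_be_value_type: the stored value may still turn out to be an
          // inline (value) type; not_flattenable: it is either known not to be
          // a value type, or is one whose klass is never flattened in arrays.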
 226     bool can_be_value_type = tval->isa_valuetype() || (tval != TypePtr::NULL_PTR && tval_init->is_oopptr()->can_be_value_type() && tval->is_oopptr()->can_be_value_type());
 227     bool not_flattenable = !can_be_value_type || ((tval_init->is_valuetypeptr() || tval_init->isa_valuetype()) && !tval_init->value_klass()->flatten_array());
 228 
 229     if (!ary_t->is_not_null_free() && !can_be_value_type && (!tval->maybe_null() || !tval_init->maybe_null())) {
 230       // Storing a non-inline-type, mark array as not null-free.
 231       // This is only legal for non-null stores because the array_store_check passes for null.
 232       ary_t = ary_t->cast_to_not_null_free();
 233       Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 234       replace_in_map(ary, cast);
 235       ary = cast;
 236     } else if (!ary_t->is_not_flat() && not_flattenable) {
 237       // Storing a non-flattenable value, mark array as not flat.
 238       ary_t = ary_t->cast_to_not_flat();
 239       if (tval != TypePtr::NULL_PTR) {
 240         // For NULL, this transformation is only valid after the null guard below
 241         Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 242         replace_in_map(ary, cast);
 243         ary = cast;
 244       }
 245     }
 246 
 247     if (ary_t->elem()->isa_valuetype() != NULL) {
 248       // Store to flattened value type array
 249       C->set_flattened_accesses();
 250       if (!cast_val->is_ValueType()) {
 251         inc_sp(3);
 252         cast_val = null_check(cast_val);
 253         if (stopped()) return;
 254         dec_sp(3);
 255         cast_val = ValueTypeNode::make_from_oop(this, cast_val, ary_t->elem()->value_klass());
 256       }
 257       cast_val->as_ValueType()->store_flattened(this, ary, adr);
 258       return;
 259     } else if (elemtype->is_valuetypeptr() && !elemtype->maybe_null()) {
 260       // Store to non-flattened but flattenable value type array (elements can never be null)
 261       if (!cast_val->is_ValueType() && tval->maybe_null()) {
 262         inc_sp(3);
 263         cast_val = null_check(cast_val);
 264         if (stopped()) return;
 265         dec_sp(3);
 266       }
 267     } else if (!ary_t->is_not_flat()) {
 268       // Array might be flattened, emit runtime checks
 269       assert(ValueArrayFlatten && !not_flattenable && elemtype->is_oopptr()->can_be_value_type() &&
 270              !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be flattened");
 271       IdealKit ideal(this);
 272       Node* kls = load_object_klass(ary);
 273       Node* layout_val = load_lh_array_tag(kls);
 274       ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value));
 275       {
 276         // non-flattened
 277         sync_kit(ideal);
 278         gen_value_array_null_guard(ary, cast_val, 3);
 279         access_store_at(ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false, false);
 280         ideal.sync_kit(this);
 281       }
 282       ideal.else_();
 283       {
 284         // flattened
 285         if (!cast_val->is_ValueType() && tval->maybe_null()) {
 286           // Add null check
 287           sync_kit(ideal);
 288           Node* null_ctl = top();
 289           cast_val = null_check_oop(cast_val, &null_ctl);
 290           if (null_ctl != top()) {
 291             PreserveJVMState pjvms(this);
 292             inc_sp(3);
 293             set_control(null_ctl);
 294             uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
 295             dec_sp(3);
 296           }
 297           ideal.sync_kit(this);
 298         }
 299         // Try to determine the value klass
 300         ciValueKlass* vk = NULL;
 301         if (tval->isa_valuetype() || tval->is_valuetypeptr()) {
 302           vk = tval->value_klass();
 303         } else if (tval_init->isa_valuetype() || tval_init->is_valuetypeptr()) {
 304           vk = tval_init->value_klass();
 305         } else if (elemtype->is_valuetypeptr()) {
 306           vk = elemtype->value_klass();
 307         }
 308         if (vk != NULL && !stopped()) {
 309           // Element type is known, cast and store to flattened representation
 310           sync_kit(ideal);
 311           assert(vk->flatten_array() && elemtype->maybe_null(), "must be a flattenable and nullable array");
 312           ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
 313           const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 314           ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 315           adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
 316           if (!cast_val->is_ValueType()) {
 317             assert(!gvn().type(cast_val)->maybe_null(), "value type array elements should never be null");
 318             cast_val = ValueTypeNode::make_from_oop(this, cast_val, vk);
 319           }
 320           cast_val->as_ValueType()->store_flattened(this, ary, adr);
 321           ideal.sync_kit(this);
 322         } else if (!ideal.ctrl()->is_top()) {
 323           // Element type is unknown, emit runtime call
 324           sync_kit(ideal);
 325 
 326           // This membar keeps this access to an unknown flattened
 327           // array correctly ordered with other unknown and known
 328           // flattened array accesses.
 329           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 330           ideal.sync_kit(this);
 331 
 332           ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
 333                                CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
 334                                "store_unknown_value",
 335                                cast_val, ary, idx);
 336 
 337           sync_kit(ideal);
 338           // Same as MemBarCPUOrder above: keep this unknown
 339           // flattened array access correctly ordered with other
  340           // flattened array accesses
 341           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 342           ideal.sync_kit(this);
 343         }
 344       }
 345       ideal.end_if();
 346       sync_kit(ideal);
 347       return;
 348     } else if (!ary_t->is_not_null_free()) {
 349       // Array is not flattened but may be null free
 350       assert(elemtype->is_oopptr()->can_be_value_type() && !ary_t->klass_is_exact(), "array can't be null free");
 351       ary = gen_value_array_null_guard(ary, cast_val, 3, true);
 352     }
 353   }
 354 
 355   access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 356 }
 357 
 358 
 359 //------------------------------array_addressing-------------------------------
 360 // Pull array and index from the stack.  Compute pointer-to-element.
 361 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
 362   Node *idx   = peek(0+vals);   // Get from stack without popping
 363   Node *ary   = peek(1+vals);   // in case of exception
 364 
 365   // Null check the array base, with correct stack contents
 366   ary = null_check(ary, T_ARRAY);
  367   // Compile-time detection of a null exception?
 368   if (stopped())  return top();
 369 
 370   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 371   const TypeInt*    sizetype = arytype->size();
 372   const Type*       elemtype = arytype->elem();
 373 
 374   if (UseUniqueSubclasses && result2 != NULL) {
 375     const Type* el = elemtype->make_ptr();
 435       if (C->allow_range_check_smearing()) {
 436         // Do not use builtin_throw, since range checks are sometimes
 437         // made more stringent by an optimistic transformation.
 438         // This creates "tentative" range checks at this point,
 439         // which are not guaranteed to throw exceptions.
 440         // See IfNode::Ideal, is_range_check, adjust_check.
 441         uncommon_trap(Deoptimization::Reason_range_check,
 442                       Deoptimization::Action_make_not_entrant,
 443                       NULL, "range_check");
 444       } else {
 445         // If we have already recompiled with the range-check-widening
 446         // heroic optimization turned off, then we must really be throwing
 447         // range check exceptions.
 448         builtin_throw(Deoptimization::Reason_range_check, idx);
 449       }
 450     }
 451   }
 452   // Check for always knowing you are throwing a range-check exception
 453   if (stopped())  return top();
 454 
 455   // Speculate on the array not being null-free
 456   if (!arytype->is_not_null_free() && arytype->speculative() != NULL && arytype->speculative()->isa_aryptr() != NULL &&
 457       arytype->speculative()->is_aryptr()->is_not_null_free() &&
 458       !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 459     Node* tst = gen_null_free_array_check(ary);
 460     {
 461       BuildCutout unless(this, tst, PROB_ALWAYS);
 462       uncommon_trap(Deoptimization::Reason_speculate_class_check,
 463                     Deoptimization::Action_maybe_recompile);
 464     }
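          // Speculation held on this path: the array is known not to be
          // null-free, so narrow its type in the map.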
 465     Node* cast = new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free());
 466     replace_in_map(ary, _gvn.transform(cast));
 467   }
 468 
 469   // Make array address computation control dependent to prevent it
 470   // from floating above the range check during loop optimizations.
 471   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 472 
 473   if (result2 != NULL)  *result2 = elemtype;
 474 
 475   assert(ptr != top(), "top should go hand-in-hand with stopped");
 476 
 477   return ptr;
 478 }
 479 
 480 
 481 // returns IfNode
 482 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 483   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 484   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 485   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 486   return iff;
 487 }
 488 
1740 
1741   // Sanity check the probability value
1742   assert(prob > 0.0f,"Bad probability in Parser");
1743  // Need xform to put node in hash table
1744   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1745   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1746   // True branch
1747   { PreserveJVMState pjvms(this);
1748     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1749     set_control(iftrue);
1750 
1751     if (stopped()) {            // Path is dead?
1752       NOT_PRODUCT(explicit_null_checks_elided++);
1753       if (C->eliminate_boxing()) {
1754         // Mark the successor block as parsed
1755         branch_block->next_path_num();
1756       }
1757     } else {                    // Path is live.
1758       // Update method data
1759       profile_taken_branch(target_bci);
1760       adjust_map_after_if(btest, c, prob, branch_block);
1761       if (!stopped()) {
1762         merge(target_bci);
1763       }
1764     }
1765   }
1766 
1767   // False branch
1768   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1769   set_control(iffalse);
1770 
1771   if (stopped()) {              // Path is dead?
1772     NOT_PRODUCT(explicit_null_checks_elided++);
1773     if (C->eliminate_boxing()) {
1774       // Mark the successor block as parsed
1775       next_block->next_path_num();
1776     }
1777   } else  {                     // Path is live.
1778     // Update method data
1779     profile_not_taken_branch();
1780     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1781   }
1782 }
1783 
1784 //------------------------------------do_if------------------------------------
1785 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
1786   int target_bci = iter().get_dest();
1787 
1788   Block* branch_block = successor_for_bci(target_bci);
1789   Block* next_block   = successor_for_bci(iter().next_bci());
1790 
1791   float cnt;
1792   float prob = branch_prediction(cnt, btest, target_bci, c);
1793   float untaken_prob = 1.0 - prob;
1794 
1795   if (prob == PROB_UNKNOWN) {
1796     if (PrintOpto && Verbose) {
1797       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1798     }
1799     repush_if_args(); // to gather stats on loop
1800     // We need to mark this branch as taken so that if we recompile we will
1801     // see that it is possible. In the tiered system the interpreter doesn't
 1802     // do profiling, and by the time we get to the lower tier from the interpreter
 1803     // the path may be cold again. Make sure it doesn't look untaken.
1804     profile_taken_branch(target_bci, !ProfileInterpreter);
1805     uncommon_trap(Deoptimization::Reason_unreached,
1854   }
1855 
1856   // Generate real control flow
1857   float true_prob = (taken_if_true ? prob : untaken_prob);
1858   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1859   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1860   Node* taken_branch   = new IfTrueNode(iff);
1861   Node* untaken_branch = new IfFalseNode(iff);
1862   if (!taken_if_true) {  // Finish conversion to canonical form
1863     Node* tmp      = taken_branch;
1864     taken_branch   = untaken_branch;
1865     untaken_branch = tmp;
1866   }
1867 
1868   // Branch is taken:
1869   { PreserveJVMState pjvms(this);
1870     taken_branch = _gvn.transform(taken_branch);
1871     set_control(taken_branch);
1872 
1873     if (stopped()) {
1874       if (C->eliminate_boxing() && !new_path) {
1875         // Mark the successor block as parsed (if we haven't created a new path)
1876         branch_block->next_path_num();
1877       }
1878     } else {
1879       // Update method data
1880       profile_taken_branch(target_bci);
1881       adjust_map_after_if(taken_btest, c, prob, branch_block);
1882       if (!stopped()) {
1883         if (new_path) {
1884           // Merge by using a new path
1885           merge_new_path(target_bci);
1886         } else if (ctrl_taken != NULL) {
1887           // Don't merge but save taken branch to be wired by caller
1888           *ctrl_taken = control();
1889         } else {
1890           merge(target_bci);
1891         }
1892       }
1893     }
1894   }
1895 
1896   untaken_branch = _gvn.transform(untaken_branch);
1897   set_control(untaken_branch);
1898 
1899   // Branch not taken.
1900   if (stopped() && ctrl_taken == NULL) {
1901     if (C->eliminate_boxing()) {
1902       // Mark the successor block as parsed (if caller does not re-wire control flow)
1903       next_block->next_path_num();
1904     }
1905   } else {
1906     // Update method data
1907     profile_not_taken_branch();
1908     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1909   }
1910 }
1911 
1912 void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) {
1913   ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature());
 1914   // If the current method is ValueBootstrapMethods::isSubstitutable(),
 1915   // compile the acmp as a regular pointer comparison; otherwise we
 1916   // could end up calling ValueBootstrapMethods::isSubstitutable() recursively.
1917   if (!EnableValhalla || (method() == subst_method)) {
1918     Node* cmp = CmpP(a, b);
1919     cmp = optimize_cmp_with_klass(cmp);
1920     do_if(btest, cmp);
1921     return;
1922   }
1923 
1924   // Substitutability test
1925   if (a->is_ValueType()) {
1926     inc_sp(2);
1927     a = a->as_ValueType()->allocate(this, true)->get_oop();
1928     dec_sp(2);
1929   }
1930   if (b->is_ValueType()) {
1931     inc_sp(2);
1932     b = b->as_ValueType()->allocate(this, true)->get_oop();
1933     dec_sp(2);
1934   }
1935 
1936   const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr();
1937   const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr();
1938 
1939   if (ta == NULL || !ta->can_be_value_type_raw() ||
1940       tb == NULL || !tb->can_be_value_type_raw()) {
1941     Node* cmp = CmpP(a, b);
1942     cmp = optimize_cmp_with_klass(cmp);
1943     do_if(btest, cmp);
1944     return;
1945   }
1946 
1947   Node* cmp = CmpP(a, b);
1948   cmp = optimize_cmp_with_klass(cmp);
1949   Node* eq_region = NULL;
1950   if (btest == BoolTest::eq) {
1951     do_if(btest, cmp, true);
1952     if (stopped()) {
1953       return;
1954     }
1955   } else {
1956     assert(btest == BoolTest::ne, "only eq or ne");
1957     Node* is_not_equal = NULL;
1958     eq_region = new RegionNode(3);
1959     {
1960       PreserveJVMState pjvms(this);
1961       do_if(btest, cmp, false, &is_not_equal);
1962       if (!stopped()) {
1963         eq_region->init_req(1, control());
1964       }
1965     }
1966     if (is_not_equal == NULL || is_not_equal->is_top()) {
1967       record_for_igvn(eq_region);
1968       set_control(_gvn.transform(eq_region));
1969       return;
1970     }
1971     set_control(is_not_equal);
1972   }
1973   // Pointers not equal, check for values
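        // ne_region below collects the paths on which the operands are known
        // not to be substitutable-equal: 1) a is null, 2) a is not an inline
        // type, 3) b is null, 4) the klasses differ, 5) isSubstitutable()
        // returned false.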
1974   Node* ne_region = new RegionNode(6);
1975   inc_sp(2);
1976   Node* null_ctl = top();
1977   Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
1978   dec_sp(2);
1979   ne_region->init_req(1, null_ctl);
1980   if (stopped()) {
1981     record_for_igvn(ne_region);
1982     set_control(_gvn.transform(ne_region));
1983     if (btest == BoolTest::ne) {
1984       {
1985         PreserveJVMState pjvms(this);
1986         int target_bci = iter().get_dest();
1987         merge(target_bci);
1988       }
1989       record_for_igvn(eq_region);
1990       set_control(_gvn.transform(eq_region));
1991     }
1992     return;
1993   }
1994 
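        // In this prototype, inline-type instances carry the "always locked"
        // bit pattern in their mark word; if 'a' is not marked that way it
        // cannot be an inline type and the pointer comparison above already
        // decided the result.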
1995   Node* is_value = is_always_locked(not_null_a);
1996   Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
1997   Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
1998   Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne));
1999   IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN);
2000   Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff));
2001   set_control(_gvn.transform(new IfFalseNode(is_value_iff)));
2002   ne_region->init_req(2, not_value);
2003 
2004   // One of the 2 pointers refers to a value, check if both are of
2005   // the same class
2006   inc_sp(2);
2007   null_ctl = top();
2008   Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
2009   dec_sp(2);
2010   ne_region->init_req(3, null_ctl);
2011   if (stopped()) {
2012     record_for_igvn(ne_region);
2013     set_control(_gvn.transform(ne_region));
2014     if (btest == BoolTest::ne) {
2015       {
2016         PreserveJVMState pjvms(this);
2017         int target_bci = iter().get_dest();
2018         merge(target_bci);
2019       }
2020       record_for_igvn(eq_region);
2021       set_control(_gvn.transform(eq_region));
2022     }
2023     return;
2024   }
2025   Node* kls_a = load_object_klass(not_null_a);
2026   Node* kls_b = load_object_klass(not_null_b);
2027   Node* kls_cmp = CmpP(kls_a, kls_b);
2028   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2029   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2030   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2031   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2032   ne_region->init_req(4, kls_ne);
2033 
2034   if (stopped()) {
2035     record_for_igvn(ne_region);
2036     set_control(_gvn.transform(ne_region));
2037     if (btest == BoolTest::ne) {
2038       {
2039         PreserveJVMState pjvms(this);
2040         int target_bci = iter().get_dest();
2041         merge(target_bci);
2042       }
2043       record_for_igvn(eq_region);
2044       set_control(_gvn.transform(eq_region));
2045     }
2046     return;
2047   }
2048   // Both are values of the same class, we need to perform a
2049   // substitutability test. Delegate to
2050   // ValueBootstrapMethods::isSubstitutable().
2051 
2052   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2053   Node* mem = reset_memory();
2054   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2055 
2056   Node* eq_io_phi = NULL;
2057   Node* eq_mem_phi = NULL;
2058   if (eq_region != NULL) {
2059     eq_io_phi = PhiNode::make(eq_region, i_o());
2060     eq_mem_phi = PhiNode::make(eq_region, mem);
2061   }
2062 
2063   set_all_memory(mem);
2064 
2065   kill_dead_locals();
2066   CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci());
2067   call->set_override_symbolic_info(true);
2068   call->init_req(TypeFunc::Parms, not_null_a);
2069   call->init_req(TypeFunc::Parms+1, not_null_b);
2070   inc_sp(2);
2071   set_edges_for_java_call(call, false, false);
2072   Node* ret = set_results_for_java_call(call, false, true);
2073   dec_sp(2);
2074 
2075   // Test the return value of ValueBootstrapMethods::isSubstitutable()
2076   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2077   Node* ctl = C->top();
2078   if (btest == BoolTest::eq) {
2079     PreserveJVMState pjvms(this);
2080     do_if(btest, subst_cmp);
2081     if (!stopped()) {
2082       ctl = control();
2083     }
2084   } else {
2085     assert(btest == BoolTest::ne, "only eq or ne");
2086     PreserveJVMState pjvms(this);
2087     do_if(btest, subst_cmp, false, &ctl);
2088     if (!stopped()) {
2089       eq_region->init_req(2, control());
2090       eq_io_phi->init_req(2, i_o());
2091       eq_mem_phi->init_req(2, reset_memory());
2092     }
2093   }
2094   ne_region->init_req(5, ctl);
2095   ne_io_phi->init_req(5, i_o());
2096   ne_mem_phi->init_req(5, reset_memory());
2097 
2098   record_for_igvn(ne_region);
2099   set_control(_gvn.transform(ne_region));
2100   set_i_o(_gvn.transform(ne_io_phi));
2101   set_all_memory(_gvn.transform(ne_mem_phi));
2102 
2103   if (btest == BoolTest::ne) {
2104     {
2105       PreserveJVMState pjvms(this);
2106       int target_bci = iter().get_dest();
2107       merge(target_bci);
2108     }
2109 
2110     record_for_igvn(eq_region);
2111     set_control(_gvn.transform(eq_region));
2112     set_i_o(_gvn.transform(eq_io_phi));
2113     set_all_memory(_gvn.transform(eq_mem_phi));
2114   }
2115 }
2116 
2117 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2118   // Don't want to speculate on uncommon traps when running with -Xcomp
2119   if (!UseInterpreter) {
2120     return false;
2121   }
2122   return (seems_never_taken(prob) && seems_stable_comparison());
2123 }
2124 
2125 void Parse::maybe_add_predicate_after_if(Block* path) {
2126   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2127     // Add predicates at bci of if dominating the loop so traps can be
2128     // recorded on the if's profile data
2129     int bc_depth = repush_if_args();
2130     add_predicate();
2131     dec_sp(bc_depth);
2132     path->set_has_predicates();
2133   }
2134 }
2135 
2136 
2137 //----------------------------adjust_map_after_if------------------------------
2138 // Adjust the JVM state to reflect the result of taking this path.
2139 // Basically, it means inspecting the CmpNode controlling this
2140 // branch, seeing how it constrains a tested value, and then
2141 // deciding if it's worth our while to encode this constraint
2142 // as graph nodes in the current abstract interpretation map.
2143 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
2144   if (!c->is_Cmp()) {
2145     maybe_add_predicate_after_if(path);
2146     return;
2147   }
2148 
2149   if (stopped() || btest == BoolTest::illegal) {
2150     return;                             // nothing to do
2151   }
2152 
2153   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2154 
2155   if (path_is_suitable_for_uncommon_trap(prob)) {
2156     repush_if_args();
2157     uncommon_trap(Deoptimization::Reason_unstable_if,
2158                   Deoptimization::Action_reinterpret,
2159                   NULL,
2160                   (is_fallthrough ? "taken always" : "taken never"));
2161     return;
2162   }
2163 
2333   if (c->Opcode() == Op_CmpP &&
2334       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2335       c->in(2)->is_Con()) {
2336     Node* load_klass = NULL;
2337     Node* decode = NULL;
2338     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2339       decode = c->in(1);
2340       load_klass = c->in(1)->in(1);
2341     } else {
2342       load_klass = c->in(1);
2343     }
2344     if (load_klass->in(2)->is_AddP()) {
2345       Node* addp = load_klass->in(2);
2346       Node* obj = addp->in(AddPNode::Address);
2347       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2348       if (obj_type->speculative_type_not_null() != NULL) {
2349         ciKlass* k = obj_type->speculative_type();
2350         inc_sp(2);
2351         obj = maybe_cast_profiled_obj(obj, k);
2352         dec_sp(2);
2353         if (obj->is_ValueType()) {
2354           assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated");
2355           obj = obj->as_ValueType()->get_oop();
2356         }
2357         // Make the CmpP use the casted obj
2358         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2359         load_klass = load_klass->clone();
2360         load_klass->set_req(2, addp);
2361         load_klass = _gvn.transform(load_klass);
2362         if (decode != NULL) {
2363           decode = decode->clone();
2364           decode->set_req(1, load_klass);
2365           load_klass = _gvn.transform(decode);
2366         }
2367         c = c->clone();
2368         c->set_req(1, load_klass);
2369         c = _gvn.transform(c);
2370       }
2371     }
2372   }
2373   return c;
2374 }
2375 
2376 //------------------------------do_one_bytecode--------------------------------
3184     // See if we can get some profile data and hand it off to the next block
3185     Block *target_block = block()->successor_for_bci(target_bci);
3186     if (target_block->pred_count() != 1)  break;
3187     ciMethodData* methodData = method()->method_data();
3188     if (!methodData->is_mature())  break;
3189     ciProfileData* data = methodData->bci_to_data(bci());
3190     assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
3191     int taken = ((ciJumpData*)data)->taken();
3192     taken = method()->scale_count(taken);
3193     target_block->set_count(taken);
3194     break;
3195   }
3196 
3197   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3198   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3199   handle_if_null:
3200     // If this is a backwards branch in the bytecodes, add Safepoint
3201     maybe_add_safepoint(iter().get_dest());
3202     a = null();
3203     b = pop();
3204     if (b->is_ValueType()) {
3205       // Return constant false because 'b' is always non-null
3206       c = _gvn.makecon(TypeInt::CC_GT);
3207     } else {
3208       if (!_gvn.type(b)->speculative_maybe_null() &&
3209           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
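              // Keep 'b' on the expression stack (inc_sp/dec_sp) so the JVM
              // state is correct if the speculative null check deoptimizes.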
3210         inc_sp(1);
3211         Node* null_ctl = top();
3212         b = null_check_oop(b, &null_ctl, true, true, true);
3213         assert(null_ctl->is_top(), "no null control here");
3214         dec_sp(1);
3215       } else if (_gvn.type(b)->speculative_always_null() &&
3216                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3217         inc_sp(1);
3218         b = null_assert(b);
3219         dec_sp(1);
3220       }
3221       c = _gvn.transform( new CmpPNode(b, a) );
3222     }
3223     do_ifnull(btest, c);
3224     break;
3225 
3226   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3227   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3228   handle_if_acmp:
3229     // If this is a backwards branch in the bytecodes, add Safepoint
3230     maybe_add_safepoint(iter().get_dest());
3231     a = access_resolve(pop(), 0);
3232     b = access_resolve(pop(), 0);
3233     do_acmp(btest, a, b);
3234     break;
3235 
3236   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3237   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3238   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3239   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3240   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3241   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3242   handle_ifxx:
3243     // If this is a backwards branch in the bytecodes, add Safepoint
3244     maybe_add_safepoint(iter().get_dest());
3245     a = _gvn.intcon(0);
3246     b = pop();
3247     c = _gvn.transform( new CmpINode(b, a) );
3248     do_if(btest, c);
3249     break;
3250 
3251   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3252   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3253   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3268     break;
3269 
3270   case Bytecodes::_lookupswitch:
3271     do_lookupswitch();
3272     break;
3273 
3274   case Bytecodes::_invokestatic:
3275   case Bytecodes::_invokedynamic:
3276   case Bytecodes::_invokespecial:
3277   case Bytecodes::_invokevirtual:
3278   case Bytecodes::_invokeinterface:
3279     do_call();
3280     break;
3281   case Bytecodes::_checkcast:
3282     do_checkcast();
3283     break;
3284   case Bytecodes::_instanceof:
3285     do_instanceof();
3286     break;
3287   case Bytecodes::_anewarray:
3288     do_newarray();
3289     break;
3290   case Bytecodes::_newarray:
3291     do_newarray((BasicType)iter().get_index());
3292     break;
3293   case Bytecodes::_multianewarray:
3294     do_multianewarray();
3295     break;
3296   case Bytecodes::_new:
3297     do_new();
3298     break;
3299   case Bytecodes::_defaultvalue:
3300     do_defaultvalue();
3301     break;
3302   case Bytecodes::_withfield:
3303     do_withfield();
3304     break;
3305 
3306   case Bytecodes::_jsr:
3307   case Bytecodes::_jsr_w:
3308     do_jsr();
3309     break;
3310 
3311   case Bytecodes::_ret:
3312     do_ret();
3313     break;
3314 
3315 
3316   case Bytecodes::_monitorenter:
3317     do_monitor_enter();
3318     break;
3319 
3320   case Bytecodes::_monitorexit:
3321     do_monitor_exit();
3322     break;
3323 