
src/hotspot/share/opto/parse2.cpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm_io.h"
  27 #include "ci/ciMethodData.hpp"

  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "interpreter/linkResolver.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"


  39 #include "opto/matcher.hpp"
  40 #include "opto/memnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/opaquenode.hpp"
  43 #include "opto/parse.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "runtime/deoptimization.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 
  48 #ifndef PRODUCT
  49 extern int explicit_null_checks_inserted,
  50            explicit_null_checks_elided;
  51 #endif
  52 
  53 //---------------------------------array_load----------------------------------
  54 void Parse::array_load(BasicType bt) {
  55   const Type* elemtype = Type::TOP;
  56   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  57   Node* adr = array_addressing(bt, 0, elemtype);
  58   if (stopped())  return;     // guaranteed null or range check
  59 
  60   pop();                      // index (already used)
  61   Node* array = pop();        // the array itself

  62 
  63   if (elemtype == TypeInt::BOOL) {
  64     bt = T_BOOLEAN;
  65   }
  66   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  67 
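  // The decorators passed to access_load_at() describe the access for the GC
  // barrier code: IN_HEAP marks an ordinary heap access, IS_ARRAY marks an
  // array-element access, and C2_CONTROL_DEPENDENT_LOAD keeps the load pinned
  // below the current control so it cannot float above the preceding null and
  // range checks.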
  68   Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
  69                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  70   if (big_val) {
  71     push_pair(ld);
  72   } else {
  73     push(ld);

  74   }

  75 }
  76 
  77 
  78 //--------------------------------array_store----------------------------------
  79 void Parse::array_store(BasicType bt) {
  80   const Type* elemtype = Type::TOP;
  81   bool big_val = bt == T_DOUBLE || bt == T_LONG;
  82   Node* adr = array_addressing(bt, big_val ? 2 : 1, elemtype);
  83   if (stopped())  return;     // guaranteed null or range check

  84   if (bt == T_OBJECT) {
  85     array_store_check();
  86     if (stopped()) {
  87       return;
  88     }
  89   }
  90   Node* val;                  // Oop to store
  91   if (big_val) {
  92     val = pop_pair();
  93   } else {
  94     val = pop();
  95   }
  96   pop();                      // index (already used)
  97   Node* array = pop();        // the array itself
  98 
  99   if (elemtype == TypeInt::BOOL) {
 100     bt = T_BOOLEAN;
 101   }
 102   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 103 
 104   access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 105 }
 106 
 107 
 108 //------------------------------array_addressing-------------------------------
 109 // Pull array and index from the stack.  Compute pointer-to-element.
 110 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 111   Node *idx   = peek(0+vals);   // Get from stack without popping
 112   Node *ary   = peek(1+vals);   // in case of exception
 113 
 114   // Null check the array base, with correct stack contents
 115   ary = null_check(ary, T_ARRAY);
 116   // Compile-time detect of null-exception?
 117   if (stopped())  return top();
 118 
 119   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 120   const TypeInt*    sizetype = arytype->size();
 121   elemtype = arytype->elem();
 122 
 123   if (UseUniqueSubclasses) {
 124     const Type* el = elemtype->make_ptr();

 184       if (C->allow_range_check_smearing()) {
 185         // Do not use builtin_throw, since range checks are sometimes
 186         // made more stringent by an optimistic transformation.
 187         // This creates "tentative" range checks at this point,
 188         // which are not guaranteed to throw exceptions.
 189         // See IfNode::Ideal, is_range_check, adjust_check.
 190         uncommon_trap(Deoptimization::Reason_range_check,
 191                       Deoptimization::Action_make_not_entrant,
 192                       NULL, "range_check");
 193       } else {
 194         // If we have already recompiled with the range-check-widening
 195         // heroic optimization turned off, then we must really be throwing
 196         // range check exceptions.
 197         builtin_throw(Deoptimization::Reason_range_check, idx);
 198       }
 199     }
 200   }
 201   // Check for always knowing you are throwing a range-check exception
 202   if (stopped())  return top();
 203 
 204   // Make array address computation control dependent to prevent it
 205   // from floating above the range check during loop optimizations.
 206   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 207   assert(ptr != top(), "top should go hand-in-hand with stopped");
 208 
 209   return ptr;
 210 }
 211 
 212 
 213 // returns IfNode
 214 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 215   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 216   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 217   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 218   return iff;
 219 }
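// A minimal usage sketch (hypothetical operands; the real callers are the
// switch-lowering code in this file). The returned IfNode is consumed by
// building both projections and parsing one arm while the other becomes the
// new control:
//
//   IfNode* iff     = jump_if_fork_int(key_val, _gvn.intcon(42), BoolTest::eq, PROB_FAIR, COUNT_UNKNOWN);
//   Node*   iftrue  = _gvn.transform(new IfTrueNode(iff));
//   Node*   iffalse = _gvn.transform(new IfFalseNode(iff));
//   set_control(iftrue);    // parse the matching case ...
//   // ... then continue with set_control(iffalse) for the remaining keys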
 220 
 221 
 222 // sentinel value for the target bci to mark never taken branches
 223 // (according to profiling)

1392   // Generate real control flow
1393   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
1394 
1395   // Sanity check the probability value
1396   assert(prob > 0.0f,"Bad probability in Parser");
1397  // Need xform to put node in hash table
1398   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1399   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1400   // True branch
1401   { PreserveJVMState pjvms(this);
1402     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1403     set_control(iftrue);
1404 
1405     if (stopped()) {            // Path is dead?
1406       NOT_PRODUCT(explicit_null_checks_elided++);
1407       if (C->eliminate_boxing()) {
1408         // Mark the successor block as parsed
1409         branch_block->next_path_num();
1410       }
1411     } else {                    // Path is live.
1412       adjust_map_after_if(btest, c, prob, branch_block, next_block);
1413       if (!stopped()) {
1414         merge(target_bci);
1415       }
1416     }
1417   }
1418 
1419   // False branch
1420   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1421   set_control(iffalse);
1422 
1423   if (stopped()) {              // Path is dead?
1424     NOT_PRODUCT(explicit_null_checks_elided++);
1425     if (C->eliminate_boxing()) {
1426       // Mark the successor block as parsed
1427       next_block->next_path_num();
1428     }
1429   } else  {                     // Path is live.
1430     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
1431                         next_block, branch_block);
1432   }
1433 }
1434 
1435 //------------------------------------do_if------------------------------------
1436 void Parse::do_if(BoolTest::mask btest, Node* c) {
1437   int target_bci = iter().get_dest();
1438 
1439   Block* branch_block = successor_for_bci(target_bci);
1440   Block* next_block   = successor_for_bci(iter().next_bci());
1441 
1442   float cnt;
1443   float prob = branch_prediction(cnt, btest, target_bci, c);
1444   float untaken_prob = 1.0 - prob;
1445 
1446   if (prob == PROB_UNKNOWN) {
1447     if (PrintOpto && Verbose) {
1448       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1449     }
1450     repush_if_args(); // to gather stats on loop
1451     uncommon_trap(Deoptimization::Reason_unreached,
1452                   Deoptimization::Action_reinterpret,
1453                   NULL, "cold");
1454     if (C->eliminate_boxing()) {
1455       // Mark the successor blocks as parsed
1456       branch_block->next_path_num();

1500   }
1501 
1502   // Generate real control flow
1503   float true_prob = (taken_if_true ? prob : untaken_prob);
1504   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1505   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1506   Node* taken_branch   = new IfTrueNode(iff);
1507   Node* untaken_branch = new IfFalseNode(iff);
1508   if (!taken_if_true) {  // Finish conversion to canonical form
1509     Node* tmp      = taken_branch;
1510     taken_branch   = untaken_branch;
1511     untaken_branch = tmp;
1512   }
1513 
1514   // Branch is taken:
1515   { PreserveJVMState pjvms(this);
1516     taken_branch = _gvn.transform(taken_branch);
1517     set_control(taken_branch);
1518 
1519     if (stopped()) {
1520       if (C->eliminate_boxing()) {
1521         // Mark the successor block as parsed
1522         branch_block->next_path_num();
1523       }
1524     } else {
1525       adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1526       if (!stopped()) {
1527         merge(target_bci);
1528       }
1529     }
1530   }
1531 
1532   untaken_branch = _gvn.transform(untaken_branch);
1533   set_control(untaken_branch);
1534 
1535   // Branch not taken.
1536   if (stopped()) {
1537     if (C->eliminate_boxing()) {
1538       // Mark the successor block as parsed
1539       next_block->next_path_num();
1540     }
1541   } else {
1542     adjust_map_after_if(untaken_btest, c, untaken_prob,
1543                         next_block, branch_block);
1544   }
1545 }
1546 
1547 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
1548   // Don't want to speculate on uncommon traps when running with -Xcomp
1549   if (!UseInterpreter) {
1550     return false;
1551   }
1552   return (seems_never_taken(prob) && seems_stable_comparison());
1553 }
1554 
1555 void Parse::maybe_add_predicate_after_if(Block* path) {
1556   if (path->is_SEL_head() && path->preds_parsed() == 0) {
1557     // Add predicates at bci of if dominating the loop so traps can be
1558     // recorded on the if's profile data
1559     int bc_depth = repush_if_args();
1560     add_empty_predicates();
1561     dec_sp(bc_depth);
1562     path->set_has_predicates();
1563   }
1564 }
1565 
1566 
1567 //----------------------------adjust_map_after_if------------------------------
1568 // Adjust the JVM state to reflect the result of taking this path.
1569 // Basically, it means inspecting the CmpNode controlling this
1570 // branch, seeing how it constrains a tested value, and then
1571 // deciding if it's worth our while to encode this constraint
1572 // as graph nodes in the current abstract interpretation map.
1573 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1574                                 Block* path, Block* other_path) {
1575   if (!c->is_Cmp()) {
1576     maybe_add_predicate_after_if(path);
1577     return;
1578   }
1579 
1580   if (stopped() || btest == BoolTest::illegal) {
1581     return;                             // nothing to do
1582   }
1583 
1584   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1585 
1586   if (path_is_suitable_for_uncommon_trap(prob)) {
1587     repush_if_args();
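    // If the path that profiling says is never taken is the fall-through path,
    // then the branch itself was effectively "taken always"; otherwise it was
    // "taken never".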
1588     uncommon_trap(Deoptimization::Reason_unstable_if,
1589                   Deoptimization::Action_reinterpret,
1590                   NULL,
1591                   (is_fallthrough ? "taken always" : "taken never"));
1592     return;
1593   }
1594 

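  // The match below targets a pointer compare of a loaded klass against a
  // constant, e.g. the shape that `x.getClass() == Foo.class` reduces to: if
  // profiling supplies a speculative type for the object, it is cast to that
  // type first so the klass load (and often the whole compare) can constant-fold.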
1764   if (c->Opcode() == Op_CmpP &&
1765       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1766       c->in(2)->is_Con()) {
1767     Node* load_klass = NULL;
1768     Node* decode = NULL;
1769     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1770       decode = c->in(1);
1771       load_klass = c->in(1)->in(1);
1772     } else {
1773       load_klass = c->in(1);
1774     }
1775     if (load_klass->in(2)->is_AddP()) {
1776       Node* addp = load_klass->in(2);
1777       Node* obj = addp->in(AddPNode::Address);
1778       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1779       if (obj_type->speculative_type_not_null() != NULL) {
1780         ciKlass* k = obj_type->speculative_type();
1781         inc_sp(2);
1782         obj = maybe_cast_profiled_obj(obj, k);
1783         dec_sp(2);
1784         // Make the CmpP use the casted obj
1785         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1786         load_klass = load_klass->clone();
1787         load_klass->set_req(2, addp);
1788         load_klass = _gvn.transform(load_klass);
1789         if (decode != NULL) {
1790           decode = decode->clone();
1791           decode->set_req(1, load_klass);
1792           load_klass = _gvn.transform(decode);
1793         }
1794         c = c->clone();
1795         c->set_req(1, load_klass);
1796         c = _gvn.transform(c);
1797       }
1798     }
1799   }
1800   return c;
1801 }
1802 
1803 //------------------------------do_one_bytecode--------------------------------

2609     // See if we can get some profile data and hand it off to the next block
2610     Block *target_block = block()->successor_for_bci(target_bci);
2611     if (target_block->pred_count() != 1)  break;
2612     ciMethodData* methodData = method()->method_data();
2613     if (!methodData->is_mature())  break;
2614     ciProfileData* data = methodData->bci_to_data(bci());
2615     assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
2616     int taken = ((ciJumpData*)data)->taken();
2617     taken = method()->scale_count(taken);
2618     target_block->set_count(taken);
2619     break;
2620   }
2621 
2622   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
2623   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2624   handle_if_null:
2625     // If this is a backwards branch in the bytecodes, add Safepoint
2626     maybe_add_safepoint(iter().get_dest());
2627     a = null();
2628     b = pop();
2629     if (!_gvn.type(b)->speculative_maybe_null() &&
2630         !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2631       inc_sp(1);
2632       Node* null_ctl = top();
2633       b = null_check_oop(b, &null_ctl, true, true, true);
2634       assert(null_ctl->is_top(), "no null control here");
2635       dec_sp(1);
2636     } else if (_gvn.type(b)->speculative_always_null() &&
2637                !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2638       inc_sp(1);
2639       b = null_assert(b);
2640       dec_sp(1);
2641     }
2642     c = _gvn.transform( new CmpPNode(b, a) );
2643     do_ifnull(btest, c);
2644     break;
2645 
2646   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2647   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2648   handle_if_acmp:
2649     // If this is a backwards branch in the bytecodes, add Safepoint
2650     maybe_add_safepoint(iter().get_dest());
2651     a = pop();
2652     b = pop();
2653     c = _gvn.transform( new CmpPNode(b, a) );
2654     c = optimize_cmp_with_klass(c);
2655     do_if(btest, c);
2656     break;
2657 
2658   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2659   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2660   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2661   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2662   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2663   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2664   handle_ifxx:
2665     // If this is a backwards branch in the bytecodes, add Safepoint
2666     maybe_add_safepoint(iter().get_dest());
2667     a = _gvn.intcon(0);
2668     b = pop();
2669     c = _gvn.transform( new CmpINode(b, a) );
2670     do_if(btest, c);
2671     break;
2672 
2673   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2674   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2675   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;

2690     break;
2691 
2692   case Bytecodes::_lookupswitch:
2693     do_lookupswitch();
2694     break;
2695 
2696   case Bytecodes::_invokestatic:
2697   case Bytecodes::_invokedynamic:
2698   case Bytecodes::_invokespecial:
2699   case Bytecodes::_invokevirtual:
2700   case Bytecodes::_invokeinterface:
2701     do_call();
2702     break;
2703   case Bytecodes::_checkcast:
2704     do_checkcast();
2705     break;
2706   case Bytecodes::_instanceof:
2707     do_instanceof();
2708     break;
2709   case Bytecodes::_anewarray:
2710     do_anewarray();
2711     break;
2712   case Bytecodes::_newarray:
2713     do_newarray((BasicType)iter().get_index());
2714     break;
2715   case Bytecodes::_multianewarray:
2716     do_multianewarray();
2717     break;
2718   case Bytecodes::_new:
2719     do_new();
2720     break;
2721 
2722   case Bytecodes::_jsr:
2723   case Bytecodes::_jsr_w:
2724     do_jsr();
2725     break;
2726 
2727   case Bytecodes::_ret:
2728     do_ret();
2729     break;
2730 
2731 
2732   case Bytecodes::_monitorenter:
2733     do_monitor_enter();
2734     break;
2735 
2736   case Bytecodes::_monitorexit:
2737     do_monitor_exit();
2738     break;
2739 
2740   case Bytecodes::_breakpoint:

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm_io.h"
  27 #include "ci/ciMethodData.hpp"
  28 #include "ci/ciSymbols.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "interpreter/linkResolver.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "memory/universe.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "opto/addnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/convertnode.hpp"
  38 #include "opto/divnode.hpp"
  39 #include "opto/idealGraphPrinter.hpp"
  40 #include "opto/idealKit.hpp"
  41 #include "opto/inlinetypenode.hpp"
  42 #include "opto/matcher.hpp"
  43 #include "opto/memnode.hpp"
  44 #include "opto/mulnode.hpp"
  45 #include "opto/opaquenode.hpp"
  46 #include "opto/parse.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "runtime/deoptimization.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 
  51 #ifndef PRODUCT
  52 extern int explicit_null_checks_inserted,
  53            explicit_null_checks_elided;
  54 #endif
  55 
  56 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  57   // Feed unused profile data to type speculation
  58   if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
  59     ciKlass* array_type = NULL;
  60     ciKlass* element_type = NULL;
  61     ProfilePtrKind element_ptr = ProfileMaybeNull;
  62     bool flat_array = true;
  63     bool null_free_array = true;
  64     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
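    // array_access_profiled_type() fills its reference arguments from the profile
    // collected at this bci; only the element type and its nullness are used at
    // this call site, the flatness/null-freeness flags are ignored here.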
  65     if (element_type != NULL || element_ptr != ProfileMaybeNull) {
  66       ld = record_profile_for_speculation(ld, element_type, element_ptr);
  67     }
  68   }
  69   return ld;
  70 }
  71 
  72 
  73 //---------------------------------array_load----------------------------------
  74 void Parse::array_load(BasicType bt) {
  75   const Type* elemtype = Type::TOP;

  76   Node* adr = array_addressing(bt, 0, elemtype);
  77   if (stopped())  return;     // guaranteed null or range check
  78 
  79   Node* idx = pop();
  80   Node* ary = pop();
  81 
  82   // Handle inline type arrays
  83   const TypeOopPtr* elemptr = elemtype->make_oopptr();
  84   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  85   if (ary_t->is_flat()) {
  86     // Load from flattened inline type array
  87     Node* vt = InlineTypeNode::make_from_flattened(this, elemtype->inline_klass(), ary, adr);
  88     push(vt);
  89     return;
  90   } else if (ary_t->is_null_free()) {
  91     // Load from non-flattened inline type array (elements can never be null)
  92     bt = T_PRIMITIVE_OBJECT;
  93   } else if (!ary_t->is_not_flat()) {
  94     // Cannot statically determine if array is flattened, emit runtime check
  95     assert(UseFlatArray && is_reference_type(bt) && elemptr->can_be_inline_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() &&
  96            (!elemptr->is_inlinetypeptr() || elemptr->inline_klass()->flatten_array()), "array can't be flattened");
  97     IdealKit ideal(this);
  98     IdealVariable res(ideal);
  99     ideal.declarations_done();
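    // IdealKit builds the two-way control flow below. sync_kit(ideal) imports the
    // IdealKit's control and memory state into this kit before emitting regular
    // GraphKit IR inside a branch, and ideal.sync_kit(this) exports that state
    // back into the IdealKit before leaving the branch.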
 100     ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
 101       // non-flattened
 102       assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 103       sync_kit(ideal);
 104       const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 105       Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
 106                                 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 107       if (elemptr->is_inlinetypeptr()) {
 108         assert(elemptr->maybe_null(), "null free array should be handled above");
 109         ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), false);
 110       }
 111       ideal.sync_kit(this);
 112       ideal.set(res, ld);
 113     } ideal.else_(); {
 114       // flattened
 115       sync_kit(ideal);
 116       if (elemptr->is_inlinetypeptr()) {
 117         // Element type is known, cast and load from flattened representation
 118         ciInlineKlass* vk = elemptr->inline_klass();
 119         assert(vk->flatten_array() && elemptr->maybe_null(), "never/always flat - should be optimized");
 120         ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
 121         const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 122         Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 123         Node* casted_adr = array_element_address(cast, idx, T_PRIMITIVE_OBJECT, ary_t->size(), control());
 124         // Re-execute flattened array load if buffering triggers deoptimization
 125         PreserveReexecuteState preexecs(this);
 126         jvms()->set_should_reexecute(true);
 127         inc_sp(2);
 128         Node* vt = InlineTypeNode::make_from_flattened(this, vk, cast, casted_adr)->buffer(this, false);
 129         ideal.set(res, vt);
 130         ideal.sync_kit(this);
 131       } else {
 132         // Element type is unknown, emit runtime call
 133 
 134         // Below membars keep this access to an unknown flattened array correctly
 135         // ordered with other unknown and known flattened array accesses.
 136         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 137 
 138         Node* call = NULL;
 139         {
 140           // Re-execute flattened array load if runtime call triggers deoptimization
 141           PreserveReexecuteState preexecs(this);
 142           jvms()->set_bci(_bci);
 143           jvms()->set_should_reexecute(true);
 144           inc_sp(2);
 145           kill_dead_locals();
 146           call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 147                                    OptoRuntime::load_unknown_inline_type(),
 148                                    OptoRuntime::load_unknown_inline_Java(),
 149                                    NULL, TypeRawPtr::BOTTOM,
 150                                    ary, idx);
 151         }
 152         make_slow_call_ex(call, env()->Throwable_klass(), false);
 153         Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
 154 
 155         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 156 
 157         // Keep track of the information that the inline type is flattened in arrays
 158         const Type* unknown_value = elemptr->is_instptr()->cast_to_flatten_array();
 159         buffer = _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
 160 
 161         ideal.sync_kit(this);
 162         ideal.set(res, buffer);
 163       }
 164     } ideal.end_if();
 165     sync_kit(ideal);
 166     Node* ld = _gvn.transform(ideal.value(res));
 167     ld = record_profile_for_speculation_at_array_load(ld);
 168     push_node(bt, ld);
 169     return;
 170   }
 171 
 172   if (elemtype == TypeInt::BOOL) {
 173     bt = T_BOOLEAN;
 174   }
 175   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 176   Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,

 177                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 178   ld = record_profile_for_speculation_at_array_load(ld);
 179   // Loading a non-flattened inline type
 180   if (elemptr != NULL && elemptr->is_inlinetypeptr()) {
 181     assert(!ary_t->is_null_free() || !elemptr->maybe_null(), "inline type array elements should never be null");
 182     ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), !elemptr->maybe_null());
 183   }
 184   push_node(bt, ld);
 185 }
 186 
 187 
 188 //--------------------------------array_store----------------------------------
 189 void Parse::array_store(BasicType bt) {
 190   const Type* elemtype = Type::TOP;
 191   Node* adr = array_addressing(bt, type2size[bt], elemtype);

 192   if (stopped())  return;     // guaranteed null or range check
 193   Node* cast_val = NULL;
 194   if (bt == T_OBJECT) {
 195     cast_val = array_store_check(adr, elemtype);
 196     if (stopped()) return;


 197   }
 198   Node* val = pop_node(bt); // Value to store
 199   Node* idx = pop();        // Index in the array
 200   Node* ary = pop();        // The array itself
 201 
 202   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
 203   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 204   assert(adr->as_AddP()->in(AddPNode::Base) == ary, "inconsistent address base");

 205 
 206   if (elemtype == TypeInt::BOOL) {
 207     bt = T_BOOLEAN;
 208   } else if (bt == T_OBJECT) {
 209     elemtype = elemtype->make_oopptr();
 210     const Type* tval = _gvn.type(cast_val);
 211     // We may have lost type information for 'val' here due to the casts
 212     // emitted by the array_store_check code (see JDK-6312651)
 213     // TODO Remove this code once JDK-6312651 is in.
 214     const Type* tval_init = _gvn.type(val);
 215     // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
 216     // This is only legal for non-null stores because the array_store_check always passes for null, even
 217     // if the array is null-free. Null stores are handled in GraphKit::gen_inline_array_null_guard().
 218     bool not_inline = !tval->isa_inlinetype() &&
 219                       ((!tval_init->maybe_null() && !tval_init->is_oopptr()->can_be_inline_type()) ||
 220                        (!tval->maybe_null() && !tval->is_oopptr()->can_be_inline_type()));
 221     bool not_flattened = not_inline || ((tval_init->is_inlinetypeptr() || tval_init->isa_inlinetype()) && !tval_init->inline_klass()->flatten_array());
 222     if (!ary_t->is_not_null_free() && not_inline) {
 223       // Storing a non-inline type, mark array as not null-free (-> not flat).
 224       ary_t = ary_t->cast_to_not_null_free();
 225       Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 226       replace_in_map(ary, cast);
 227       ary = cast;
 228     } else if (!ary_t->is_not_flat() && not_flattened) {
 229       // Storing a non-flattened value, mark array as not flat.
 230       ary_t = ary_t->cast_to_not_flat();
 231       Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 232       replace_in_map(ary, cast);
 233       ary = cast;
 234     }
 235 
 236     if (ary_t->is_flat()) {
 237       // Store to flattened inline type array
 238       assert(!tval->maybe_null(), "should be guaranteed by array store check");
 239       // Re-execute flattened array store if buffering triggers deoptimization
 240       PreserveReexecuteState preexecs(this);
 241       inc_sp(3);
 242       jvms()->set_should_reexecute(true);
 243       cast_val->as_InlineTypeBase()->store_flattened(this, ary, adr, NULL, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 244       return;
 245     } else if (ary_t->is_null_free()) {
 246       // Store to non-flattened inline type array (elements can never be null)
 247       assert(!tval->maybe_null(), "should be guaranteed by array store check");
 248       if (elemtype->inline_klass()->is_empty()) {
 249         // Ignore empty inline stores, array is already initialized.
 250         return;
 251       }
 252     } else if (!ary_t->is_not_flat() && (tval != TypePtr::NULL_PTR || StressReflectiveCode)) {
 253       // Array might be flattened, emit runtime checks (for NULL, a simple inline_array_null_guard is sufficient).
 254       assert(UseFlatArray && !not_flattened && elemtype->is_oopptr()->can_be_inline_type() &&
 255              !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be flattened");
 256       IdealKit ideal(this);
 257       ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
 258         // non-flattened
 259         assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 260         sync_kit(ideal);
 261         Node* cast_ary = inline_array_null_guard(ary, cast_val, 3);
 262         inc_sp(3);
 263         access_store_at(cast_ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
 264         dec_sp(3);
 265         ideal.sync_kit(this);
 266       } ideal.else_(); {
 267         sync_kit(ideal);
 268         // flattened
 269         Node* null_ctl = top();
 270         Node* val = null_check_oop(cast_val, &null_ctl);
 271         if (null_ctl != top()) {
 272           PreserveJVMState pjvms(this);
 273           inc_sp(3);
 274           set_control(null_ctl);
 275           uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
 276           dec_sp(3);
 277         }
 278         // Try to determine the inline klass
 279         ciInlineKlass* vk = NULL;
 280         if (tval->isa_inlinetype() || tval->is_inlinetypeptr()) {
 281           vk = tval->inline_klass();
 282         } else if (tval_init->isa_inlinetype() || tval_init->is_inlinetypeptr()) {
 283           vk = tval_init->inline_klass();
 284         } else if (elemtype->is_inlinetypeptr()) {
 285           vk = elemtype->inline_klass();
 286         }
 287         Node* casted_ary = ary;
 288         if (vk != NULL && !stopped()) {
 289           // Element type is known, cast and store to flattened representation
 290           assert(vk->flatten_array() && elemtype->maybe_null(), "never/always flat - should be optimized");
 291           ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
 292           const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 293           casted_ary = _gvn.transform(new CheckCastPPNode(control(), casted_ary, arytype));
 294           Node* casted_adr = array_element_address(casted_ary, idx, T_OBJECT, arytype->size(), control());
 295           if (!val->is_InlineType()) {
 296             assert(!gvn().type(val)->maybe_null(), "inline type array elements should never be null");
 297             val = InlineTypeNode::make_from_oop(this, val, vk);
 298           }
 299           // Re-execute flattened array store if buffering triggers deoptimization
 300           PreserveReexecuteState preexecs(this);
 301           inc_sp(3);
 302           jvms()->set_should_reexecute(true);
 303           val->as_InlineTypeBase()->store_flattened(this, casted_ary, casted_adr, NULL, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 304         } else if (!stopped()) {
 305           // Element type is unknown, emit runtime call
 306 
 307           // Below membars keep this access to an unknown flattened array correctly
 308           // ordered with other unknown and known flattened array accesses.
 309           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 310 
 311           make_runtime_call(RC_LEAF,
 312                             OptoRuntime::store_unknown_inline_type(),
 313                             CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_inline),
 314                             "store_unknown_inline", TypeRawPtr::BOTTOM,
 315                             val, casted_ary, idx);
 316 
 317           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 318         }
 319         ideal.sync_kit(this);
 320       }
 321       ideal.end_if();
 322       sync_kit(ideal);
 323       return;
 324     } else if (!ary_t->is_not_null_free()) {
 325       // Array is not flattened but may be null free
 326       assert(elemtype->is_oopptr()->can_be_inline_type() && !ary_t->klass_is_exact(), "array can't be null-free");
 327       ary = inline_array_null_guard(ary, cast_val, 3, true);
 328     }
 329   }
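  // Keep the stack depth of the array-store bytecode across the access so that,
  // if anything in the store (e.g. a barrier slow path) deoptimizes, the debug
  // info still describes the operands on the interpreter's expression stack.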
 330   inc_sp(3);
 331   access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 332   dec_sp(3);
 333 }
 334 
 335 
 336 //------------------------------array_addressing-------------------------------
 337 // Pull array and index from the stack.  Compute pointer-to-element.
 338 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 339   Node *idx   = peek(0+vals);   // Get from stack without popping
 340   Node *ary   = peek(1+vals);   // in case of exception
 341 
 342   // Null check the array base, with correct stack contents
 343   ary = null_check(ary, T_ARRAY);
 344   // Compile-time detect of null-exception?
 345   if (stopped())  return top();
 346 
 347   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 348   const TypeInt*    sizetype = arytype->size();
 349   elemtype = arytype->elem();
 350 
 351   if (UseUniqueSubclasses) {
 352     const Type* el = elemtype->make_ptr();

 412       if (C->allow_range_check_smearing()) {
 413         // Do not use builtin_throw, since range checks are sometimes
 414         // made more stringent by an optimistic transformation.
 415         // This creates "tentative" range checks at this point,
 416         // which are not guaranteed to throw exceptions.
 417         // See IfNode::Ideal, is_range_check, adjust_check.
 418         uncommon_trap(Deoptimization::Reason_range_check,
 419                       Deoptimization::Action_make_not_entrant,
 420                       NULL, "range_check");
 421       } else {
 422         // If we have already recompiled with the range-check-widening
 423         // heroic optimization turned off, then we must really be throwing
 424         // range check exceptions.
 425         builtin_throw(Deoptimization::Reason_range_check, idx);
 426       }
 427     }
 428   }
 429   // Check for always knowing you are throwing a range-check exception
 430   if (stopped())  return top();
 431 
 432   // This could be an access to an inline type array. We can't tell if it's
 433   // flat or not. Knowing the exact type avoids runtime checks and leads to
 434   // a much simpler graph shape. Check profile information.
 435   if (!arytype->is_flat() && !arytype->is_not_flat()) {
 436     // First check the speculative type
 437     Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
 438     ciKlass* array_type = arytype->speculative_type();
 439     if (too_many_traps_or_recompiles(reason) || array_type == NULL) {
 440       // No speculative type, check profile data at this bci
 441       array_type = NULL;
 442       reason = Deoptimization::Reason_class_check;
 443       if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 444         ciKlass* element_type = NULL;
 445         ProfilePtrKind element_ptr = ProfileMaybeNull;
 446         bool flat_array = true;
 447         bool null_free_array = true;
 448         method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 449       }
 450     }
 451     if (array_type != NULL) {
 452       // Speculate that this array has the exact type reported by profile data
 453       Node* better_ary = NULL;
 454       DEBUG_ONLY(Node* old_control = control();)
 455       Node* slow_ctl = type_check_receiver(ary, array_type, 1.0, &better_ary);
 456       if (stopped()) {
 457         // The check always fails and therefore profile information is incorrect. Don't use it.
 458         assert(old_control == slow_ctl, "type check should have been removed");
 459         set_control(slow_ctl);
 460       } else if (!slow_ctl->is_top()) {
 461         { PreserveJVMState pjvms(this);
 462           set_control(slow_ctl);
 463           uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 464         }
 465         replace_in_map(ary, better_ary);
 466         ary = better_ary;
 467         arytype  = _gvn.type(ary)->is_aryptr();
 468         elemtype = arytype->elem();
 469       }
 470     }
 471   } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
 472     // No need to speculate: feed profile data at this bci for the
 473     // array to type speculation
 474     ciKlass* array_type = NULL;
 475     ciKlass* element_type = NULL;
 476     ProfilePtrKind element_ptr = ProfileMaybeNull;
 477     bool flat_array = true;
 478     bool null_free_array = true;
 479     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 480     if (array_type != NULL) {
 481       ary = record_profile_for_speculation(ary, array_type, ProfileMaybeNull);
 482     }
 483   }
 484 
 485   // We have no exact array type from profile data. Check profile data
 486   // for a non null-free or non flat array. Non null-free implies non
 487   // flat so check this one first. Speculating on a non null-free
 488   // array doesn't help aaload but could be profitable for a
 489   // subsequent aastore.
 490   if (!arytype->is_null_free() && !arytype->is_not_null_free()) {
 491     bool null_free_array = true;
 492     Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 493     if (arytype->speculative() != NULL &&
 494         arytype->speculative()->is_aryptr()->is_not_null_free() &&
 495         !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 496       null_free_array = false;
 497       reason = Deoptimization::Reason_speculate_class_check;
 498     } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
 499       ciKlass* array_type = NULL;
 500       ciKlass* element_type = NULL;
 501       ProfilePtrKind element_ptr = ProfileMaybeNull;
 502       bool flat_array = true;
 503       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 504       reason = Deoptimization::Reason_class_check;
 505     }
 506     if (!null_free_array) {
 507       { // Deoptimize if null-free array
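        // BuildCutout emits an If on the given test; inside this scope, control is
        // on the path where the test fails, so the uncommon trap below is reached
        // only for a null-free array. When the scope ends, control resumes on the
        // passing (not null-free) path.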
 508         BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
 509         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 510       }
 511       assert(!stopped(), "null-free array should have been caught earlier");
 512       Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free()));
 513       replace_in_map(ary, better_ary);
 514       ary = better_ary;
 515       arytype = _gvn.type(ary)->is_aryptr();
 516     }
 517   }
 518 
 519   if (!arytype->is_flat() && !arytype->is_not_flat()) {
 520     bool flat_array = true;
 521     Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 522     if (arytype->speculative() != NULL &&
 523         arytype->speculative()->is_aryptr()->is_not_flat() &&
 524         !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 525       flat_array = false;
 526       reason = Deoptimization::Reason_speculate_class_check;
 527     } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 528       ciKlass* array_type = NULL;
 529       ciKlass* element_type = NULL;
 530       ProfilePtrKind element_ptr = ProfileMaybeNull;
 531       bool null_free_array = true;
 532       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 533       reason = Deoptimization::Reason_class_check;
 534     }
 535     if (!flat_array) {
 536       { // Deoptimize if flat array
 537         BuildCutout unless(this, flat_array_test(ary, /* flat = */ false), PROB_MAX);
 538         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 539       }
 540       assert(!stopped(), "flat array should have been caught earlier");
 541       Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_flat()));
 542       replace_in_map(ary, better_ary);
 543       ary = better_ary;
 544       arytype = _gvn.type(ary)->is_aryptr();
 545     }
 546   }
 547 
 548   // Make array address computation control dependent to prevent it
 549   // from floating above the range check during loop optimizations.
 550   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 551   assert(ptr != top(), "top should go hand-in-hand with stopped");
 552 
 553   return ptr;
 554 }
 555 
 556 
 557 // returns IfNode
 558 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 559   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 560   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 561   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 562   return iff;
 563 }
 564 
 565 
 566 // sentinel value for the target bci to mark never taken branches
 567 // (according to profiling)

1736   // Generate real control flow
1737   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
1738 
1739   // Sanity check the probability value
1740   assert(prob > 0.0f,"Bad probability in Parser");
1741  // Need xform to put node in hash table
1742   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1743   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1744   // True branch
1745   { PreserveJVMState pjvms(this);
1746     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1747     set_control(iftrue);
1748 
1749     if (stopped()) {            // Path is dead?
1750       NOT_PRODUCT(explicit_null_checks_elided++);
1751       if (C->eliminate_boxing()) {
1752         // Mark the successor block as parsed
1753         branch_block->next_path_num();
1754       }
1755     } else {                    // Path is live.
1756       adjust_map_after_if(btest, c, prob, branch_block);
1757       if (!stopped()) {
1758         merge(target_bci);
1759       }
1760     }
1761   }
1762 
1763   // False branch
1764   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1765   set_control(iffalse);
1766 
1767   if (stopped()) {              // Path is dead?
1768     NOT_PRODUCT(explicit_null_checks_elided++);
1769     if (C->eliminate_boxing()) {
1770       // Mark the successor block as parsed
1771       next_block->next_path_num();
1772     }
1773   } else  {                     // Path is live.
1774     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);

1775   }
1776 }
1777 
1778 //------------------------------------do_if------------------------------------
1779 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
1780   int target_bci = iter().get_dest();
1781 
1782   Block* branch_block = successor_for_bci(target_bci);
1783   Block* next_block   = successor_for_bci(iter().next_bci());
1784 
1785   float cnt;
1786   float prob = branch_prediction(cnt, btest, target_bci, c);
1787   float untaken_prob = 1.0 - prob;
1788 
1789   if (prob == PROB_UNKNOWN) {
1790     if (PrintOpto && Verbose) {
1791       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1792     }
1793     repush_if_args(); // to gather stats on loop
1794     uncommon_trap(Deoptimization::Reason_unreached,
1795                   Deoptimization::Action_reinterpret,
1796                   NULL, "cold");
1797     if (C->eliminate_boxing()) {
1798       // Mark the successor blocks as parsed
1799       branch_block->next_path_num();

1843   }
1844 
1845   // Generate real control flow
1846   float true_prob = (taken_if_true ? prob : untaken_prob);
1847   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1848   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1849   Node* taken_branch   = new IfTrueNode(iff);
1850   Node* untaken_branch = new IfFalseNode(iff);
1851   if (!taken_if_true) {  // Finish conversion to canonical form
1852     Node* tmp      = taken_branch;
1853     taken_branch   = untaken_branch;
1854     untaken_branch = tmp;
1855   }
1856 
1857   // Branch is taken:
1858   { PreserveJVMState pjvms(this);
1859     taken_branch = _gvn.transform(taken_branch);
1860     set_control(taken_branch);
1861 
1862     if (stopped()) {
1863       if (C->eliminate_boxing() && !new_path) {
1864         // Mark the successor block as parsed (if we haven't created a new path)
1865         branch_block->next_path_num();
1866       }
1867     } else {
1868       adjust_map_after_if(taken_btest, c, prob, branch_block);
1869       if (!stopped()) {
1870         if (new_path) {
1871           // Merge by using a new path
1872           merge_new_path(target_bci);
1873         } else if (ctrl_taken != NULL) {
1874           // Don't merge but save taken branch to be wired by caller
1875           *ctrl_taken = control();
1876         } else {
1877           merge(target_bci);
1878         }
1879       }
1880     }
1881   }
1882 
1883   untaken_branch = _gvn.transform(untaken_branch);
1884   set_control(untaken_branch);
1885 
1886   // Branch not taken.
1887   if (stopped() && ctrl_taken == NULL) {
1888     if (C->eliminate_boxing()) {
1889       // Mark the successor block as parsed (if caller does not re-wire control flow)
1890       next_block->next_path_num();
1891     }
1892   } else {
1893     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1894   }
1895 }
1896 
1897 
1898 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1899   if (t->speculative() == NULL) {
1900     return ProfileUnknownNull;
1901   }
1902   if (t->speculative_always_null()) {
1903     return ProfileAlwaysNull;
1904   }
1905   if (t->speculative_maybe_null()) {
1906     return ProfileMaybeNull;
1907   }
1908   return ProfileNeverNull;
1909 }
1910 
1911 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1912   inc_sp(2);
1913   Node* cast = null_check_common(input, T_OBJECT, true, NULL,
1914                                  !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1915                                  speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1916   dec_sp(2);
1917   if (btest == BoolTest::ne) {
1918     {
1919       PreserveJVMState pjvms(this);
1920       replace_in_map(input, cast);
1921       int target_bci = iter().get_dest();
1922       merge(target_bci);
1923     }
1924     record_for_igvn(eq_region);
1925     set_control(_gvn.transform(eq_region));
1926   } else {
1927     replace_in_map(input, cast);
1928   }
1929 }
1930 
1931 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
1932   inc_sp(2);
1933   null_ctl = top();
1934   Node* cast = null_check_oop(input, &null_ctl,
1935                               input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
1936                               false,
1937                               speculative_ptr_kind(tinput) == ProfileNeverNull &&
1938                               !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
1939   dec_sp(2);
1940   assert(!stopped(), "null input should have been caught earlier");
1941   if (cast->is_InlineType()) {
1942     cast = cast->as_InlineType()->get_oop();
1943   }
1944   return cast;
1945 }
1946 
1947 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
1948   Node* ne_region = new RegionNode(1);
1949   Node* null_ctl;
1950   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
1951   ne_region->add_req(null_ctl);
1952 
1953   Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
1954   {
1955     PreserveJVMState pjvms(this);
1956     inc_sp(2);
1957     set_control(slow_ctl);
1958     Deoptimization::DeoptReason reason;
1959     if (tinput->speculative_type() != NULL && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
1960       reason = Deoptimization::Reason_speculate_class_check;
1961     } else {
1962       reason = Deoptimization::Reason_class_check;
1963     }
1964     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
1965   }
1966   ne_region->add_req(control());
1967 
1968   record_for_igvn(ne_region);
1969   set_control(_gvn.transform(ne_region));
1970   if (btest == BoolTest::ne) {
1971     {
1972       PreserveJVMState pjvms(this);
1973       if (null_ctl == top()) {
1974         replace_in_map(input, cast);
1975       }
1976       int target_bci = iter().get_dest();
1977       merge(target_bci);
1978     }
1979     record_for_igvn(eq_region);
1980     set_control(_gvn.transform(eq_region));
1981   } else {
1982     if (null_ctl == top()) {
1983       replace_in_map(input, cast);
1984     }
1985     set_control(_gvn.transform(ne_region));
1986   }
1987 }
1988 
1989 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
1990   Node* ne_region = new RegionNode(1);
1991   Node* null_ctl;
1992   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
1993   ne_region->add_req(null_ctl);
1994 
1995   {
1996     BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
1997     inc_sp(2);
1998     uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
1999   }
2000 
2001   ne_region->add_req(control());
2002 
2003   record_for_igvn(ne_region);
2004   set_control(_gvn.transform(ne_region));
2005   if (btest == BoolTest::ne) {
2006     {
2007       PreserveJVMState pjvms(this);
2008       if (null_ctl == top()) {
2009         replace_in_map(input, cast);
2010       }
2011       int target_bci = iter().get_dest();
2012       merge(target_bci);
2013     }
2014     record_for_igvn(eq_region);
2015     set_control(_gvn.transform(eq_region));
2016   } else {
2017     if (null_ctl == top()) {
2018       replace_in_map(input, cast);
2019     }
2020     set_control(_gvn.transform(ne_region));
2021   }
2022 }
2023 
2024 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2025   ciKlass* left_type = NULL;
2026   ciKlass* right_type = NULL;
2027   ProfilePtrKind left_ptr = ProfileUnknownNull;
2028   ProfilePtrKind right_ptr = ProfileUnknownNull;
2029   bool left_inline_type = true;
2030   bool right_inline_type = true;
2031 
2032   // Leverage profiling at acmp
2033   if (UseACmpProfile) {
2034     method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2035     if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2036       left_type = NULL;
2037       right_type = NULL;
2038       left_inline_type = true;
2039       right_inline_type = true;
2040     }
2041     if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2042       left_ptr = ProfileUnknownNull;
2043       right_ptr = ProfileUnknownNull;
2044     }
2045   }
2046 
2047   if (UseTypeSpeculation) {
2048     record_profile_for_speculation(left, left_type, left_ptr);
2049     record_profile_for_speculation(right, right_type, right_ptr);
2050   }
2051 
2052   if (!EnableValhalla) {
2053     Node* cmp = CmpP(left, right);
2054     cmp = optimize_cmp_with_klass(cmp);
2055     do_if(btest, cmp);
2056     return;
2057   }
2058 
2059   // Check for equality before potentially allocating
2060   if (left == right) {
2061     do_if(btest, makecon(TypeInt::CC_EQ));
2062     return;
2063   }
2064 
2065   // Allocate inline type operands and re-execute on deoptimization
2066   if (left->is_InlineTypeBase()) {
2067     if (_gvn.type(right)->is_zero_type() ||
2068         (right->is_InlineTypeBase() && _gvn.type(right->as_InlineTypeBase()->get_is_init())->is_zero_type())) {
2069       // Null checking a scalarized but nullable inline type. Check the IsInit
2070       // input instead of the oop input to avoid keeping buffer allocations alive.
2071       Node* cmp = CmpI(left->as_InlineTypeBase()->get_is_init(), intcon(0));
2072       do_if(btest, cmp);
2073       return;
2074     } else if (left->is_InlineType()){
2075       PreserveReexecuteState preexecs(this);
2076       inc_sp(2);
2077       jvms()->set_should_reexecute(true);
2078       left = left->as_InlineType()->buffer(this)->get_oop();
2079     }
2080   }
2081   if (right->is_InlineType()) {
2082     PreserveReexecuteState preexecs(this);
2083     inc_sp(2);
2084     jvms()->set_should_reexecute(true);
2085     right = right->as_InlineType()->buffer(this)->get_oop();
2086   }
2087 
2088   // First, do a normal pointer comparison
2089   const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2090   const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2091   Node* cmp = CmpP(left, right);
2092   cmp = optimize_cmp_with_klass(cmp);
2093   if (tleft == NULL || !tleft->can_be_inline_type() ||
2094       tright == NULL || !tright->can_be_inline_type()) {
2095     // This is sufficient, if one of the operands can't be an inline type
2096     do_if(btest, cmp);
2097     return;
2098   }
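       // Example: if one side is statically known to be an identity class (say String),
       // two references can only be acmp-equal when they are the very same object, so
       // the pointer comparison above already implements the full semantics.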
2099   Node* eq_region = NULL;
2100   if (btest == BoolTest::eq) {
2101     do_if(btest, cmp, true);
2102     if (stopped()) {
2103       return;
2104     }
2105   } else {
2106     assert(btest == BoolTest::ne, "only eq or ne");
2107     Node* is_not_equal = NULL;
2108     eq_region = new RegionNode(3);
2109     {
2110       PreserveJVMState pjvms(this);
2111       do_if(btest, cmp, false, &is_not_equal);
2112       if (!stopped()) {
2113         eq_region->init_req(1, control());
2114       }
2115     }
2116     if (is_not_equal == NULL || is_not_equal->is_top()) {
2117       record_for_igvn(eq_region);
2118       set_control(_gvn.transform(eq_region));
2119       return;
2120     }
2121     set_control(is_not_equal);
2122   }
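       // For acmpne, do_if() hands the not-equal projection back in is_not_equal instead
       // of branching on it directly: paths on which the pointers compared equal are
       // collected in eq_region (they become the fall-through at the end of this method),
       // while parsing continues below on the not-equal path to decide whether two
       // distinct pointers may still be substitutable inline objects.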
2123 
2124   // Prefer speculative types if available
2125   if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2126     if (tleft->speculative_type() != NULL) {
2127       left_type = tleft->speculative_type();
2128     }
2129     if (tright->speculative_type() != NULL) {
2130       right_type = tright->speculative_type();
2131     }
2132   }
2133 
2134   if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2135     ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2136     if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2137       left_ptr = speculative_left_ptr;
2138     } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2139       left_ptr = speculative_left_ptr;
2140     }
2141   }
2142   if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2143     ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2144     if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2145       right_ptr = speculative_right_ptr;
2146     } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2147       right_ptr = speculative_right_ptr;
2148     }
2149   }
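       // Speculative null-ness is only adopted if the matching trap has not fired too
       // often: always-null speculation relies on a null_assert trap, never-null
       // speculation on a null_check trap.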
2150 
2151   if (left_ptr == ProfileAlwaysNull) {
2152     // Comparison with null. Assert the input is indeed null and we're done.
2153     acmp_always_null_input(left, tleft, btest, eq_region);
2154     return;
2155   }
2156   if (right_ptr == ProfileAlwaysNull) {
2157     // Comparison with null. Assert the input is indeed null and we're done.
2158     acmp_always_null_input(right, tright, btest, eq_region);
2159     return;
2160   }
2161   if (left_type != NULL && !left_type->is_inlinetype()) {
2162     // Comparison with an object of known type
2163     acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2164     return;
2165   }
2166   if (right_type != NULL && !right_type->is_inlinetype()) {
2167     // Comparison with an object of known type
2168     acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2169     return;
2170   }
2171   if (!left_inline_type) {
2172     // Comparison with an object known not to be an inline type
2173     acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2174     return;
2175   }
2176   if (!right_inline_type) {
2177     // Comparison with an object known not to be an inline type
2178     acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2179     return;
2180   }
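       // Neither profiling nor the static types could rule out inline types for either
       // operand, so emit the general shape: null-check the right operand, test whether
       // it is an inline type, null-check the left operand, and compare the two klasses.
       // Every path that already proves inequality is merged into ne_region; only two
       // non-null inline objects of the same class reach the substitutability call below.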
2181 
2182   // Pointers are not equal, check if the right operand is non-null
2183   Node* ne_region = new RegionNode(6);
2184   Node* null_ctl;
2185   Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2186   ne_region->init_req(1, null_ctl);
2187 
2188   // The right operand is non-null, check if it is an inline type
2189   Node* is_value = inline_type_test(not_null_right);
2190   IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2191   Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2192   ne_region->init_req(2, not_value);
2193   set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2194 
2195   // The right operand is an inline type, check if the left operand is non-null
2196   Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2197   ne_region->init_req(3, null_ctl);
2198 
2199   // Check if both operands are of the same class.
2200   Node* kls_left = load_object_klass(not_null_left);
2201   Node* kls_right = load_object_klass(not_null_right);
2202   Node* kls_cmp = CmpP(kls_left, kls_right);
2203   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2204   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2205   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2206   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2207   ne_region->init_req(4, kls_ne);
2208 
2209   if (stopped()) {
2210     record_for_igvn(ne_region);
2211     set_control(_gvn.transform(ne_region));
2212     if (btest == BoolTest::ne) {
2213       {
2214         PreserveJVMState pjvms(this);
2215         int target_bci = iter().get_dest();
2216         merge(target_bci);
2217       }
2218       record_for_igvn(eq_region);
2219       set_control(_gvn.transform(eq_region));
2220     }
2221     return;
2222   }
2223 
2224   // Both operands are value types of the same class, so we need to perform a
2225   // substitutability test. Delegate to PrimitiveObjectMethods::isSubstitutable().
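       // Roughly, at the Java level, substitutability means field-wise equality.
       // Illustrative sketch only (hypothetical primitive class, not the actual library
       // code; the real isSubstitutable() also recurses into nested value-class fields
       // and has special rules for float/double fields):
       //
       //   primitive class Point { int x; int y; }
       //   // p1 == p2  <==>  same class && p1.x == p2.x && p1.y == p2.y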
2226   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2227   Node* mem = reset_memory();
2228   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2229 
2230   Node* eq_io_phi = NULL;
2231   Node* eq_mem_phi = NULL;
2232   if (eq_region != NULL) {
2233     eq_io_phi = PhiNode::make(eq_region, i_o());
2234     eq_mem_phi = PhiNode::make(eq_region, mem);
2235   }
2236 
2237   set_all_memory(mem);
2238 
2239   kill_dead_locals();
2240   ciMethod* subst_method = ciEnv::current()->PrimitiveObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2241   CallStaticJavaNode* call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2242   call->set_override_symbolic_info(true);
2243   call->init_req(TypeFunc::Parms, not_null_left);
2244   call->init_req(TypeFunc::Parms+1, not_null_right);
2245   inc_sp(2);
2246   set_edges_for_java_call(call, false, false);
2247   Node* ret = set_results_for_java_call(call, false, true);
2248   dec_sp(2);
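       // The substitutability check is emitted as a regular static Java call with the
       // two non-null buffered oops as arguments. override_symbolic_info(true) marks the
       // call as not matching any symbolic reference at this bci (there is no invoke
       // bytecode here). The sp is bumped past the two popped operands around the call so
       // they remain part of its debug info, presumably so that a deoptimization at the
       // call sees a consistent expression stack.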
2249 
2250   // Test the return value of PrimitiveObjectMethods::isSubstitutable()
2251   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2252   Node* ctl = C->top();
2253   if (btest == BoolTest::eq) {
2254     PreserveJVMState pjvms(this);
2255     do_if(btest, subst_cmp);
2256     if (!stopped()) {
2257       ctl = control();
2258     }
2259   } else {
2260     assert(btest == BoolTest::ne, "only eq or ne");
2261     PreserveJVMState pjvms(this);
2262     do_if(btest, subst_cmp, false, &ctl);
2263     if (!stopped()) {
2264       eq_region->init_req(2, control());
2265       eq_io_phi->init_req(2, i_o());
2266       eq_mem_phi->init_req(2, reset_memory());
2267     }
2268   }
2269   ne_region->init_req(5, ctl);
2270   ne_io_phi->init_req(5, i_o());
2271   ne_mem_phi->init_req(5, reset_memory());
2272 
2273   record_for_igvn(ne_region);
2274   set_control(_gvn.transform(ne_region));
2275   set_i_o(_gvn.transform(ne_io_phi));
2276   set_all_memory(_gvn.transform(ne_mem_phi));
2277 
2278   if (btest == BoolTest::ne) {
2279     {
2280       PreserveJVMState pjvms(this);
2281       int target_bci = iter().get_dest();
2282       merge(target_bci);
2283     }
2284 
2285     record_for_igvn(eq_region);
2286     set_control(_gvn.transform(eq_region));
2287     set_i_o(_gvn.transform(eq_io_phi));
2288     set_all_memory(_gvn.transform(eq_mem_phi));
2289   }
2290 }
2291 
2292 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
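       // Only turn a never-taken branch direction into an uncommon trap if there is an
       // interpreter to fall back to and the profile (seems_never_taken) together with
       // the shape of the comparison (seems_stable_comparison) suggests the assumption
       // is unlikely to be invalidated.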
2293   // Don't want to speculate on uncommon traps when running with -Xcomp
2294   if (!UseInterpreter) {
2295     return false;
2296   }
2297   return (seems_never_taken(prob) && seems_stable_comparison());
2298 }
2299 
2300 void Parse::maybe_add_predicate_after_if(Block* path) {
2301   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2302     // Add predicates at the bci of the if dominating the loop so that traps
2303     // can be recorded on the if's profile data.
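         // The "empty" predicates are placeholders that loop predication can later fill
         // in; repush_if_args() temporarily restores the if's operands so the predicates'
         // JVM state (and thus any trap they take) is attributed to the dominating if.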
2304     int bc_depth = repush_if_args();
2305     add_empty_predicates();
2306     dec_sp(bc_depth);
2307     path->set_has_predicates();
2308   }
2309 }
2310 
2311 
2312 //----------------------------adjust_map_after_if------------------------------
2313 // Adjust the JVM state to reflect the result of taking this path.
2314 // This means inspecting the CmpNode controlling this branch,
2315 // seeing how it constrains a tested value, and then deciding
2316 // whether it is worth encoding this constraint as graph nodes
2317 // in the current abstract interpretation map.
2318 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {

2319   if (!c->is_Cmp()) {
2320     maybe_add_predicate_after_if(path);
2321     return;
2322   }
2323 
2324   if (stopped() || btest == BoolTest::illegal) {
2325     return;                             // nothing to do
2326   }
2327 
2328   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2329 
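       // If the profile says this direction of the branch is essentially never taken,
       // don't compile it at all: re-push the if's operands and replace the path with an
       // unstable_if uncommon trap, so taking it deoptimizes to the interpreter and a
       // later recompilation can drop the assumption.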
2330   if (path_is_suitable_for_uncommon_trap(prob)) {
2331     repush_if_args();
2332     uncommon_trap(Deoptimization::Reason_unstable_if,
2333                   Deoptimization::Action_reinterpret,
2334                   NULL,
2335                   (is_fallthrough ? "taken always" : "taken never"));
2336     return;
2337   }
2338 

2508   if (c->Opcode() == Op_CmpP &&
2509       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2510       c->in(2)->is_Con()) {
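         // This CmpP compares a klass loaded from some object against a constant. If
         // that object has a speculative type, cast it (maybe_cast_profiled_obj) and
         // rebuild the AddP/LoadKlass/DecodeNKlass chain on the cast, so the klass
         // comparison can often constant-fold, at the cost of a speculation trap if the
         // profile turns out to be wrong.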
2511     Node* load_klass = NULL;
2512     Node* decode = NULL;
2513     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2514       decode = c->in(1);
2515       load_klass = c->in(1)->in(1);
2516     } else {
2517       load_klass = c->in(1);
2518     }
2519     if (load_klass->in(2)->is_AddP()) {
2520       Node* addp = load_klass->in(2);
2521       Node* obj = addp->in(AddPNode::Address);
2522       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2523       if (obj_type->speculative_type_not_null() != NULL) {
2524         ciKlass* k = obj_type->speculative_type();
2525         inc_sp(2);
2526         obj = maybe_cast_profiled_obj(obj, k);
2527         dec_sp(2);
2528         if (obj->is_InlineType()) {
2529           assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2530           obj = obj->as_InlineType()->get_oop();
2531         }
2532         // Make the CmpP use the casted obj
2533         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2534         load_klass = load_klass->clone();
2535         load_klass->set_req(2, addp);
2536         load_klass = _gvn.transform(load_klass);
2537         if (decode != NULL) {
2538           decode = decode->clone();
2539           decode->set_req(1, load_klass);
2540           load_klass = _gvn.transform(decode);
2541         }
2542         c = c->clone();
2543         c->set_req(1, load_klass);
2544         c = _gvn.transform(c);
2545       }
2546     }
2547   }
2548   return c;
2549 }
2550 
2551 //------------------------------do_one_bytecode--------------------------------

3357     // See if we can get some profile data and hand it off to the next block
3358     Block *target_block = block()->successor_for_bci(target_bci);
3359     if (target_block->pred_count() != 1)  break;
3360     ciMethodData* methodData = method()->method_data();
3361     if (!methodData->is_mature())  break;
3362     ciProfileData* data = methodData->bci_to_data(bci());
3363     assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
3364     int taken = ((ciJumpData*)data)->taken();
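         // scale_count() roughly normalizes the raw MDO count against the method's
         // invocation count, so an incomplete profile does not skew the frequency that
         // is handed to the target block.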
3365     taken = method()->scale_count(taken);
3366     target_block->set_count(taken);
3367     break;
3368   }
3369 
3370   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3371   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3372   handle_if_null:
3373     // If this is a backwards branch in the bytecodes, add Safepoint
3374     maybe_add_safepoint(iter().get_dest());
3375     a = null();
3376     b = pop();
3377     if (b->is_InlineType()) {
3378       // Null checking a scalarized but nullable inline type. Check the IsInit
3379       // input instead of the oop input to avoid keeping buffer allocations alive.
3380       c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3381     } else {
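           // Use speculative null-ness from profiling: a value speculated never-null gets
           // a trapping null check (removing the null path), a value speculated
           // always-null gets a null assert, and the CmpP against null below can then
           // fold. The operand is re-pushed (inc_sp/dec_sp) so the trap state still has
           // it on the expression stack.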
3382       if (!_gvn.type(b)->speculative_maybe_null() &&
3383           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3384         inc_sp(1);
3385         Node* null_ctl = top();
3386         b = null_check_oop(b, &null_ctl, true, true, true);
3387         assert(null_ctl->is_top(), "no null control here");
3388         dec_sp(1);
3389       } else if (_gvn.type(b)->speculative_always_null() &&
3390                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3391         inc_sp(1);
3392         b = null_assert(b);
3393         dec_sp(1);
3394       }
3395       c = _gvn.transform( new CmpPNode(b, a) );
3396     }
3397     do_ifnull(btest, c);
3398     break;
3399 
3400   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3401   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3402   handle_if_acmp:
3403     // If this is a backwards branch in the bytecodes, add Safepoint
3404     maybe_add_safepoint(iter().get_dest());
3405     a = pop();
3406     b = pop();
3407     do_acmp(btest, b, a);


3408     break;
3409 
3410   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3411   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3412   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3413   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3414   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3415   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3416   handle_ifxx:
3417     // If this is a backwards branch in the bytecodes, add Safepoint
3418     maybe_add_safepoint(iter().get_dest());
3419     a = _gvn.intcon(0);
3420     b = pop();
3421     c = _gvn.transform( new CmpINode(b, a) );
3422     do_if(btest, c);
3423     break;
3424 
3425   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3426   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3427   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;

3442     break;
3443 
3444   case Bytecodes::_lookupswitch:
3445     do_lookupswitch();
3446     break;
3447 
3448   case Bytecodes::_invokestatic:
3449   case Bytecodes::_invokedynamic:
3450   case Bytecodes::_invokespecial:
3451   case Bytecodes::_invokevirtual:
3452   case Bytecodes::_invokeinterface:
3453     do_call();
3454     break;
3455   case Bytecodes::_checkcast:
3456     do_checkcast();
3457     break;
3458   case Bytecodes::_instanceof:
3459     do_instanceof();
3460     break;
3461   case Bytecodes::_anewarray:
3462     do_newarray();
3463     break;
3464   case Bytecodes::_newarray:
3465     do_newarray((BasicType)iter().get_index());
3466     break;
3467   case Bytecodes::_multianewarray:
3468     do_multianewarray();
3469     break;
3470   case Bytecodes::_new:
3471     do_new();
3472     break;
3473   case Bytecodes::_aconst_init:
3474     do_aconst_init();
3475     break;
3476   case Bytecodes::_withfield:
3477     do_withfield();
3478     break;
3479 
3480   case Bytecodes::_jsr:
3481   case Bytecodes::_jsr_w:
3482     do_jsr();
3483     break;
3484 
3485   case Bytecodes::_ret:
3486     do_ret();
3487     break;
3488 
3489 
3490   case Bytecodes::_monitorenter:
3491     do_monitor_enter();
3492     break;
3493 
3494   case Bytecodes::_monitorexit:
3495     do_monitor_exit();
3496     break;
3497 
3498   case Bytecodes::_breakpoint: