src/hotspot/share/opto/graphKit.cpp

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"


  26 #include "asm/register.hpp"
  27 #include "ci/ciNativeEntryPoint.hpp"
  28 #include "ci/ciObjArray.hpp"
  29 #include "ci/ciUtilities.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "gc/shared/c2/barrierSetC2.hpp"
  34 #include "interpreter/interpreter.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/castnode.hpp"
  38 #include "opto/convertnode.hpp"
  39 #include "opto/graphKit.hpp"
  40 #include "opto/idealKit.hpp"

  41 #include "opto/intrinsicnode.hpp"
  42 #include "opto/locknode.hpp"
  43 #include "opto/machnode.hpp"

  44 #include "opto/opaquenode.hpp"
  45 #include "opto/parse.hpp"
  46 #include "opto/rootnode.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "opto/subtypenode.hpp"
  49 #include "runtime/deoptimization.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "utilities/bitMap.inline.hpp"
  52 #include "utilities/growableArray.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 
  55 //----------------------------GraphKit-----------------------------------------
  56 // Main utility constructor.
  57 GraphKit::GraphKit(JVMState* jvms)
  58   : Phase(Phase::Parser),
  59     _env(C->env()),
  60     _gvn(*C->initial_gvn()),
  61     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  62 {

  63   _exceptions = jvms->map()->next_exception();
  64   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  65   set_jvms(jvms);
  66 }
  67 
  68 // Private constructor for parser.
  69 GraphKit::GraphKit()
  70   : Phase(Phase::Parser),
  71     _env(C->env()),
  72     _gvn(*C->initial_gvn()),
  73     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  74 {
  75   _exceptions = NULL;
  76   set_map(NULL);
  77   debug_only(_sp = -99);
  78   debug_only(set_bci(-99));
  79 }
  80 
  81 
  82 
  83 //---------------------------clean_stack---------------------------------------
  84 // Clear away rubbish from the stack area of the JVM state.
  85 // This destroys any arguments that may be waiting on the stack.

 818         if (PrintMiscellaneous && (Verbose || WizardMode)) {
 819           tty->print_cr("Zombie local %d: ", local);
 820           jvms->dump();
 821         }
 822         return false;
 823       }
 824     }
 825   }
 826   return true;
 827 }
 828 
 829 #endif //ASSERT
 830 
 831 // Helper function for deciding whether certain bytecodes should be reexecuted if deoptimization happens.
 832 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
 833   ciMethod* cur_method = jvms->method();
 834   int       cur_bci   = jvms->bci();
 835   if (cur_method != NULL && cur_bci != InvocationEntryBci) {
 836     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
 837     return Interpreter::bytecode_should_reexecute(code) ||
 838            (is_anewarray && code == Bytecodes::_multianewarray);
 839     // Reexecute _multianewarray bytecode which was replaced with
 840     // sequence of [a]newarray. See Parse::do_multianewarray().
 841     //
 842     // Note: the interpreter should not have it set since this optimization
 843     // is limited by dimensions and guarded by a flag, so in some cases
 844     // multianewarray() runtime calls will be generated and
 845     // the bytecode should not be reexecuted (the stack will not be reset).
 846   } else {
 847     return false;
 848   }
 849 }
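// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// A hedged example of how a helper like the one above is typically consumed
// when debug info is attached to a safepoint: the implied reexecute bit is
// recorded on the per-frame JVMState so the deoptimizer knows whether to
// re-run the current bytecode. The wrapper name and call site are assumptions
// for illustration only.
static void example_mark_reexecute_bit(JVMState* out_jvms, bool is_anewarray) {
  // Record whether the deoptimizer must re-run the current bytecode, e.g. a
  // _multianewarray that the parser rewrote into a sequence of [a]newarray.
  out_jvms->set_should_reexecute(should_reexecute_implied_by_bytecode(out_jvms, is_anewarray));
}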
 850 
 851 // Helper function for adding JVMState and debug information to a node
 852 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
 853   // Add the safepoint edges to the call (or other safepoint).
 854 
 855   // Make sure dead locals are set to top.  This
 856   // should help register allocation time and cut down on the size
 857   // of the deoptimization information.
 858   assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

1078       ciSignature* declared_signature = NULL;
1079       ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
1080       assert(declared_signature != NULL, "cannot be null");
1081       inputs   = declared_signature->arg_size_for_bc(code);
1082       int size = declared_signature->return_type()->size();
1083       depth = size - inputs;
1084     }
1085     break;
1086 
1087   case Bytecodes::_multianewarray:
1088     {
1089       ciBytecodeStream iter(method());
1090       iter.reset_to_bci(bci());
1091       iter.next();
1092       inputs = iter.get_dimensions();
1093       assert(rsize == 1, "");
1094       depth = rsize - inputs;
1095     }
1096     break;
1097 
1098   case Bytecodes::_ireturn:
1099   case Bytecodes::_lreturn:
1100   case Bytecodes::_freturn:
1101   case Bytecodes::_dreturn:
1102   case Bytecodes::_areturn:
1103     assert(rsize == -depth, "");
1104     inputs = rsize;
1105     break;
1106 
1107   case Bytecodes::_jsr:
1108   case Bytecodes::_jsr_w:
1109     inputs = 0;
1110     depth  = 1;                  // S.B. depth=1, not zero
1111     break;
1112 
1113   default:
1114     // bytecode produces a typed result
1115     inputs = rsize - depth;
1116     assert(inputs >= 0, "");
1117     break;

1160   Node* conv = _gvn.transform( new ConvI2LNode(offset));
1161   Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1162   return _gvn.transform( new AndLNode(conv, mask) );
1163 }
1164 
1165 Node* GraphKit::ConvL2I(Node* offset) {
1166   // short-circuit a common case
1167   jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1168   if (offset_con != (jlong)Type::OffsetBot) {
1169     return intcon((int) offset_con);
1170   }
1171   return _gvn.transform( new ConvL2INode(offset));
1172 }
1173 
1174 //-------------------------load_object_klass-----------------------------------
1175 Node* GraphKit::load_object_klass(Node* obj) {
1176   // Special-case a fresh allocation to avoid building nodes:
1177   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1178   if (akls != NULL)  return akls;
1179   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1180   return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
1181 }
1182 
1183 //-------------------------load_array_length-----------------------------------
1184 Node* GraphKit::load_array_length(Node* array) {
1185   // Special-case a fresh allocation to avoid building nodes:
1186   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
1187   Node *alen;
1188   if (alloc == NULL) {
1189     Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1190     alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1191   } else {
1192     alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1193   }
1194   return alen;
1195 }
1196 
1197 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1198                                    const TypeOopPtr* oop_type,
1199                                    bool replace_length_in_map) {
1200   Node* length = alloc->Ideal_length();

1209         replace_in_map(length, ccast);
1210       }
1211       return ccast;
1212     }
1213   }
1214   return length;
1215 }
1216 
1217 //------------------------------do_null_check----------------------------------
1218 // Helper function to do a NULL pointer check.  Returned value is
1219 // the incoming address with NULL casted away.  You are allowed to use the
1220 // not-null value only if you are control dependent on the test.
1221 #ifndef PRODUCT
1222 extern int explicit_null_checks_inserted,
1223            explicit_null_checks_elided;
1224 #endif
1225 Node* GraphKit::null_check_common(Node* value, BasicType type,
1226                                   // optional arguments for variations:
1227                                   bool assert_null,
1228                                   Node* *null_control,
1229                                   bool speculative) {

1230   assert(!assert_null || null_control == NULL, "not both at once");
1231   if (stopped())  return top();
1232   NOT_PRODUCT(explicit_null_checks_inserted++);
1233 
1234   // Construct NULL check
1235   Node *chk = NULL;
1236   switch(type) {
1237     case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1238     case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;

1239     case T_ARRAY  : // fall through
1240       type = T_OBJECT;  // simplify further tests
1241     case T_OBJECT : {
1242       const Type *t = _gvn.type( value );
1243 
1244       const TypeOopPtr* tp = t->isa_oopptr();
1245       if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
1246           // Only for do_null_check, not any of its siblings:
1247           && !assert_null && null_control == NULL) {
1248         // Usually, any field access or invocation on an unloaded oop type
1249         // will simply fail to link, since the statically linked class is
1250         // likely also to be unloaded.  However, in -Xcomp mode, sometimes
1251         // the static class is loaded but the sharper oop type is not.
1252         // Rather than checking for this obscure case in lots of places,
1253         // we simply observe that a null check on an unloaded class
1254         // will always be followed by a nonsense operation, so we
1255         // can just issue the uncommon trap here.
1256         // Our access to the unloaded class will only be correct
1257         // after it has been loaded and initialized, which requires
1258         // a trip through the interpreter.

1316         }
1317         Node *oldcontrol = control();
1318         set_control(cfg);
1319         Node *res = cast_not_null(value);
1320         set_control(oldcontrol);
1321         NOT_PRODUCT(explicit_null_checks_elided++);
1322         return res;
1323       }
1324       cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1325       if (cfg == NULL)  break;  // Quit at region nodes
1326       depth++;
1327     }
1328   }
1329 
1330   //-----------
1331   // Branch to failure if null
1332   float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
1333   Deoptimization::DeoptReason reason;
1334   if (assert_null) {
1335     reason = Deoptimization::reason_null_assert(speculative);
1336   } else if (type == T_OBJECT) {
1337     reason = Deoptimization::reason_null_check(speculative);
1338   } else {
1339     reason = Deoptimization::Reason_div0_check;
1340   }
1341   // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1342   // ciMethodData::has_trap_at will return a conservative -1 if any
1343   // must-be-null assertion has failed.  This could cause performance
1344   // problems for a method after its first do_null_assert failure.
1345   // Consider using 'Reason_class_check' instead?
1346 
1347   // To cause an implicit null check, we set the not-null probability
1348   // to the maximum (PROB_MAX).  For an explicit check the probability
1349   // is set to a smaller value.
1350   if (null_control != NULL || too_many_traps(reason)) {
1351     // probability is less likely
1352     ok_prob =  PROB_LIKELY_MAG(3);
1353   } else if (!assert_null &&
1354              (ImplicitNullCheckThreshold > 0) &&
1355              method() != NULL &&
1356              (method()->method_data()->trap_count(reason)

1390   }
1391 
1392   if (assert_null) {
1393     // Cast obj to null on this path.
1394     replace_in_map(value, zerocon(type));
1395     return zerocon(type);
1396   }
1397 
1398   // Cast obj to not-null on this path, if there is no null_control.
1399   // (If there is a null_control, a non-null value may come back to haunt us.)
1400   if (type == T_OBJECT) {
1401     Node* cast = cast_not_null(value, false);
1402     if (null_control == NULL || (*null_control) == top())
1403       replace_in_map(value, cast);
1404     value = cast;
1405   }
1406 
1407   return value;
1408 }
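// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// Hedged examples of the two common calling patterns for null_check_common().
// 'receiver' is an assumed oop-typed node already present in the current map;
// the wrapper function itself is hypothetical.
static void example_null_check_patterns(GraphKit& kit, Node* receiver) {
  // 1) Plain check: trap (or throw) on null and use the not-null result.
  Node* not_null = kit.null_check_common(receiver, T_OBJECT,
                                         /*assert_null*/ false,
                                         /*null_control*/ NULL,
                                         /*speculative*/ false);

  // 2) Branching form: capture the null path instead of trapping, so the
  //    caller can build an explicit null/non-null diamond.
  Node* null_ctl = kit.top();
  Node* cast = kit.null_check_common(receiver, T_OBJECT,
                                     /*assert_null*/ false,
                                     &null_ctl,
                                     /*speculative*/ false);
  // ... merge 'null_ctl' and the fall-through path with a RegionNode ...
  (void)not_null; (void)cast;  // results would feed the subsequent IR
}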
1409 
1410 
1411 //------------------------------cast_not_null----------------------------------
1412 // Cast obj to not-null on this path
1413 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1414   const Type *t = _gvn.type(obj);
1415   const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1416   // Object is already not-null?
1417   if( t == t_not_null ) return obj;
1418 
1419   Node *cast = new CastPPNode(obj,t_not_null);
1420   cast->init_req(0, control());
1421   cast = _gvn.transform( cast );
1422 
1423   // Scan for instances of 'obj' in the current JVM mapping.
1424   // These instances are known to be not-null after the test.
1425   if (do_replace_in_map)
1426     replace_in_map(obj, cast);
1427 
1428   return cast;                  // Return casted value
1429 }
1430 
1431 // Sometimes in intrinsics, we implicitly know an object is not null
1432 // (there's no actual null check) so we can cast it to not null. In
1433 // the course of optimizations, the input to the cast can become null.

1527                           MemNode::MemOrd mo,
1528                           LoadNode::ControlDependency control_dependency,
1529                           bool require_atomic_access,
1530                           bool unaligned,
1531                           bool mismatched,
1532                           bool unsafe,
1533                           uint8_t barrier_data) {
1534   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1535   const TypePtr* adr_type = NULL; // debug-mode-only argument
1536   debug_only(adr_type = C->get_adr_type(adr_idx));
1537   Node* mem = memory(adr_idx);
1538   Node* ld;
1539   if (require_atomic_access && bt == T_LONG) {
1540     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1541   } else if (require_atomic_access && bt == T_DOUBLE) {
1542     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1543   } else {
1544     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1545   }
1546   ld = _gvn.transform(ld);
1547   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {

1548     // Improve graph before escape analysis and boxing elimination.
1549     record_for_igvn(ld);
1550   }
1551   return ld;
1552 }
1553 
1554 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1555                                 int adr_idx,
1556                                 MemNode::MemOrd mo,
1557                                 bool require_atomic_access,
1558                                 bool unaligned,
1559                                 bool mismatched,
1560                                 bool unsafe) {
1561   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1562   const TypePtr* adr_type = NULL;
1563   debug_only(adr_type = C->get_adr_type(adr_idx));
1564   Node *mem = memory(adr_idx);
1565   Node* st;
1566   if (require_atomic_access && bt == T_LONG) {
1567     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);

1578   }
1579   if (unsafe) {
1580     st->as_Store()->set_unsafe_access();
1581   }
1582   st = _gvn.transform(st);
1583   set_memory(st, adr_idx);
1584   // Back-to-back stores can only remove the intermediate store with DU info,
1585   // so push on the worklist for the optimizer.
1586   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1587     record_for_igvn(st);
1588 
1589   return st;
1590 }
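// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// Hedged example of the raw-memory pattern these two factories support; it
// mirrors what increment_counter() later in this file does with a VM counter
// that lives outside the Java heap. 'counter_adr' is an assumed raw pointer node.
static void example_raw_counter_bump(GraphKit& kit, Node* counter_adr) {
  const int adr_idx = Compile::AliasIdxRaw;   // raw (non-oop) memory slice
  Node* ctl  = kit.control();
  Node* cnt  = kit.make_load(ctl, counter_adr, TypeLong::LONG, T_LONG, adr_idx,
                             MemNode::unordered);
  Node* incr = kit.gvn().transform(new AddLNode(cnt, kit.longcon(1)));
  kit.store_to_memory(ctl, counter_adr, incr, T_LONG, adr_idx, MemNode::unordered);
}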
1591 
1592 Node* GraphKit::access_store_at(Node* obj,
1593                                 Node* adr,
1594                                 const TypePtr* adr_type,
1595                                 Node* val,
1596                                 const Type* val_type,
1597                                 BasicType bt,
1598                                 DecoratorSet decorators) {

1599   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1600   // could be delayed during Parse (for example, in adjust_map_after_if()).
1601   // Execute transformation here to avoid barrier generation in such case.
1602   if (_gvn.type(val) == TypePtr::NULL_PTR) {
1603     val = _gvn.makecon(TypePtr::NULL_PTR);
1604   }
1605 
1606   if (stopped()) {
1607     return top(); // Dead path ?
1608   }
1609 
1610   assert(val != NULL, "not dead path");
1611 
1612   C2AccessValuePtr addr(adr, adr_type);
1613   C2AccessValue value(val, val_type);
1614   C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1615   if (access.is_raw()) {
1616     return _barrier_set->BarrierSetC2::store_at(access, value);
1617   } else {
1618     return _barrier_set->store_at(access, value);
1619   }
1620 }
1621 
1622 Node* GraphKit::access_load_at(Node* obj,   // containing obj
1623                                Node* adr,   // actual address to load val from
1624                                const TypePtr* adr_type,
1625                                const Type* val_type,
1626                                BasicType bt,
1627                                DecoratorSet decorators) {

1628   if (stopped()) {
1629     return top(); // Dead path ?
1630   }
1631 
1632   C2AccessValuePtr addr(adr, adr_type);
1633   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
1634   if (access.is_raw()) {
1635     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1636   } else {
1637     return _barrier_set->load_at(access, val_type);
1638   }
1639 }
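// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// Hedged example of a GC-barrier-aware object-field read and write built on
// the accessors above. 'holder', 'field_offset' and the new value are assumed;
// the decorators follow the IN_HEAP/MO_* convention used elsewhere in this file.
static void example_field_access(GraphKit& kit, Node* holder, int field_offset, Node* new_val) {
  Node* adr = kit.basic_plus_adr(holder, field_offset);
  const TypePtr* adr_type = kit.gvn().type(adr)->is_ptr();
  const DecoratorSet decorators = IN_HEAP | MO_UNORDERED;

  // Read through the barrier set (emits load barriers when the GC needs them).
  Node* old_val = kit.access_load_at(holder, adr, adr_type,
                                     TypeInstPtr::BOTTOM, T_OBJECT, decorators);
  // Write through the barrier set (emits pre/post write barriers when needed).
  kit.access_store_at(holder, adr, adr_type, new_val,
                      TypeInstPtr::BOTTOM, T_OBJECT, decorators);
  (void)old_val;
}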
1640 
1641 Node* GraphKit::access_load(Node* adr,   // actual address to load val from
1642                             const Type* val_type,
1643                             BasicType bt,
1644                             DecoratorSet decorators) {
1645   if (stopped()) {
1646     return top(); // Dead path ?
1647   }
1648 
1649   C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1650   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
1651   if (access.is_raw()) {
1652     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1653   } else {

1719                                      const Type* value_type,
1720                                      BasicType bt,
1721                                      DecoratorSet decorators) {
1722   C2AccessValuePtr addr(adr, adr_type);
1723   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1724   if (access.is_raw()) {
1725     return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1726   } else {
1727     return _barrier_set->atomic_add_at(access, new_val, value_type);
1728   }
1729 }
1730 
1731 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1732   return _barrier_set->clone(this, src, dst, size, is_array);
1733 }
1734 
1735 //-------------------------array_element_address-------------------------
1736 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1737                                       const TypeInt* sizetype, Node* ctrl) {
1738   uint shift  = exact_log2(type2aelembytes(elembt));
1739   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1740 
1741   // short-circuit a common case (saves lots of confusing waste motion)
1742   jint idx_con = find_int_con(idx, -1);
1743   if (idx_con >= 0) {
1744     intptr_t offset = header + ((intptr_t)idx_con << shift);
1745     return basic_plus_adr(ary, offset);
1746   }
1747 
1748   // must be correct type for alignment purposes
1749   Node* base  = basic_plus_adr(ary, header);
1750   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1751   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1752   return basic_plus_adr(ary, base, scale);
1753 }
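// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// Hedged worked example of the constant-index fast path above: the element
// offset is simply header + (idx << shift). The 16-byte header below is only
// an example value; the real one comes from arrayOopDesc::base_offset_in_bytes().
static constexpr intptr_t example_element_offset(intptr_t header, jint idx, uint shift) {
  return header + ((intptr_t)idx << shift);
}
// e.g. a T_INT array (shift == 2) with a 16-byte header and idx == 3:
static_assert(example_element_offset(16, 3, 2) == 28, "worked example: 16 + (3 << 2)");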
1754 
1755 //-------------------------load_array_element-------------------------
1756 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1757   const Type* elemtype = arytype->elem();
1758   BasicType elembt = elemtype->array_element_basic_type();

1759   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1760   if (elembt == T_NARROWOOP) {
1761     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1762   }
1763   Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1764                             IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1765   return ld;
1766 }
1767 
1768 //-------------------------set_arguments_for_java_call-------------------------
1769 // Arguments (pre-popped from the stack) are taken from the JVMS.
1770 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1771   // Add the call arguments:
1772   uint nargs = call->method()->arg_size();
1773   for (uint i = 0; i < nargs; i++) {
1774     Node* arg = argument(i);
1775     call->init_req(i + TypeFunc::Parms, arg);
1776   }
1777 }
1778 
1779 //---------------------------set_edges_for_java_call---------------------------
1780 // Connect a newly created call into the current JVMS.
1781 // The return value node (if any) is produced later by set_results_for_java_call.
1782 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1783 
1784   // Add the predefined inputs:
1785   call->init_req( TypeFunc::Control, control() );
1786   call->init_req( TypeFunc::I_O    , i_o() );
1787   call->init_req( TypeFunc::Memory , reset_memory() );
1788   call->init_req( TypeFunc::FramePtr, frameptr() );
1789   call->init_req( TypeFunc::ReturnAdr, top() );
1790 
1791   add_safepoint_edges(call, must_throw);
1792 
1793   Node* xcall = _gvn.transform(call);
1794 
1795   if (xcall == top()) {
1796     set_control(top());
1797     return;
1798   }
1799   assert(xcall == call, "call identity is stable");
1800 
1801   // Re-use the current map to produce the result.
1802 
1803   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1804   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1805   set_all_memory_call(xcall, separate_io_proj);
1806 
1807   //return xcall;   // no need, caller already has it
1808 }
1809 
1810 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1811   if (stopped())  return top();  // maybe the call folded up?
1812 
1813   // Capture the return value, if any.
1814   Node* ret;
1815   if (call->method() == NULL ||
1816       call->method()->return_type()->basic_type() == T_VOID)
1817         ret = top();
1818   else  ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1819 
1820   // Note:  Since any out-of-line call can produce an exception,
1821   // we always insert an I_O projection from the call into the result.
1822 
1823   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1824 
1825   if (separate_io_proj) {
1826     // The caller requested separate projections be used by the fall
1827     // through and exceptional paths, so replace the projections for
1828     // the fall through path.
1829     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1830     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1831   }
1832   return ret;
1833 }
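// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// Hedged outline of how the three helpers above cooperate when a Java call is
// emitted: arguments are copied from the JVMS, the call is wired into the
// control/memory/io graph, and the result projection is extracted. 'call' is
// assumed to be a freshly allocated CallJavaNode for the target method, with
// its arguments already on the expression stack.
static Node* example_emit_java_call(GraphKit& kit, CallJavaNode* call) {
  kit.set_arguments_for_java_call(call);   // copy args from the JVMS into the call node
  kit.set_edges_for_java_call(call, /*must_throw*/ false, /*separate_io_proj*/ false);
  Node* ret = kit.set_results_for_java_call(call, /*separate_io_proj*/ false,
                                            /*deoptimize*/ false);
  return ret;   // top() for void methods, otherwise the result projection
}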
1834 
1835 //--------------------set_predefined_input_for_runtime_call--------------------
1836 // Reading and setting the memory state is way conservative here.
1837 // The real problem is that I am not doing real Type analysis on memory,
1838 // so I cannot distinguish card mark stores from other stores.  Across a GC
1839 // point the Store Barrier and the card mark memory has to agree.  I cannot
1840 // have a card mark store and its barrier split across the GC point from
1841 // either above or below.  Here I get that to happen by reading ALL of memory.
1842 // A better answer would be to separate out card marks from other memory.
1843 // For now, return the input memory state, so that it can be reused
1844 // after the call, if this call has restricted memory effects.
1845 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1846   // Set fixed predefined input arguments
1847   Node* memory = reset_memory();
1848   Node* m = narrow_mem == NULL ? memory : narrow_mem;
1849   call->init_req( TypeFunc::Control,   control()  );
1850   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1851   call->init_req( TypeFunc::Memory,    m          ); // may gc ptrs

1902     if (use->is_MergeMem()) {
1903       wl.push(use);
1904     }
1905   }
1906 }
1907 
1908 // Replace the call with the current state of the kit.
1909 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
1910   JVMState* ejvms = NULL;
1911   if (has_exceptions()) {
1912     ejvms = transfer_exceptions_into_jvms();
1913   }
1914 
1915   ReplacedNodes replaced_nodes = map()->replaced_nodes();
1916   ReplacedNodes replaced_nodes_exception;
1917   Node* ex_ctl = top();
1918 
1919   SafePointNode* final_state = stop();
1920 
1921   // Find all the needed outputs of this call
1922   CallProjections callprojs;
1923   call->extract_projections(&callprojs, true);
1924 
1925   Unique_Node_List wl;
1926   Node* init_mem = call->in(TypeFunc::Memory);
1927   Node* final_mem = final_state->in(TypeFunc::Memory);
1928   Node* final_ctl = final_state->in(TypeFunc::Control);
1929   Node* final_io = final_state->in(TypeFunc::I_O);
1930 
1931   // Replace all the old call edges with the edges from the inlining result
1932   if (callprojs.fallthrough_catchproj != NULL) {
1933     C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
1934   }
1935   if (callprojs.fallthrough_memproj != NULL) {
1936     if (final_mem->is_MergeMem()) {
1937       // Parser's exits MergeMem was not transformed but may be optimized
1938       final_mem = _gvn.transform(final_mem);
1939     }
1940     C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
1941     add_mergemem_users_to_worklist(wl, final_mem);
1942   }
1943   if (callprojs.fallthrough_ioproj != NULL) {
1944     C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_io);
1945   }
1946 
1947   // Replace the result with the new result if it exists and is used
1948   if (callprojs.resproj != NULL && result != NULL) {
1949     C->gvn_replace_by(callprojs.resproj, result);




1950   }
1951 
1952   if (ejvms == NULL) {
1953     // No exception edges, so simply kill off those paths
1954     if (callprojs.catchall_catchproj != NULL) {
1955       C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
1956     }
1957     if (callprojs.catchall_memproj != NULL) {
1958       C->gvn_replace_by(callprojs.catchall_memproj,   C->top());
1959     }
1960     if (callprojs.catchall_ioproj != NULL) {
1961       C->gvn_replace_by(callprojs.catchall_ioproj,    C->top());
1962     }
1963     // Replace the old exception object with top
1964     if (callprojs.exobj != NULL) {
1965       C->gvn_replace_by(callprojs.exobj, C->top());
1966     }
1967   } else {
1968     GraphKit ekit(ejvms);
1969 
1970     // Load my combined exception state into the kit, with all phis transformed:
1971     SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
1972     replaced_nodes_exception = ex_map->replaced_nodes();
1973 
1974     Node* ex_oop = ekit.use_exception_state(ex_map);
1975 
1976     if (callprojs.catchall_catchproj != NULL) {
1977       C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
1978       ex_ctl = ekit.control();
1979     }
1980     if (callprojs.catchall_memproj != NULL) {
1981       Node* ex_mem = ekit.reset_memory();
1982       C->gvn_replace_by(callprojs.catchall_memproj,   ex_mem);
1983       add_mergemem_users_to_worklist(wl, ex_mem);
1984     }
1985     if (callprojs.catchall_ioproj != NULL) {
1986       C->gvn_replace_by(callprojs.catchall_ioproj,    ekit.i_o());
1987     }
1988 
1989     // Replace the old exception object with the newly created one
1990     if (callprojs.exobj != NULL) {
1991       C->gvn_replace_by(callprojs.exobj, ex_oop);
1992     }
1993   }
1994 
1995   // Disconnect the call from the graph
1996   call->disconnect_inputs(C);
1997   C->gvn_replace_by(call, C->top());
1998 
1999   // Clean up any MergeMems that feed other MergeMems since the
2000   // optimizer doesn't like that.
2001   while (wl.size() > 0) {
2002     _gvn.transform(wl.pop());
2003   }
2004 
2005   if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
2006     replaced_nodes.apply(C, final_ctl);
2007   }
2008   if (!ex_ctl->is_top() && do_replaced_nodes) {
2009     replaced_nodes_exception.apply(C, ex_ctl);
2010   }
2011 }
2012 
2013 
2014 //------------------------------increment_counter------------------------------
2015 // for statistics: increment a VM counter by 1
2016 
2017 void GraphKit::increment_counter(address counter_addr) {
2018   Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2019   increment_counter(adr1);
2020 }
2021 
2022 void GraphKit::increment_counter(Node* counter_addr) {
2023   int adr_type = Compile::AliasIdxRaw;
2024   Node* ctrl = control();
2025   Node* cnt  = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);

2183  *
2184  * @param n          node that the type applies to
2185  * @param exact_kls  type from profiling
2186  * @param ptr_kind   did profiling see null?
2187  *
2188  * @return           node with improved type
2189  */
2190 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2191   const Type* current_type = _gvn.type(n);
2192   assert(UseTypeSpeculation, "type speculation must be on");
2193 
2194   const TypePtr* speculative = current_type->speculative();
2195 
2196   // Should the klass from the profile be recorded in the speculative type?
2197   if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2198     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
2199     const TypeOopPtr* xtype = tklass->as_instance_type();
2200     assert(xtype->klass_is_exact(), "Should be exact");
2201     // Any reason to believe n is not null (from this profiling or a previous one)?
2202     assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2203     const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2204     // record the new speculative type's depth
2205     speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2206     speculative = speculative->with_inline_depth(jvms()->depth());
2207   } else if (current_type->would_improve_ptr(ptr_kind)) {
2208     // Profiling reports that null was never seen, so we can change the
2209     // speculative type to a non-null ptr.
2210     if (ptr_kind == ProfileAlwaysNull) {
2211       speculative = TypePtr::NULL_PTR;
2212     } else {
2213       assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2214       const TypePtr* ptr = TypePtr::NOTNULL;
2215       if (speculative != NULL) {
2216         speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2217       } else {
2218         speculative = ptr;
2219       }
2220     }
2221   }
2222 
2223   if (speculative != current_type->speculative()) {
2224     // Build a type with a speculative type (what we think we know
2225     // about the type but will need a guard when we use it)
2226     const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
2227     // We're changing the type, we need a new CheckCast node to carry
2228     // the new type. The new type depends on the control: what
2229     // profiling tells us is only valid from here as far as we can
2230     // tell.
2231     Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2232     cast = _gvn.transform(cast);
2233     replace_in_map(n, cast);
2234     n = cast;
2235   }
2236 
2237   return n;
2238 }
2239 
2240 /**
2241  * Record profiling data from receiver profiling at an invoke with the
2242  * type system so that it can propagate it (speculation)
2243  *
2244  * @param n  receiver node
2245  *
2246  * @return   node with improved type
2247  */
2248 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2249   if (!UseTypeSpeculation) {
2250     return n;
2251   }
2252   ciKlass* exact_kls = profile_has_unique_klass();
2253   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2254   if ((java_bc() == Bytecodes::_checkcast ||
2255        java_bc() == Bytecodes::_instanceof ||
2256        java_bc() == Bytecodes::_aastore) &&
2257       method()->method_data()->is_mature()) {
2258     ciProfileData* data = method()->method_data()->bci_to_data(bci());
2259     if (data != NULL) {
2260       if (!data->as_BitData()->null_seen()) {
2261         ptr_kind = ProfileNeverNull;
2262       } else {
2263         assert(data->is_ReceiverTypeData(), "bad profile data type");
2264         ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2265         uint i = 0;
2266         for (; i < call->row_limit(); i++) {
2267           ciKlass* receiver = call->receiver(i);
2268           if (receiver != NULL) {
2269             break;




2270           }

2271         }
2272         ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2273       }
2274     }
2275   }
2276   return record_profile_for_speculation(n, exact_kls, ptr_kind);
2277 }
2278 
2279 /**
2280  * Record profiling data from argument profiling at an invoke with the
2281  * type system so that it can propagate it (speculation)
2282  *
2283  * @param dest_method  target method for the call
2284  * @param bc           what invoke bytecode is this?
2285  */
2286 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2287   if (!UseTypeSpeculation) {
2288     return;
2289   }
2290   const TypeFunc* tf    = TypeFunc::make(dest_method);
2291   int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
2292   int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2293   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2294     const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2295     if (is_reference_type(targ->basic_type())) {
2296       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2297       ciKlass* better_type = NULL;
2298       if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2299         record_profile_for_speculation(argument(j), better_type, ptr_kind);
2300       }
2301       i++;
2302     }
2303   }
2304 }
2305 
2306 /**
2307  * Record profiling data from parameter profiling at an invoke with
2308  * the type system so that it can propagate it (speculation)
2309  */
2310 void GraphKit::record_profiled_parameters_for_speculation() {
2311   if (!UseTypeSpeculation) {
2312     return;
2313   }
2314   for (int i = 0, j = 0; i < method()->arg_size() ; i++) {

2328  * the type system so that it can propagate it (speculation)
2329  */
2330 void GraphKit::record_profiled_return_for_speculation() {
2331   if (!UseTypeSpeculation) {
2332     return;
2333   }
2334   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2335   ciKlass* better_type = NULL;
2336   if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2337     // If profiling reports a single type for the return value,
2338     // feed it to the type system so it can propagate it as a
2339     // speculative type
2340     record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2341   }
2342 }
2343 
2344 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2345   if (Matcher::strict_fp_requires_explicit_rounding) {
2346     // (Note:  TypeFunc::make has a cache that makes this fast.)
2347     const TypeFunc* tf    = TypeFunc::make(dest_method);
2348     int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
2349     for (int j = 0; j < nargs; j++) {
2350       const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
2351       if (targ->basic_type() == T_DOUBLE) {
2352         // If any parameters are doubles, they must be rounded before
2353         // the call; dprecision_rounding does the gvn.transform.
2354         Node *arg = argument(j);
2355         arg = dprecision_rounding(arg);
2356         set_argument(j, arg);
2357       }
2358     }
2359   }
2360 }
2361 
2362 // rounding for strict float precision conformance
2363 Node* GraphKit::precision_rounding(Node* n) {
2364   if (Matcher::strict_fp_requires_explicit_rounding) {
2365 #ifdef IA32
2366     if (UseSSE == 0) {
2367       return _gvn.transform(new RoundFloatNode(0, n));
2368     }
2369 #else
2370     Unimplemented();

2479                                   // The first NULL ends the list.
2480                                   Node* parm0, Node* parm1,
2481                                   Node* parm2, Node* parm3,
2482                                   Node* parm4, Node* parm5,
2483                                   Node* parm6, Node* parm7) {
2484   assert(call_addr != NULL, "must not call NULL targets");
2485 
2486   // Slow-path call
2487   bool is_leaf = !(flags & RC_NO_LEAF);
2488   bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
2489   if (call_name == NULL) {
2490     assert(!is_leaf, "must supply name for leaf");
2491     call_name = OptoRuntime::stub_name(call_addr);
2492   }
2493   CallNode* call;
2494   if (!is_leaf) {
2495     call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2496   } else if (flags & RC_NO_FP) {
2497     call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2498   } else  if (flags & RC_VECTOR){
2499     uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2500     call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2501   } else {
2502     call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2503   }
2504 
2505   // The following is similar to set_edges_for_java_call,
2506   // except that the memory effects of the call are restricted to AliasIdxRaw.
2507 
2508   // Slow path call has no side-effects, uses few values
2509   bool wide_in  = !(flags & RC_NARROW_MEM);
2510   bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2511 
2512   Node* prev_mem = NULL;
2513   if (wide_in) {
2514     prev_mem = set_predefined_input_for_runtime_call(call);
2515   } else {
2516     assert(!wide_out, "narrow in => narrow out");
2517     Node* narrow_mem = memory(adr_type);
2518     prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2519   }

2578 
2579 //-----------------------------make_native_call-------------------------------
2580 Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
2581   // Select just the actual call args to pass on
2582   // [MethodHandle fallback, long addr, HALF addr, ... args , NativeEntryPoint nep]
2583   //                                             |          |
2584   //                                             V          V
2585   //                                             [ ... args ]
2586   uint n_filtered_args = nargs - 4; // -fallback, -addr (2), -nep;
2587   ResourceMark rm;
2588   Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
2589   const Type** arg_types = TypeTuple::fields(n_filtered_args);
2590   GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad());
2591 
2592   VMReg* argRegs = nep->argMoves();
2593   {
2594     for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
2595         vm_arg_pos < n_filtered_args; vm_arg_pos++) {
2596       uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)
2597       Node* node = argument(vm_unfiltered_arg_pos);
2598       const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
2599       VMReg reg = type == Type::HALF
2600         ? VMRegImpl::Bad()
2601         : argRegs[java_arg_read_pos++];
2602 
2603       argument_nodes[vm_arg_pos] = node;
2604       arg_types[TypeFunc::Parms + vm_arg_pos] = type;
2605       arg_regs.at_put(vm_arg_pos, reg);
2606     }
2607   }
2608 
2609   uint n_returns = call_type->range()->cnt() - TypeFunc::Parms;
2610   GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
2611   const Type** ret_types = TypeTuple::fields(n_returns);
2612 
2613   VMReg* retRegs = nep->returnMoves();
2614   {
2615     for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
2616         vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
2617       const Type* type = call_type->range()->field_at(TypeFunc::Parms + vm_ret_pos);
2618       VMReg reg = type == Type::HALF
2619         ? VMRegImpl::Bad()
2620         : retRegs[java_ret_read_pos++];
2621 
2622       ret_regs.at_put(vm_ret_pos, reg);
2623       ret_types[TypeFunc::Parms + vm_ret_pos] = type;
2624     }
2625   }
2626 
2627   const TypeFunc* new_call_type = TypeFunc::make(
2628     TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types),
2629     TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)
2630   );
2631 
2632   if (nep->need_transition()) {
2633     RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr,
2634                                                               nep->shadow_space(),
2635                                                               arg_regs, ret_regs);
2636     if (invoker == NULL) {
2637       C->record_failure("native invoker not implemented on this platform");

2926 
2927   // Now do a linear scan of the secondary super-klass array.  Again, no real
2928   // performance impact (too rare) but it's gotta be done.
2929   // Since the code is rarely used, there is no penalty for moving it
2930   // out of line, and it can only improve I-cache density.
2931   // The decision to inline or out-of-line this final check is platform
2932   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
2933   Node* psc = gvn.transform(
2934     new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
2935 
2936   IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
2937   r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
2938   r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
2939 
2940   // Return false path; set default control to true path.
2941   *ctrl = gvn.transform(r_ok_subtype);
2942   return gvn.transform(r_not_subtype);
2943 }
2944 
2945 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {




2946   bool expand_subtype_check = C->post_loop_opts_phase() ||   // macro node expansion is over
2947                               ExpandSubTypeCheckAtParseTime; // forced expansion
2948   if (expand_subtype_check) {
2949     MergeMemNode* mem = merged_memory();
2950     Node* ctrl = control();
2951     Node* subklass = obj_or_subklass;
2952     if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
2953       subklass = load_object_klass(obj_or_subklass);
2954     }
2955 
2956     Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn);
2957     set_control(ctrl);
2958     return n;
2959   }
2960 
2961   Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass));
2962   Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
2963   IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2964   set_control(_gvn.transform(new IfTrueNode(iff)));
2965   return _gvn.transform(new IfFalseNode(iff));
2966 }
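// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// Hedged example of consuming gen_subtype_check(): the returned control is the
// "not a subtype" path, while the current control() is left on the subtype-hit
// path, exactly as gen_instanceof() below wires them into its region/phi.
// The region/phi slot numbers are assumptions for illustration.
static void example_use_subtype_check(GraphKit& kit, Node* obj, Node* superklass,
                                      RegionNode* region, PhiNode* phi) {
  Node* not_subtype_ctrl = kit.gen_subtype_check(obj, superklass);
  region->init_req(1, kit.control());       // hit:  control() is the subtype path
  phi   ->init_req(1, kit.intcon(1));
  region->init_req(2, not_subtype_ctrl);    // miss: returned control is the failure path
  phi   ->init_req(2, kit.intcon(0));
}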
2967 
2968 // Profile-driven exact type check:
2969 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
2970                                     float prob,
2971                                     Node* *casted_receiver) {
2972   assert(!klass->is_interface(), "no exact type check on interfaces");
2973 
2974   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
2975   Node* recv_klass = load_object_klass(receiver);
2976   Node* want_klass = makecon(tklass);
2977   Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
2978   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
2979   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2980   set_control( _gvn.transform(new IfTrueNode (iff)));
2981   Node* fail = _gvn.transform(new IfFalseNode(iff));
2982 
2983   if (!stopped()) {
2984     const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
2985     const TypeOopPtr* recvx_type = tklass->as_instance_type();
2986     assert(recvx_type->klass_is_exact(), "");
2987 
2988     if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
2989       // Subsume downstream occurrences of receiver with a cast to
2990       // recv_xtype, since now we know what the type will be.
2991       Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
2992       (*casted_receiver) = _gvn.transform(cast);
2993       // (User must make the replace_in_map call.)
2994     }
2995   }
2996 
2997   return fail;
2998 }
2999 
3000 //------------------------------subtype_check_receiver-------------------------
3001 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3002                                        Node** casted_receiver) {
3003   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
3004   Node* want_klass = makecon(tklass);
3005 
3006   Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3007 
3008   // Ignore interface type information until interface types are properly tracked.
3009   if (!stopped() && !klass->is_interface()) {
3010     const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3011     const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3012     if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3013       Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
3014       (*casted_receiver) = _gvn.transform(cast);
3015     }
3016   }
3017 
3018   return slow_ctl;
3019 }
3020 
3021 //------------------------------seems_never_null-------------------------------
3022 // Use null_seen information if it is available from the profile.
3023 // If we see an unexpected null at a type check we record it and force a
3024 // recompile; the offending check will be recompiled to handle NULLs.
3025 // If we see several offending BCIs, then all checks in the
3026 // method will be recompiled.
3027 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3028   speculating = !_gvn.type(obj)->speculative_maybe_null();
3029   Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3030   if (UncommonNullCast               // Cutout for this technique
3031       && obj != null()               // And not the -Xcomp stupid case?
3032       && !too_many_traps(reason)
3033       ) {
3034     if (speculating) {
3035       return true;
3036     }
3037     if (data == NULL)
3038       // Edge case:  no mature data.  Be optimistic here.
3039       return true;
3040     // If the profile has not seen a null, assume it won't happen.
3041     assert(java_bc() == Bytecodes::_checkcast ||
3042            java_bc() == Bytecodes::_instanceof ||
3043            java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");



3044     return !data->as_BitData()->null_seen();
3045   }
3046   speculating = false;
3047   return false;
3048 }
3049 
3050 void GraphKit::guard_klass_being_initialized(Node* klass) {
3051   int init_state_off = in_bytes(InstanceKlass::init_state_offset());
3052   Node* adr = basic_plus_adr(top(), klass, init_state_off);
3053   Node* init_state = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3054                                     adr->bottom_type()->is_ptr(), TypeInt::BYTE,
3055                                     T_BYTE, MemNode::unordered);
3056   init_state = _gvn.transform(init_state);
3057 
3058   Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized));
3059 
3060   Node* chk = _gvn.transform(new CmpINode(being_initialized_state, init_state));
3061   Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
3062 
3063   { BuildCutout unless(this, tst, PROB_MAX);

3103 
3104 //------------------------maybe_cast_profiled_receiver-------------------------
3105 // If the profile has seen exactly one type, narrow to exactly that type.
3106 // Subsequent type checks will always fold up.
3107 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3108                                              ciKlass* require_klass,
3109                                              ciKlass* spec_klass,
3110                                              bool safe_for_replace) {
3111   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
3112 
3113   Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
3114 
3115   // Make sure we haven't already deoptimized from this tactic.
3116   if (too_many_traps_or_recompiles(reason))
3117     return NULL;
3118 
3119   // (No, this isn't a call, but it's enough like a virtual call
3120   // to use the same ciMethod accessor to get the profile info...)
3121   // If we have a speculative type use it instead of profiling (which
3122   // may not help us)
3123   ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
3124   if (exact_kls != NULL) {// no cast failures here
3125     if (require_klass == NULL ||
3126         C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
3127       // If we narrow the type to match what the type profile sees or
3128       // the speculative type, we can then remove the rest of the
3129       // cast.
3130       // This is a win, even if the exact_kls is very specific,
3131       // because downstream operations, such as method calls,
3132       // will often benefit from the sharper type.
3133       Node* exact_obj = not_null_obj; // will get updated in place...
3134       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
3135                                             &exact_obj);
3136       { PreserveJVMState pjvms(this);
3137         set_control(slow_ctl);
3138         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3139       }
3140       if (safe_for_replace) {
3141         replace_in_map(not_null_obj, exact_obj);
3142       }
3143       return exact_obj;

3208 // and the reflective instance-of call.
3209 Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
3210   kill_dead_locals();           // Benefit all the uncommon traps
3211   assert( !stopped(), "dead parse path should be checked in callers" );
3212   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
3213          "must check for not-null not-dead klass in callers");
3214 
3215   // Make the merge point
3216   enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
3217   RegionNode* region = new RegionNode(PATH_LIMIT);
3218   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3219   C->set_has_split_ifs(true); // Has chance for split-if optimization
3220 
3221   ciProfileData* data = NULL;
3222   if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
3223     data = method()->method_data()->bci_to_data(bci());
3224   }
3225   bool speculative_not_null = false;
3226   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
3227                          && seems_never_null(obj, data, speculative_not_null));

3228 
3229   // Null check; get casted pointer; set region slot 3
3230   Node* null_ctl = top();
3231   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3232 
3233   // If not_null_obj is dead, only null-path is taken
3234   if (stopped()) {              // Doing instance-of on a NULL?
3235     set_control(null_ctl);
3236     return intcon(0);
3237   }
3238   region->init_req(_null_path, null_ctl);
3239   phi   ->init_req(_null_path, intcon(0)); // Set null path value
3240   if (null_ctl == top()) {
3241     // Do this eagerly, so that pattern matches like is_diamond_phi
3242     // will work even during parsing.
3243     assert(_null_path == PATH_LIMIT-1, "delete last");
3244     region->del_req(_null_path);
3245     phi   ->del_req(_null_path);
3246   }
3247 
3248   // Do we know that the type check always succeeds?
3249   bool known_statically = false;
3250   if (_gvn.type(superklass)->singleton()) {
3251     ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
3252     ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
3253     if (subk != NULL && subk->is_loaded()) {
3254       int static_res = C->static_subtype_check(superk, subk);
3255       known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);


3256     }
3257   }
3258 
3259   if (!known_statically) {
3260     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3261     // We may not have profiling here or it may not help us. If we
3262     // have a speculative type use it to perform an exact cast.
3263     ciKlass* spec_obj_type = obj_type->speculative_type();
3264     if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
3265       Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
3266       if (stopped()) {            // Profile disagrees with this path.
3267         set_control(null_ctl);    // Null is the only remaining possibility.
3268         return intcon(0);
3269       }
3270       if (cast_obj != NULL) {
3271         not_null_obj = cast_obj;


3272       }
3273     }
3274   }
3275 
3276   // Generate the subtype check
3277   Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3278 
3279   // Plug in the success path to the general merge in slot 1.
3280   region->init_req(_obj_path, control());
3281   phi   ->init_req(_obj_path, intcon(1));
3282 
3283   // Plug in the failing path to the general merge in slot 2.
3284   region->init_req(_fail_path, not_subtype_ctrl);
3285   phi   ->init_req(_fail_path, intcon(0));
3286 
3287   // Return final merged results
3288   set_control( _gvn.transform(region) );
3289   record_for_igvn(region);
3290 
3291   // If we know the type check always succeeds then we don't use the
3292   // profiling data at this bytecode. Don't lose it, feed it to the
3293   // type system as a speculative type.
3294   if (safe_for_replace) {
3295     Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3296     replace_in_map(obj, casted_obj);
3297   }
3298 
3299   return _gvn.transform(phi);
3300 }
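// --- Editor's illustrative sketch (not part of graphKit.cpp) ---
// Hedged example of a parser-style caller of gen_instanceof(). 'obj' is the
// operand (gen_instanceof performs its own null check) and 'target_klass' is
// an assumed, loaded ciKlass*; the result is the 0/1 int value the instanceof
// bytecode leaves on the expression stack.
static void example_do_instanceof(GraphKit& kit, Node* obj, ciKlass* target_klass) {
  Node* superklass = kit.makecon(TypeKlassPtr::make(target_klass));
  Node* result     = kit.gen_instanceof(obj, superklass, /*safe_for_replace*/ true);
  kit.push(result);
}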
3301 
3302 //-------------------------------gen_checkcast---------------------------------
3303 // Generate a checkcast idiom.  Used by both the checkcast bytecode and the
3304 // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
3305 // uncommon-trap paths work.  Adjust stack after this call.
3306 // If failure_control is supplied and not null, it is filled in with
3307 // the control edge for the cast failure.  Otherwise, an appropriate
3308 // uncommon trap or exception is thrown.
3309 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
3310                               Node* *failure_control) {
3311   kill_dead_locals();           // Benefit all the uncommon traps
3312   const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
3313   const Type *toop = TypeOopPtr::make_from_klass(tk->klass());
3314 
3315   // Fast cutout:  Check the case that the cast is vacuously true.
3316   // This detects the common cases where the test will short-circuit
3317   // away completely.  We do this before we perform the null check,
3318   // because if the test is going to turn into zero code, we don't
3319   // want a residual null check left around.  (Causes a slowdown,
3320   // for example, in some objArray manipulations, such as a[i]=a[j].)
3321   if (tk->singleton()) {
3322     const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3323     if (objtp != NULL && objtp->klass() != NULL) {
3324       switch (C->static_subtype_check(tk->klass(), objtp->klass())) {
3325       case Compile::SSC_always_true:
3326         // If we know the type check always succeeds then we don't use
3327         // the profiling data at this bytecode. Don't lose it, feed it
3328         // to the type system as a speculative type.
3329         return record_profiled_receiver_for_speculation(obj);
3330       case Compile::SSC_always_false:
3331         // It needs a null check because a null will *pass* the cast check.
3332         // A non-null value will always produce an exception.
3333         if (!objtp->maybe_null()) {
3334           bool is_aastore = (java_bc() == Bytecodes::_aastore);
3335           Deoptimization::DeoptReason reason = is_aastore ?
3336             Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3337           builtin_throw(reason, makecon(TypeKlassPtr::make(objtp->klass())));
3338           return top();
3339         } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3340           return null_assert(obj);
3341         }
3342         break; // Fall through to full check
3343       }
3344     }
3345   }
3346 
3347   ciProfileData* data = NULL;
3348   bool safe_for_replace = false;
3349   if (failure_control == NULL) {        // use MDO in regular case only
3350     assert(java_bc() == Bytecodes::_aastore ||
3351            java_bc() == Bytecodes::_checkcast,
3352            "interpreter profiles type checks only for these BCs");
3353     data = method()->method_data()->bci_to_data(bci());
3354     safe_for_replace = true;

3355   }
3356 
3357   // Make the merge point
3358   enum { _obj_path = 1, _null_path, PATH_LIMIT };
3359   RegionNode* region = new RegionNode(PATH_LIMIT);
3360   Node*       phi    = new PhiNode(region, toop);
3361   C->set_has_split_ifs(true); // Has chance for split-if optimization
3362 
3363   // Use null-cast information if it is available
3364   bool speculative_not_null = false;
3365   bool never_see_null = ((failure_control == NULL)  // regular case only
3366                          && seems_never_null(obj, data, speculative_not_null));
3367 
3368   // Null check; get casted pointer; set region slot 3
3369   Node* null_ctl = top();
3370   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3371 
3372   // If not_null_obj is dead, only null-path is taken
3373   if (stopped()) {              // Doing instance-of on a NULL?
3374     set_control(null_ctl);
3375     return null();
3376   }
3377   region->init_req(_null_path, null_ctl);
3378   phi   ->init_req(_null_path, null());  // Set null path value
3379   if (null_ctl == top()) {
3380     // Do this eagerly, so that pattern matches like is_diamond_phi
3381     // will work even during parsing.
3382     assert(_null_path == PATH_LIMIT-1, "delete last");
3383     region->del_req(_null_path);
3384     phi   ->del_req(_null_path);
3385   }
3386 
3387   Node* cast_obj = NULL;
3388   if (tk->klass_is_exact()) {
3389     // The following optimization tries to statically cast the speculative type of the object
3390     // (for example obtained during profiling) to the type of the superklass and then do a
3391     // dynamic check that the type of the object is what we expect. To work correctly
3392     // for checkcast and aastore the type of superklass should be exact.
3393     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3394     // We may not have profiling here or it may not help us. If we have
3395     // a speculative type use it to perform an exact cast.
3396     ciKlass* spec_obj_type = obj_type->speculative_type();
3397     if (spec_obj_type != NULL || data != NULL) {
3398       cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
3399       if (cast_obj != NULL) {
3400         if (failure_control != NULL) // failure is now impossible
3401           (*failure_control) = top();
3402         // adjust the type of the phi to the exact klass:
3403         phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3404       }
3405     }
3406   }
3407 
3408   if (cast_obj == NULL) {
3409     // Generate the subtype check
3410     Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass );
3411 
3412     // Plug in success path into the merge
3413     cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3414     // Failure path ends in uncommon trap (or may be dead - failure impossible)
3415     if (failure_control == NULL) {
3416       if (not_subtype_ctrl != top()) { // If failure is possible
3417         PreserveJVMState pjvms(this);
3418         set_control(not_subtype_ctrl);
3419         bool is_aastore = (java_bc() == Bytecodes::_aastore);
3420         Deoptimization::DeoptReason reason = is_aastore ?
3421           Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3422         builtin_throw(reason, load_object_klass(not_null_obj));
3423       }
3424     } else {
3425       (*failure_control) = not_subtype_ctrl;
3426     }
3427   }
3428 
3429   region->init_req(_obj_path, control());
3430   phi   ->init_req(_obj_path, cast_obj);
3431 
3432   // A merge of NULL or Casted-NotNull obj
3433   Node* res = _gvn.transform(phi);
3434 
3435   // Note I do NOT always 'replace_in_map(obj,result)' here.
3436   //  if( tk->klass()->can_be_primary_super()  )
3437     // This means that if I successfully store an Object into an array-of-String
3438     // I 'forget' that the Object is really now known to be a String.  I have to
3439     // do this because we don't have true union types for interfaces - if I store
3440     // a Baz into an array-of-Interface and then tell the optimizer it's an
3441     // Interface, I forget that it's also a Baz and cannot do Baz-like field
3442     // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
3443   //  replace_in_map( obj, res );
3444 
3445   // Return final merged results
3446   set_control( _gvn.transform(region) );
3447   record_for_igvn(region);
3448 
3449   return record_profiled_receiver_for_speculation(res);
3450 }
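// A rough sketch of the semantics the graph above encodes, assuming the usual
// checkcast/aastore behavior (throw_cce() / throw_ase() are placeholders for
// the builtin_throw() calls with Reason_class_check / Reason_array_check):
//
//   if (obj == NULL)          return NULL;          // null always passes the cast
//   if (is_subtype(obj, tk))  return (T_cast) obj;  // CheckCastPP sharpens the type
//   is_aastore ? throw_ase() : throw_cce();         // or exit via *failure_control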
3451 
3452 //------------------------------next_monitor-----------------------------------
3453 // What number should be given to the next monitor?
3454 int GraphKit::next_monitor() {
3455   int current = jvms()->monitor_depth()* C->sync_stack_slots();
3456   int next = current + C->sync_stack_slots();
3457   // Keep the toplevel high water mark current:
3458   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
3459   return current;
3460 }
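// A worked example, assuming C->sync_stack_slots() == 2 (one pointer-sized
// BasicLock on a 64-bit platform with 4-byte stack slots):
//
//   1st monitor: monitor_depth() == 0  ->  current = 0, next = 2
//   2nd monitor: monitor_depth() == 1  ->  current = 2, next = 4
//
// Each call returns the starting slot of the new monitor and raises
// C->fixed_slots() to at least `next`, the frame's monitor high-water mark.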
3461 
3462 //------------------------------insert_mem_bar---------------------------------
3463 // Memory barrier to avoid floating things around
3464 // The membar serves as a pinch point between both control and all memory slices.
3465 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3466   MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3467   mb->init_req(TypeFunc::Control, control());
3468   mb->init_req(TypeFunc::Memory,  reset_memory());
3469   Node* membar = _gvn.transform(mb);

3497   }
3498   Node* membar = _gvn.transform(mb);
3499   set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3500   if (alias_idx == Compile::AliasIdxBot) {
3501     merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3502   } else {
3503     set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3504   }
3505   return membar;
3506 }
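// Typical uses elsewhere in this file (shown only to illustrate the two entry
// points): shared_unlock() below inserts a full pinch point over all memory
// slices with
//
//   insert_mem_bar(Op_MemBarReleaseLock);
//
// while set_output_for_allocation() narrows the barrier to one alias category:
//
//   insert_mem_bar_volatile(Op_Initialize, rawidx, rawoop);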
3507 
3508 //------------------------------shared_lock------------------------------------
3509 // Emit locking code.
3510 FastLockNode* GraphKit::shared_lock(Node* obj) {
3511   // bci is either a monitorenter bc or InvocationEntryBci
3512   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3513   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3514 
3515   if( !GenerateSynchronizationCode )
3516     return NULL;                // Not locking things?

3517   if (stopped())                // Dead monitor?
3518     return NULL;
3519 
3520   assert(dead_locals_are_killed(), "should kill locals before sync. point");
3521 
3522   // Box the stack location
3523   Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3524   Node* mem = reset_memory();
3525 
3526   FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3527 
3528   // Create the rtm counters for this fast lock if needed.
3529   flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3530 
3531   // Add monitor to debug info for the slow path.  If we block inside the
3532   // slow path and de-opt, we need the monitor hanging around
3533   map()->push_monitor( flock );
3534 
3535   const TypeFunc *tf = LockNode::lock_type();
3536   LockNode *lock = new LockNode(C, tf);

3565   }
3566 #endif
3567 
3568   return flock;
3569 }
3570 
3571 
3572 //------------------------------shared_unlock----------------------------------
3573 // Emit unlocking code.
3574 void GraphKit::shared_unlock(Node* box, Node* obj) {
3575   // bci is either a monitorexit bc or InvocationEntryBci
3576   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3577   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3578 
3579   if( !GenerateSynchronizationCode )
3580     return;
3581   if (stopped()) {               // Dead monitor?
3582     map()->pop_monitor();        // Kill monitor from debug info
3583     return;
3584   }

3585 
3586   // Memory barrier to avoid floating things down past the locked region
3587   insert_mem_bar(Op_MemBarReleaseLock);
3588 
3589   const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3590   UnlockNode *unlock = new UnlockNode(C, tf);
3591 #ifdef ASSERT
3592   unlock->set_dbg_jvms(sync_jvms());
3593 #endif
3594   uint raw_idx = Compile::AliasIdxRaw;
3595   unlock->init_req( TypeFunc::Control, control() );
3596   unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3597   unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
3598   unlock->init_req( TypeFunc::FramePtr, frameptr() );
3599   unlock->init_req( TypeFunc::ReturnAdr, top() );
3600 
3601   unlock->init_req(TypeFunc::Parms + 0, obj);
3602   unlock->init_req(TypeFunc::Parms + 1, box);
3603   unlock = _gvn.transform(unlock)->as_Unlock();
3604 
3605   Node* mem = reset_memory();
3606 
3607   // unlock has no side-effects, sets few values
3608   set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3609 
3610   // Kill monitor from debug info
3611   map()->pop_monitor( );
3612 }
3613 
3614 //-------------------------------get_layout_helper-----------------------------
3615 // If the given klass is a constant or known to be an array,
3616 // fetch the constant layout helper value into constant_value
3617 // and return (Node*)NULL.  Otherwise, load the non-constant
3618 // layout helper value, and return the node which represents it.
3619 // This two-faced routine is useful because allocation sites
3620 // almost always feature constant types.
3621 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3622   const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
3623   if (!StressReflectiveCode && inst_klass != NULL) {
3624     ciKlass* klass = inst_klass->klass();
3625     bool    xklass = inst_klass->klass_is_exact();
3626     if (xklass || klass->is_array_klass()) {
3627       jint lhelper = klass->layout_helper();
3628       if (lhelper != Klass::_lh_neutral_value) {
3629         constant_value = lhelper;
3630         return (Node*) NULL;
3631       }
3632     }
3633   }
3634   constant_value = Klass::_lh_neutral_value;  // put in a known value
3635   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3636   return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3637 }
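// Roughly, the layout_helper is a single jint per Klass: for instance klasses a
// positive byte size with a low-order "slow path" bit, for array klasses a
// negative value packing the header size, element type tag and log2 element
// size, and _lh_neutral_value when nothing is known yet.  A minimal decode
// sketch, mirroring the shifts and masks used by new_array() below:
//
//   bool slow   = Klass::layout_helper_needs_slow_path(lh);
//   int  hsize  = ((juint)lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
//   int  eshift = Klass::layout_helper_log2_element_size(lh);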
3638 
3639 // We just put in an allocate/initialize with a big raw-memory effect.
3640 // Hook selected additional alias categories on the initialization.
3641 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3642                                 MergeMemNode* init_in_merge,
3643                                 Node* init_out_raw) {
3644   DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3645   assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3646 
3647   Node* prevmem = kit.memory(alias_idx);
3648   init_in_merge->set_memory_at(alias_idx, prevmem);
3649   kit.set_memory(init_out_raw, alias_idx);
3650 }
3651 
3652 //---------------------------set_output_for_allocation-------------------------
3653 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3654                                           const TypeOopPtr* oop_type,
3655                                           bool deoptimize_on_exception) {
3656   int rawidx = Compile::AliasIdxRaw;
3657   alloc->set_req( TypeFunc::FramePtr, frameptr() );
3658   add_safepoint_edges(alloc);
3659   Node* allocx = _gvn.transform(alloc);
3660   set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3661   // create memory projection for i_o
3662   set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3663   make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3664 
3665   // create a memory projection as for the normal control path
3666   Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3667   set_memory(malloc, rawidx);
3668 
3669   // a normal slow-call doesn't change i_o, but an allocation does
3670   // we create a separate i_o projection for the normal control path
3671   set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3672   Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3673 
3674   // put in an initialization barrier
3675   InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3676                                                  rawoop)->as_Initialize();
3677   assert(alloc->initialization() == init,  "2-way macro link must work");
3678   assert(init ->allocation()     == alloc, "2-way macro link must work");
3679   {
3680     // Extract memory strands which may participate in the new object's
3681     // initialization, and source them from the new InitializeNode.
3682     // This will allow us to observe initializations when they occur,
3683     // and link them properly (as a group) to the InitializeNode.
3684     assert(init->in(InitializeNode::Memory) == malloc, "");
3685     MergeMemNode* minit_in = MergeMemNode::make(malloc);
3686     init->set_req(InitializeNode::Memory, minit_in);
3687     record_for_igvn(minit_in); // fold it up later, if possible

3688     Node* minit_out = memory(rawidx);
3689     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3690     // Add an edge in the MergeMem for the header fields so an access
3691     // to one of those has correct memory state
3692     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3693     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3694     if (oop_type->isa_aryptr()) {
3695       const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3696       int            elemidx  = C->get_alias_index(telemref);
3697       hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3698     } else if (oop_type->isa_instptr()) {

3699       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
3700       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3701         ciField* field = ik->nonstatic_field_at(i);
3702         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3703           continue;  // do not bother to track really large numbers of fields
3704         // Find (or create) the alias category for this field:
3705         int fieldidx = C->alias_type(field)->index();
3706         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3707       }
3708     }
3709   }
3710 
3711   // Cast raw oop to the real thing...
3712   Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3713   javaoop = _gvn.transform(javaoop);
3714   C->set_recent_alloc(control(), javaoop);
3715   assert(just_allocated_object(control()) == javaoop, "just allocated");
3716 
3717 #ifdef ASSERT
3718   { // Verify that the AllocateNode::Ideal_allocation recognizers work:

3729       assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3730     }
3731   }
3732 #endif //ASSERT
3733 
3734   return javaoop;
3735 }
3736 
3737 //---------------------------new_instance--------------------------------------
3738 // This routine takes a klass_node which may be constant (for a static type)
3739 // or may be non-constant (for reflective code).  It will work equally well
3740 // for either, and the graph will fold nicely if the optimizer later reduces
3741 // the type to a constant.
3742 // The optional arguments are for specialized use by intrinsics:
3743 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
3744 //  - If 'return_size_val' is not null, report the total object size to the caller.
3745 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3746 Node* GraphKit::new_instance(Node* klass_node,
3747                              Node* extra_slow_test,
3748                              Node* *return_size_val,
3749                              bool deoptimize_on_exception) {

3750   // Compute size in doublewords
3751   // The size is always an integral number of doublewords, represented
3752   // as a positive bytewise size stored in the klass's layout_helper.
3753   // The layout_helper also encodes (in a low bit) the need for a slow path.
3754   jint  layout_con = Klass::_lh_neutral_value;
3755   Node* layout_val = get_layout_helper(klass_node, layout_con);
3756   int   layout_is_con = (layout_val == NULL);
3757 
3758   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
3759   // Generate the initial go-slow test.  It's either ALWAYS (return a
3760   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3761   // case) a computed value derived from the layout_helper.
3762   Node* initial_slow_test = NULL;
3763   if (layout_is_con) {
3764     assert(!StressReflectiveCode, "stress mode does not use these paths");
3765     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3766     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3767   } else {   // reflective case
3768     // This reflective path is used by Unsafe.allocateInstance.
3769     // (It may be stress-tested by specifying StressReflectiveCode.)
3770     // Basically, we want to get into the VM if there's an illegal argument.
3771     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3772     initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3773     if (extra_slow_test != intcon(0)) {
3774       initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3775     }
3776     // (Macro-expander will further convert this to a Bool, if necessary.)

3787 
3788     // Clear the low bits to extract layout_helper_size_in_bytes:
3789     assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3790     Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3791     size = _gvn.transform( new AndXNode(size, mask) );
3792   }
3793   if (return_size_val != NULL) {
3794     (*return_size_val) = size;
3795   }
3796 
3797   // This is a precise notnull oop of the klass.
3798   // (Actually, it need not be precise if this is a reflective allocation.)
3799   // It's what we cast the result to.
3800   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3801   if (!tklass)  tklass = TypeInstKlassPtr::OBJECT;
3802   const TypeOopPtr* oop_type = tklass->as_instance_type();
3803 
3804   // Now generate allocation code
3805 
3806   // The entire memory state is needed for the slow path of the allocation
3807   // since GC and deoptimization can happen.
3808   Node *mem = reset_memory();
3809   set_all_memory(mem); // Create new memory state
3810 
3811   AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3812                                          control(), mem, i_o(),
3813                                          size, klass_node,
3814                                          initial_slow_test);
3815 
3816   return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3817 }
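// A small worked example of the reflective path above, assuming the slow-path
// bit is the low bit (as the BytesPerLong assert implies) and LogBytesPerLong == 3
// (layout values are hypothetical):
//
//   layout_helper == 0x18 (24-byte instance, bit clear) -> fast path, size = 0x18 & ~7 = 24
//   layout_helper == 0x19 (same size, slow bit set, e.g. a finalizable class)
//                                                        -> slow call into the VM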
3818 
3819 //-------------------------------new_array-------------------------------------
3820 // helper for both newarray and anewarray
3821 // The 'length' parameter is (obviously) the length of the array.
3822 // See comments on new_instance for the meaning of the other arguments.
3823 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
3824                           Node* length,         // number of array elements
3825                           int   nargs,          // number of arguments to push back for uncommon trap
3826                           Node* *return_size_val,
3827                           bool deoptimize_on_exception) {
3828   jint  layout_con = Klass::_lh_neutral_value;
3829   Node* layout_val = get_layout_helper(klass_node, layout_con);
3830   int   layout_is_con = (layout_val == NULL);
3831 
3832   if (!layout_is_con && !StressReflectiveCode &&
3833       !too_many_traps(Deoptimization::Reason_class_check)) {
3834     // This is a reflective array creation site.
3835     // Optimistically assume that it is a subtype of Object[],
3836     // so that we can fold up all the address arithmetic.
3837     layout_con = Klass::array_layout_helper(T_OBJECT);
3838     Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3839     Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3840     { BuildCutout unless(this, bol_lh, PROB_MAX);
3841       inc_sp(nargs);
3842       uncommon_trap(Deoptimization::Reason_class_check,
3843                     Deoptimization::Action_maybe_recompile);
3844     }
3845     layout_val = NULL;
3846     layout_is_con = true;
3847   }
3848 
3849   // Generate the initial go-slow test.  Make sure we do not overflow
3850   // if length is huge (near 2Gig) or negative!  We do not need
3851   // exact double-words here, just a close approximation of needed
3852   // double-words.  We can't add any offset or rounding bits, lest we
3853   // take a size -1 of bytes and make it positive.  Use an unsigned
3854   // compare, so negative sizes look hugely positive.
3855   int fast_size_limit = FastAllocateSizeLimit;
3856   if (layout_is_con) {
3857     assert(!StressReflectiveCode, "stress mode does not use these paths");
3858     // Increase the size limit if we have exact knowledge of array type.
3859     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3860     fast_size_limit <<= (LogBytesPerLong - log2_esize);
3861   }
3862 
3863   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3864   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3865 
3866   // --- Size Computation ---
3867   // array_size = round_to_heap(array_header + (length << elem_shift));
3868   // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
3869   // and align_to(x, y) == ((x + y-1) & ~(y-1))
3870   // The rounding mask is strength-reduced, if possible.
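  // A worked example of the formula above (numbers illustrative): a long[] of
  // length 5 on a 64-bit VM with a 16-byte array header and
  // MinObjAlignmentInBytes == 8:
  //
  //   round_mask = 8 - 1 = 7,  elem_shift = 3 (log2 of sizeof(jlong))
  //   abody      = 5 << 3 = 40
  //   array_size = align_to(16 + 40, 8) = 56 bytes
  //
  // Since every jlong element is already 8-byte aligned, (round_mask & ~right_n_bits(3))
  // is zero and the mask is strength-reduced away below.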
3871   int round_mask = MinObjAlignmentInBytes - 1;
3872   Node* header_size = NULL;
3873   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3874   // (T_BYTE has the weakest alignment and size restrictions...)
3875   if (layout_is_con) {
3876     int       hsize  = Klass::layout_helper_header_size(layout_con);
3877     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3878     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3879     if ((round_mask & ~right_n_bits(eshift)) == 0)
3880       round_mask = 0;  // strength-reduce it if it goes away completely
3881     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3882     assert(header_size_min <= hsize, "generic minimum is smallest");
3883     header_size_min = hsize;
3884     header_size = intcon(hsize + round_mask);
3885   } else {
3886     Node* hss   = intcon(Klass::_lh_header_size_shift);
3887     Node* hsm   = intcon(Klass::_lh_header_size_mask);
3888     Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
3889     hsize       = _gvn.transform( new AndINode(hsize, hsm) );
3890     Node* mask  = intcon(round_mask);
3891     header_size = _gvn.transform( new AddINode(hsize, mask) );
3892   }
3893 
3894   Node* elem_shift = NULL;
3895   if (layout_is_con) {
3896     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3897     if (eshift != 0)
3898       elem_shift = intcon(eshift);
3899   } else {
3900     // There is no need to mask or shift this value.
3901     // The semantics of LShiftINode include an implicit mask to 0x1F.

3945   // places, one where the length is sharply limited, and the other
3946   // after a successful allocation.
3947   Node* abody = lengthx;
3948   if (elem_shift != NULL)
3949     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
3950   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
3951   if (round_mask != 0) {
3952     Node* mask = MakeConX(~round_mask);
3953     size       = _gvn.transform( new AndXNode(size, mask) );
3954   }
3955   // else if round_mask == 0, the size computation is self-rounding
3956 
3957   if (return_size_val != NULL) {
3958     // This is the size
3959     (*return_size_val) = size;
3960   }
3961 
3962   // Now generate allocation code
3963 
3964   // The entire memory state is needed for the slow path of the allocation
3965   // since GC and deoptimization can happen.
3966   Node *mem = reset_memory();
3967   set_all_memory(mem); // Create new memory state
3968 
3969   if (initial_slow_test->is_Bool()) {
3970     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3971     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3972   }
3973 
3974   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();

3975   Node* valid_length_test = _gvn.intcon(1);
3976   if (ary_type->klass()->is_array_klass()) {
3977     BasicType bt = ary_type->klass()->as_array_klass()->element_type()->basic_type();
3978     jint max = TypeAryPtr::max_array_length(bt);
3979     Node* valid_length_cmp  = _gvn.transform(new CmpUNode(length, intcon(max)));
3980     valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
3981   }
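  // Note that both the fast-size test and the length test use an unsigned
  // compare, so a negative length cannot sneak onto the fast path; for example
  // (values illustrative):
  //
  //   length == -1  ->  (juint)length == 0xFFFFFFFF, which is > fast_size_limit
  //                     and > max_array_length(bt), so both bools pick the slow
  //                     or invalid-length outcome.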
3982 
3983   // Create the AllocateArrayNode and its result projections
3984   AllocateArrayNode* alloc
3985     = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3986                             control(), mem, i_o(),
3987                             size, klass_node,
3988                             initial_slow_test,
3989                             length, valid_length_test);
3990 
3991   // Cast to correct type.  Note that the klass_node may be constant or not,
3992   // and in the latter case the actual array type will be inexact also.
3993   // (This happens via a non-constant argument to inline_native_newArray.)
3994   // In any case, the value of klass_node provides the desired array type.
3995   const TypeInt* length_type = _gvn.find_int_type(length);
3996   if (ary_type->isa_aryptr() && length_type != NULL) {
3997     // Try to get a better type than POS for the size
3998     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3999   }
4000 
4001   Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
4002 
4003   array_ideal_length(alloc, ary_type, true);
4004   return javaoop;
4005 }
4006 
4007 // The following "Ideal_foo" functions are placed here because they recognize
4008 // the graph shapes created by the functions immediately above.
4009 

4124   set_all_memory(ideal.merged_memory());
4125   set_i_o(ideal.i_o());
4126   set_control(ideal.ctrl());
4127 }
4128 
4129 void GraphKit::final_sync(IdealKit& ideal) {
4130   // Final sync IdealKit and graphKit.
4131   sync_kit(ideal);
4132 }
4133 
4134 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4135   Node* len = load_array_length(load_String_value(str, set_ctrl));
4136   Node* coder = load_String_coder(str, set_ctrl);
4137   // Divide length by 2 if coder is UTF16
4138   return _gvn.transform(new RShiftINode(len, coder));
4139 }
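// A worked example of the shift above, assuming the usual coder constants
// (CODER_LATIN1 == 0, CODER_UTF16 == 1): a 10-byte value array holds
// 10 >> 0 == 10 latin-1 chars, but only 10 >> 1 == 5 UTF-16 chars.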
4140 
4141 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4142   int value_offset = java_lang_String::value_offset();
4143   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4144                                                      false, NULL, 0);
4145   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4146   const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4147                                                   TypeAry::make(TypeInt::BYTE, TypeInt::POS),
4148                                                   ciTypeArrayKlass::make(T_BYTE), true, 0);
4149   Node* p = basic_plus_adr(str, str, value_offset);
4150   Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4151                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4152   return load;
4153 }
4154 
4155 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4156   if (!CompactStrings) {
4157     return intcon(java_lang_String::CODER_UTF16);
4158   }
4159   int coder_offset = java_lang_String::coder_offset();
4160   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4161                                                      false, NULL, 0);
4162   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4163 
4164   Node* p = basic_plus_adr(str, str, coder_offset);
4165   Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4166                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4167   return load;
4168 }
4169 
4170 void GraphKit::store_String_value(Node* str, Node* value) {
4171   int value_offset = java_lang_String::value_offset();
4172   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4173                                                      false, NULL, 0);
4174   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4175 
4176   access_store_at(str,  basic_plus_adr(str, value_offset), value_field_type,
4177                   value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4178 }
4179 
4180 void GraphKit::store_String_coder(Node* str, Node* value) {
4181   int coder_offset = java_lang_String::coder_offset();
4182   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4183                                                      false, NULL, 0);
4184   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4185 
4186   access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4187                   value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4188 }
4189 
4190 // Capture src and dst memory state with a MergeMemNode
4191 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4192   if (src_type == dst_type) {
4193     // Types are equal, we don't need a MergeMemNode
4194     return memory(src_type);
4195   }
4196   MergeMemNode* merge = MergeMemNode::make(map()->memory());
4197   record_for_igvn(merge); // fold it up later, if possible
4198   int src_idx = C->get_alias_index(src_type);
4199   int dst_idx = C->get_alias_index(dst_type);
4200   merge->set_memory_at(src_idx, memory(src_idx));
4201   merge->set_memory_at(dst_idx, memory(dst_idx));
4202   return merge;
4203 }
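// A usage sketch (illustrative only, `dst_type` is a placeholder): an intrinsic
// copying from a byte[] source into a differently-aliased destination can hand
// the merged state to a node that must observe both slices, e.g.
//
//   Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
//
// When the two types map to the same alias slice, the plain memory(src_type)
// is returned and no MergeMem is built.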

4276   i_char->init_req(2, AddI(i_char, intcon(2)));
4277 
4278   set_control(IfFalse(iff));
4279   set_memory(st, TypeAryPtr::BYTES);
4280 }
4281 
4282 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4283   if (!field->is_constant()) {
4284     return NULL; // Field not marked as constant.
4285   }
4286   ciInstance* holder = NULL;
4287   if (!field->is_static()) {
4288     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4289     if (const_oop != NULL && const_oop->is_instance()) {
4290       holder = const_oop->as_instance();
4291     }
4292   }
4293   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4294                                                         /*is_unsigned_load=*/false);
4295   if (con_type != NULL) {
4296     return makecon(con_type);
4297   }
4298   return NULL;
4299 }
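// A sketch of the intended folding, with a hypothetical field: for a
// `static final int MAX = 100;` that ciField::is_constant() accepts, the call
// above yields makecon(TypeInt::make(100)), so the field load disappears.  An
// instance field only folds when `obj` itself is a constant oop, which then
// supplies the holder instance.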

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "ci/ciInlineKlass.hpp"
  28 #include "ci/ciUtilities.hpp"
  29 #include "classfile/javaClasses.hpp"
  30 #include "ci/ciNativeEntryPoint.hpp"
  31 #include "ci/ciObjArray.hpp"
  32 #include "asm/register.hpp"
  33 #include "compiler/compileLog.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "opto/addnode.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/convertnode.hpp"
  41 #include "opto/graphKit.hpp"
  42 #include "opto/idealKit.hpp"
  43 #include "opto/inlinetypenode.hpp"
  44 #include "opto/intrinsicnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"
  49 #include "opto/parse.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subtypenode.hpp"
  53 #include "runtime/deoptimization.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "utilities/bitMap.inline.hpp"
  56 #include "utilities/powerOfTwo.hpp"
  57 #include "utilities/growableArray.hpp"
  58 
  59 //----------------------------GraphKit-----------------------------------------
  60 // Main utility constructor.
  61 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
  62   : Phase(Phase::Parser),
  63     _env(C->env()),
  64     _gvn((gvn != NULL) ? *gvn : *C->initial_gvn()),
  65     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  66 {
  67   assert(gvn == NULL || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
  68   _exceptions = jvms->map()->next_exception();
  69   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  70   set_jvms(jvms);
  71 #ifdef ASSERT
  72   if (_gvn.is_IterGVN() != NULL) {
  73     assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
  74     // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
  75     _worklist_size = _gvn.C->for_igvn()->size();
  76   }
  77 #endif
  78 }
  79 
  80 // Private constructor for parser.
  81 GraphKit::GraphKit()
  82   : Phase(Phase::Parser),
  83     _env(C->env()),
  84     _gvn(*C->initial_gvn()),
  85     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  86 {
  87   _exceptions = NULL;
  88   set_map(NULL);
  89   debug_only(_sp = -99);
  90   debug_only(set_bci(-99));
  91 }
  92 
  93 
  94 
  95 //---------------------------clean_stack---------------------------------------
  96 // Clear away rubbish from the stack area of the JVM state.
  97 // This destroys any arguments that may be waiting on the stack.

 830         if (PrintMiscellaneous && (Verbose || WizardMode)) {
 831           tty->print_cr("Zombie local %d: ", local);
 832           jvms->dump();
 833         }
 834         return false;
 835       }
 836     }
 837   }
 838   return true;
 839 }
 840 
 841 #endif //ASSERT
 842 
 843 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
 844 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
 845   ciMethod* cur_method = jvms->method();
 846   int       cur_bci   = jvms->bci();
 847   if (cur_method != NULL && cur_bci != InvocationEntryBci) {
 848     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
 849     return Interpreter::bytecode_should_reexecute(code) ||
 850            (is_anewarray && (code == Bytecodes::_multianewarray));
 851     // Reexecute the _multianewarray bytecode, which was replaced with a
 852     // sequence of [a]newarray bytecodes. See Parse::do_multianewarray().
 853     //
 854     // Note: the interpreter should not have it set, since this optimization
 855     // is limited by dimensions and guarded by a flag, so in some cases
 856     // multianewarray() runtime calls will be generated and
 857     // the bytecode should not be reexecuted (the stack will not be reset).
 858   } else {
 859     return false;
 860   }
 861 }
 862 
 863 // Helper function for adding JVMState and debug information to node
 864 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
 865   // Add the safepoint edges to the call (or other safepoint).
 866 
 867   // Make sure dead locals are set to top.  This
 868   // should help register allocation time and cut down on the size
 869   // of the deoptimization information.
 870   assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

1090       ciSignature* declared_signature = NULL;
1091       ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
1092       assert(declared_signature != NULL, "cannot be null");
1093       inputs   = declared_signature->arg_size_for_bc(code);
1094       int size = declared_signature->return_type()->size();
1095       depth = size - inputs;
1096     }
1097     break;
1098 
1099   case Bytecodes::_multianewarray:
1100     {
1101       ciBytecodeStream iter(method());
1102       iter.reset_to_bci(bci());
1103       iter.next();
1104       inputs = iter.get_dimensions();
1105       assert(rsize == 1, "");
1106       depth = rsize - inputs;
1107     }
1108     break;
1109 
1110   case Bytecodes::_withfield: {
1111     bool ignored_will_link;
1112     ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
1113     int      size  = field->type()->size();
1114     inputs = size+1;
1115     depth = rsize - inputs;
1116     break;
1117   }
1118 
1119   case Bytecodes::_ireturn:
1120   case Bytecodes::_lreturn:
1121   case Bytecodes::_freturn:
1122   case Bytecodes::_dreturn:
1123   case Bytecodes::_areturn:
1124     assert(rsize == -depth, "");
1125     inputs = rsize;
1126     break;
1127 
1128   case Bytecodes::_jsr:
1129   case Bytecodes::_jsr_w:
1130     inputs = 0;
1131     depth  = 1;                  // S.B. depth=1, not zero
1132     break;
1133 
1134   default:
1135     // bytecode produces a typed result
1136     inputs = rsize - depth;
1137     assert(inputs >= 0, "");
1138     break;

1181   Node* conv = _gvn.transform( new ConvI2LNode(offset));
1182   Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1183   return _gvn.transform( new AndLNode(conv, mask) );
1184 }
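// A worked example of the zero-extension above: offset == -1 sign-extends to
// 0xFFFFFFFFFFFFFFFF under ConvI2L; ANDing with max_juint (0x00000000FFFFFFFF)
// leaves 4294967295, i.e. the int reinterpreted as an unsigned 32-bit value.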
1185 
1186 Node* GraphKit::ConvL2I(Node* offset) {
1187   // short-circuit a common case
1188   jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1189   if (offset_con != (jlong)Type::OffsetBot) {
1190     return intcon((int) offset_con);
1191   }
1192   return _gvn.transform( new ConvL2INode(offset));
1193 }
1194 
1195 //-------------------------load_object_klass-----------------------------------
1196 Node* GraphKit::load_object_klass(Node* obj) {
1197   // Special-case a fresh allocation to avoid building nodes:
1198   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1199   if (akls != NULL)  return akls;
1200   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1201   return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
1202 }
1203 
1204 //-------------------------load_array_length-----------------------------------
1205 Node* GraphKit::load_array_length(Node* array) {
1206   // Special-case a fresh allocation to avoid building nodes:
1207   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
1208   Node *alen;
1209   if (alloc == NULL) {
1210     Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1211     alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1212   } else {
1213     alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1214   }
1215   return alen;
1216 }
1217 
1218 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1219                                    const TypeOopPtr* oop_type,
1220                                    bool replace_length_in_map) {
1221   Node* length = alloc->Ideal_length();

1230         replace_in_map(length, ccast);
1231       }
1232       return ccast;
1233     }
1234   }
1235   return length;
1236 }
1237 
1238 //------------------------------do_null_check----------------------------------
1239 // Helper function to do a NULL pointer check.  Returned value is
1240 // the incoming address with NULL casted away.  You are allowed to use the
1241 // not-null value only if you are control dependent on the test.
1242 #ifndef PRODUCT
1243 extern int explicit_null_checks_inserted,
1244            explicit_null_checks_elided;
1245 #endif
1246 Node* GraphKit::null_check_common(Node* value, BasicType type,
1247                                   // optional arguments for variations:
1248                                   bool assert_null,
1249                                   Node* *null_control,
1250                                   bool speculative,
1251                                   bool is_init_check) {
1252   assert(!assert_null || null_control == NULL, "not both at once");
1253   if (stopped())  return top();
1254   NOT_PRODUCT(explicit_null_checks_inserted++);
1255 
1256   if (value->is_InlineType()) {
1257     InlineTypeNode* vt = value->as_InlineType();
1258     null_check_common(vt->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1259     if (stopped()) {
1260       return top();
1261     }
1262     if (assert_null) {
1263       // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1264       // vt = InlineTypeNode::make_null(_gvn, vt->type()->inline_klass());
1265       // replace_in_map(value, vt);
1266       // return vt;
1267       return null();
1268     }
1269     bool do_replace_in_map = (null_control == NULL || (*null_control) == top());
1270     return cast_not_null(value, do_replace_in_map);
1271   } else if (value->is_InlineTypePtr()) {
1272     // Null checking a scalarized but nullable inline type. Check the IsInit
1273     // input instead of the oop input to avoid keeping buffer allocations alive.
1274     InlineTypePtrNode* vtptr = value->as_InlineTypePtr();
1275     while (vtptr->get_oop()->is_InlineTypePtr()) {
1276       vtptr = vtptr->get_oop()->as_InlineTypePtr();
1277     }
1278     null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1279     if (stopped()) {
1280       return top();
1281     }
1282     if (assert_null) {
1283       // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1284       // vtptr = InlineTypePtrNode::make_null(_gvn, vtptr->type()->inline_klass());
1285       // replace_in_map(value, vtptr);
1286       // return vtptr;
1287       return null();
1288     }
1289     bool do_replace_in_map = (null_control == NULL || (*null_control) == top());
1290     return cast_not_null(value, do_replace_in_map);
1291   }
1292 
1293   // Construct NULL check
1294   Node *chk = NULL;
1295   switch(type) {
1296     case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1297     case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
1298     case T_PRIMITIVE_OBJECT : // fall through
1299     case T_ARRAY  : // fall through
1300       type = T_OBJECT;  // simplify further tests
1301     case T_OBJECT : {
1302       const Type *t = _gvn.type( value );
1303 
1304       const TypeOopPtr* tp = t->isa_oopptr();
1305       if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
1306           // Only for do_null_check, not any of its siblings:
1307           && !assert_null && null_control == NULL) {
1308         // Usually, any field access or invocation on an unloaded oop type
1309         // will simply fail to link, since the statically linked class is
1310         // likely also to be unloaded.  However, in -Xcomp mode, sometimes
1311         // the static class is loaded but the sharper oop type is not.
1312         // Rather than checking for this obscure case in lots of places,
1313         // we simply observe that a null check on an unloaded class
1314         // will always be followed by a nonsense operation, so we
1315         // can just issue the uncommon trap here.
1316         // Our access to the unloaded class will only be correct
1317         // after it has been loaded and initialized, which requires
1318         // a trip through the interpreter.

1376         }
1377         Node *oldcontrol = control();
1378         set_control(cfg);
1379         Node *res = cast_not_null(value);
1380         set_control(oldcontrol);
1381         NOT_PRODUCT(explicit_null_checks_elided++);
1382         return res;
1383       }
1384       cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1385       if (cfg == NULL)  break;  // Quit at region nodes
1386       depth++;
1387     }
1388   }
1389 
1390   //-----------
1391   // Branch to failure if null
1392   float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
1393   Deoptimization::DeoptReason reason;
1394   if (assert_null) {
1395     reason = Deoptimization::reason_null_assert(speculative);
1396   } else if (type == T_OBJECT || is_init_check) {
1397     reason = Deoptimization::reason_null_check(speculative);
1398   } else {
1399     reason = Deoptimization::Reason_div0_check;
1400   }
1401   // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1402   // ciMethodData::has_trap_at will return a conservative -1 if any
1403   // must-be-null assertion has failed.  This could cause performance
1404   // problems for a method after its first do_null_assert failure.
1405   // Consider using 'Reason_class_check' instead?
1406 
1407   // To cause an implicit null check, we set the not-null probability
1408   // to the maximum (PROB_MAX).  For an explicit check the probability
1409   // is set to a smaller value.
1410   if (null_control != NULL || too_many_traps(reason)) {
1411     // probability is less likely
1412     ok_prob =  PROB_LIKELY_MAG(3);
1413   } else if (!assert_null &&
1414              (ImplicitNullCheckThreshold > 0) &&
1415              method() != NULL &&
1416              (method()->method_data()->trap_count(reason)

1450   }
1451 
1452   if (assert_null) {
1453     // Cast obj to null on this path.
1454     replace_in_map(value, zerocon(type));
1455     return zerocon(type);
1456   }
1457 
1458   // Cast obj to not-null on this path, if there is no null_control.
1459   // (If there is a null_control, a non-null value may come back to haunt us.)
1460   if (type == T_OBJECT) {
1461     Node* cast = cast_not_null(value, false);
1462     if (null_control == NULL || (*null_control) == top())
1463       replace_in_map(value, cast);
1464     value = cast;
1465   }
1466 
1467   return value;
1468 }
1469 

1470 //------------------------------cast_not_null----------------------------------
1471 // Cast obj to not-null on this path
1472 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1473   if (obj->is_InlineType()) {
1474     InlineTypeNode* vt = obj->clone()->as_InlineType();
1475     vt->set_is_init(_gvn);
1476     vt = _gvn.transform(vt)->as_InlineType();
1477     if (do_replace_in_map) {
1478       replace_in_map(obj, vt);
1479     }
1480     return vt;
1481   } else if (obj->is_InlineTypePtr()) {
1482     // Cast oop input instead
1483     Node* cast = cast_not_null(obj->as_InlineTypePtr()->get_oop(), do_replace_in_map);
1484     if (cast->is_top()) {
1485       // Always null
1486       return top();
1487     }
1488     // Create a new node with the casted oop input and is_init set
1489     InlineTypeBaseNode* vt = obj->clone()->as_InlineTypePtr();
1490     vt->set_oop(cast);
1491     vt->set_is_init(_gvn);
1492     vt = _gvn.transform(vt)->as_InlineTypePtr();
1493     if (do_replace_in_map) {
1494       replace_in_map(obj, vt);
1495     }
1496     return vt;
1497   }
1498   const Type *t = _gvn.type(obj);
1499   const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1500   // Object is already not-null?
1501   if( t == t_not_null ) return obj;
1502 
1503   Node *cast = new CastPPNode(obj,t_not_null);
1504   cast->init_req(0, control());
1505   cast = _gvn.transform( cast );
1506 
1507   // Scan for instances of 'obj' in the current JVM mapping.
1508   // These instances are known to be not-null after the test.
1509   if (do_replace_in_map)
1510     replace_in_map(obj, cast);
1511 
1512   return cast;                  // Return casted value
1513 }
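// A small example of the join above (types illustrative): if obj currently has
// type `String* (maybe null)`, joining it with TypePtr::NOTNULL yields
// `String* (not null)`; the CastPP pins that sharper type to this control path
// and replace_in_map() lets every later use of obj in the JVM state see it.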
1514 
1515 // Sometimes in intrinsics, we implicitly know an object is not null
1516 // (there's no actual null check) so we can cast it to not null. In
1517 // the course of optimizations, the input to the cast can become null.

1611                           MemNode::MemOrd mo,
1612                           LoadNode::ControlDependency control_dependency,
1613                           bool require_atomic_access,
1614                           bool unaligned,
1615                           bool mismatched,
1616                           bool unsafe,
1617                           uint8_t barrier_data) {
1618   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1619   const TypePtr* adr_type = NULL; // debug-mode-only argument
1620   debug_only(adr_type = C->get_adr_type(adr_idx));
1621   Node* mem = memory(adr_idx);
1622   Node* ld;
1623   if (require_atomic_access && bt == T_LONG) {
1624     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1625   } else if (require_atomic_access && bt == T_DOUBLE) {
1626     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1627   } else {
1628     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1629   }
1630   ld = _gvn.transform(ld);
1631 
1632   if (((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1633     // Improve graph before escape analysis and boxing elimination.
1634     record_for_igvn(ld);
1635   }
1636   return ld;
1637 }
1638 
1639 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1640                                 int adr_idx,
1641                                 MemNode::MemOrd mo,
1642                                 bool require_atomic_access,
1643                                 bool unaligned,
1644                                 bool mismatched,
1645                                 bool unsafe) {
1646   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1647   const TypePtr* adr_type = NULL;
1648   debug_only(adr_type = C->get_adr_type(adr_idx));
1649   Node *mem = memory(adr_idx);
1650   Node* st;
1651   if (require_atomic_access && bt == T_LONG) {
1652     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);

1663   }
1664   if (unsafe) {
1665     st->as_Store()->set_unsafe_access();
1666   }
1667   st = _gvn.transform(st);
1668   set_memory(st, adr_idx);
1669   // Back-to-back stores can only remove intermediate store with DU info
1670   // so push on worklist for optimizer.
1671   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1672     record_for_igvn(st);
1673 
1674   return st;
1675 }
1676 
1677 Node* GraphKit::access_store_at(Node* obj,
1678                                 Node* adr,
1679                                 const TypePtr* adr_type,
1680                                 Node* val,
1681                                 const Type* val_type,
1682                                 BasicType bt,
1683                                 DecoratorSet decorators,
1684                                 bool safe_for_replace) {
1685   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1686   // could be delayed during Parse (for example, in adjust_map_after_if()).
1687   // Execute transformation here to avoid barrier generation in such case.
1688   if (_gvn.type(val) == TypePtr::NULL_PTR) {
1689     val = _gvn.makecon(TypePtr::NULL_PTR);
1690   }
1691 
1692   if (stopped()) {
1693     return top(); // Dead path ?
1694   }
1695 
1696   assert(val != NULL, "not dead path");
1697   if (val->is_InlineType()) {
1698     // Store to non-flattened field. Buffer the inline type and make sure
1699     // the store is re-executed if the allocation triggers deoptimization.
1700     PreserveReexecuteState preexecs(this);
1701     jvms()->set_should_reexecute(true);
1702     val = val->as_InlineType()->buffer(this, safe_for_replace);
1703   }
1704 
1705   C2AccessValuePtr addr(adr, adr_type);
1706   C2AccessValue value(val, val_type);
1707   C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1708   if (access.is_raw()) {
1709     return _barrier_set->BarrierSetC2::store_at(access, value);
1710   } else {
1711     return _barrier_set->store_at(access, value);
1712   }
1713 }
1714 
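     //----------------------------access_load_at-----------------------------------
     // GC-barrier-aware load of a 'val_type' value from 'adr' within 'obj', routed
     // through the barrier set, or directly through BarrierSetC2::load_at for raw
     // accesses.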
1715 Node* GraphKit::access_load_at(Node* obj,   // containing obj
1716                                Node* adr,   // actual address to load val from
1717                                const TypePtr* adr_type,
1718                                const Type* val_type,
1719                                BasicType bt,
1720                                DecoratorSet decorators,
1721                                Node* ctl) {
1722   if (stopped()) {
1723     return top(); // Dead path ?
1724   }
1725 
1726   C2AccessValuePtr addr(adr, adr_type);
1727   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1728   if (access.is_raw()) {
1729     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1730   } else {
1731     return _barrier_set->load_at(access, val_type);
1732   }
1733 }
1734 
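     //------------------------------access_load------------------------------------
     // Same as access_load_at, but for an address without a containing object
     // (the access is created with a NULL base).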
1735 Node* GraphKit::access_load(Node* adr,   // actual address to load val from
1736                             const Type* val_type,
1737                             BasicType bt,
1738                             DecoratorSet decorators) {
1739   if (stopped()) {
1740     return top(); // Dead path ?
1741   }
1742 
1743   C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1744   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
1745   if (access.is_raw()) {
1746     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1747   } else {

1813                                      const Type* value_type,
1814                                      BasicType bt,
1815                                      DecoratorSet decorators) {
1816   C2AccessValuePtr addr(adr, adr_type);
1817   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1818   if (access.is_raw()) {
1819     return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1820   } else {
1821     return _barrier_set->atomic_add_at(access, new_val, value_type);
1822   }
1823 }
1824 
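     //------------------------------access_clone-----------------------------------
     // Delegate object/array cloning to the GC barrier set.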
1825 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1826   return _barrier_set->clone(this, src, dst, size, is_array);
1827 }
1828 
1829 //-------------------------array_element_address-------------------------
1830 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1831                                       const TypeInt* sizetype, Node* ctrl) {
1832   uint shift  = exact_log2(type2aelembytes(elembt));
1833   ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass();
1834   if (arytype_klass != NULL && arytype_klass->is_flat_array_klass()) {
1835     ciFlatArrayKlass* vak = arytype_klass->as_flat_array_klass();
1836     shift = vak->log2_element_size();
1837   }
1838   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1839 
1840   // short-circuit a common case (saves lots of confusing waste motion)
1841   jint idx_con = find_int_con(idx, -1);
1842   if (idx_con >= 0) {
1843     intptr_t offset = header + ((intptr_t)idx_con << shift);
1844     return basic_plus_adr(ary, offset);
1845   }
1846 
1847   // must be correct type for alignment purposes
1848   Node* base  = basic_plus_adr(ary, header);
1849   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1850   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1851   return basic_plus_adr(ary, base, scale);
1852 }
1853 
1854 //-------------------------load_array_element-------------------------
1855 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1856   const Type* elemtype = arytype->elem();
1857   BasicType elembt = elemtype->array_element_basic_type();
1858   assert(elembt != T_PRIMITIVE_OBJECT, "inline types are not supported by this method");
1859   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1860   if (elembt == T_NARROWOOP) {
1861     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1862   }
1863   Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1864                             IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1865   return ld;
1866 }
1867 
1868 //-------------------------set_arguments_for_java_call-------------------------
1869 // Arguments (pre-popped from the stack) are taken from the JVMS.
1870 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1871   PreserveReexecuteState preexecs(this);
1872   if (EnableValhalla) {
1873     // Make sure the call is "re-executed" if buffering of inline type arguments triggers deoptimization.
1874     // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
1875     jvms()->set_should_reexecute(true);
1876     int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1877     inc_sp(arg_size);
1878   }
1879   // Add the call arguments
1880   const TypeTuple* domain = call->tf()->domain_sig();
1881   uint nargs = domain->cnt();
1882   int arg_num = 0;
1883   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1884     Node* arg = argument(i-TypeFunc::Parms);
1885     const Type* t = domain->field_at(i);
1886     if (t->is_inlinetypeptr() && call->method()->is_scalarized_arg(arg_num)) {
1887       // We don't pass inline type arguments by reference but instead pass each field of the inline type
1888       if (!arg->is_InlineTypeBase()) {
1889         assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
1890         arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
1891       }
1892       InlineTypeBaseNode* vt = arg->as_InlineTypeBase();
1893       vt->pass_fields(this, call, idx, true, !t->maybe_null());
1894       // If an inline type argument is passed as fields, attach the Method* to the call site
1895       // to be able to access the extended signature later via attached_method_before_pc().
1896       // For example, see CompiledMethod::preserve_callee_argument_oops().
1897       call->set_override_symbolic_info(true);
1898       arg_num++;
1899       continue;
1900     } else if (arg->is_InlineType()) {
1901       // Pass inline type argument via oop to callee
1902       arg = arg->as_InlineType()->buffer(this);
1903       if (!is_late_inline) {
1904         arg = arg->as_InlineTypePtr()->get_oop();
1905       }
1906     }
1907     if (t != Type::HALF) {
1908       arg_num++;
1909     }
1910     call->init_req(idx++, arg);
1911   }
1912 }
1913 
1914 //---------------------------set_edges_for_java_call---------------------------
1915 // Connect a newly created call into the current JVMS.
1916 // A return value node (if any) is returned from set_edges_for_java_call.
1917 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1918 
1919   // Add the predefined inputs:
1920   call->init_req( TypeFunc::Control, control() );
1921   call->init_req( TypeFunc::I_O    , i_o() );
1922   call->init_req( TypeFunc::Memory , reset_memory() );
1923   call->init_req( TypeFunc::FramePtr, frameptr() );
1924   call->init_req( TypeFunc::ReturnAdr, top() );
1925 
1926   add_safepoint_edges(call, must_throw);
1927 
1928   Node* xcall = _gvn.transform(call);
1929 
1930   if (xcall == top()) {
1931     set_control(top());
1932     return;
1933   }
1934   assert(xcall == call, "call identity is stable");
1935 
1936   // Re-use the current map to produce the result.
1937 
1938   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1939   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1940   set_all_memory_call(xcall, separate_io_proj);
1941 
1942   //return xcall;   // no need, caller already has it
1943 }
1944 
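     //-------------------------set_results_for_java_call---------------------------
     // Finish wiring up a Java call: attach the exception state, optionally switch
     // the fall-through path to separate I/O and memory projections, and return the
     // call's result. If the callee returns an inline type as multiple fields, the
     // result is reassembled into an InlineType node from the call's projections.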
1945 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1946   if (stopped())  return top();  // maybe the call folded up?
1947 







1948   // Note:  Since any out-of-line call can produce an exception,
1949   // we always insert an I_O projection from the call into the result.
1950 
1951   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1952 
1953   if (separate_io_proj) {
1954     // The caller requested separate projections be used by the fall
1955     // through and exceptional paths, so replace the projections for
1956     // the fall through path.
1957     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1958     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1959   }
1960 
1961   // Capture the return value, if any.
1962   Node* ret;
1963   if (call->method() == NULL || call->method()->return_type()->basic_type() == T_VOID) {
1964     ret = top();
1965   } else if (call->tf()->returns_inline_type_as_fields()) {
1966     // Return of multiple values (inline type fields): we create an
1967     // InlineType node; each field is a projection from the call.
1968     ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
1969     uint base_input = TypeFunc::Parms;
1970     ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, call->method()->signature()->returns_null_free_inline_type());
1971   } else {
1972     ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1973   }
1974 
1975   return ret;
1976 }
1977 
1978 //--------------------set_predefined_input_for_runtime_call--------------------
1979 // Reading and setting the memory state is way conservative here.
1980 // The real problem is that I am not doing real Type analysis on memory,
1981 // so I cannot distinguish card mark stores from other stores.  Across a GC
1982   // point the Store Barrier and the card mark memory have to agree.  I cannot
1983 // have a card mark store and its barrier split across the GC point from
1984 // either above or below.  Here I get that to happen by reading ALL of memory.
1985 // A better answer would be to separate out card marks from other memory.
1986 // For now, return the input memory state, so that it can be reused
1987 // after the call, if this call has restricted memory effects.
1988 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1989   // Set fixed predefined input arguments
1990   Node* memory = reset_memory();
1991   Node* m = narrow_mem == NULL ? memory : narrow_mem;
1992   call->init_req( TypeFunc::Control,   control()  );
1993   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1994   call->init_req( TypeFunc::Memory,    m          ); // may gc ptrs

2045     if (use->is_MergeMem()) {
2046       wl.push(use);
2047     }
2048   }
2049 }
2050 
2051 // Replace the call with the current state of the kit.
2052 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
2053   JVMState* ejvms = NULL;
2054   if (has_exceptions()) {
2055     ejvms = transfer_exceptions_into_jvms();
2056   }
2057 
2058   ReplacedNodes replaced_nodes = map()->replaced_nodes();
2059   ReplacedNodes replaced_nodes_exception;
2060   Node* ex_ctl = top();
2061 
2062   SafePointNode* final_state = stop();
2063 
2064   // Find all the needed outputs of this call
2065   CallProjections* callprojs = call->extract_projections(true);

2066 
2067   Unique_Node_List wl;
2068   Node* init_mem = call->in(TypeFunc::Memory);
2069   Node* final_mem = final_state->in(TypeFunc::Memory);
2070   Node* final_ctl = final_state->in(TypeFunc::Control);
2071   Node* final_io = final_state->in(TypeFunc::I_O);
2072 
2073   // Replace all the old call edges with the edges from the inlining result
2074   if (callprojs->fallthrough_catchproj != NULL) {
2075     C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2076   }
2077   if (callprojs->fallthrough_memproj != NULL) {
2078     if (final_mem->is_MergeMem()) {
2079       // The MergeMem at the parser's exits was not transformed but may be optimized
2080       final_mem = _gvn.transform(final_mem);
2081     }
2082     C->gvn_replace_by(callprojs->fallthrough_memproj,   final_mem);
2083     add_mergemem_users_to_worklist(wl, final_mem);
2084   }
2085   if (callprojs->fallthrough_ioproj != NULL) {
2086     C->gvn_replace_by(callprojs->fallthrough_ioproj,    final_io);
2087   }
2088 
2089   // Replace the result with the new result if it exists and is used
2090   if (callprojs->resproj[0] != NULL && result != NULL) {
2091     // If the inlined code is dead, the result projections for an inline type returned as
2092     // fields have not been replaced. They will go away once the call is replaced by TOP below.
2093     assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
2094            "unexpected number of results");
2095     C->gvn_replace_by(callprojs->resproj[0], result);
2096   }
2097 
2098   if (ejvms == NULL) {
2099     // No exception edges, so simply kill off those paths
2100     if (callprojs->catchall_catchproj != NULL) {
2101       C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2102     }
2103     if (callprojs->catchall_memproj != NULL) {
2104       C->gvn_replace_by(callprojs->catchall_memproj,   C->top());
2105     }
2106     if (callprojs->catchall_ioproj != NULL) {
2107       C->gvn_replace_by(callprojs->catchall_ioproj,    C->top());
2108     }
2109     // Replace the old exception object with top
2110     if (callprojs->exobj != NULL) {
2111       C->gvn_replace_by(callprojs->exobj, C->top());
2112     }
2113   } else {
2114     GraphKit ekit(ejvms);
2115 
2116     // Load my combined exception state into the kit, with all phis transformed:
2117     SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2118     replaced_nodes_exception = ex_map->replaced_nodes();
2119 
2120     Node* ex_oop = ekit.use_exception_state(ex_map);
2121 
2122     if (callprojs->catchall_catchproj != NULL) {
2123       C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2124       ex_ctl = ekit.control();
2125     }
2126     if (callprojs->catchall_memproj != NULL) {
2127       Node* ex_mem = ekit.reset_memory();
2128       C->gvn_replace_by(callprojs->catchall_memproj,   ex_mem);
2129       add_mergemem_users_to_worklist(wl, ex_mem);
2130     }
2131     if (callprojs->catchall_ioproj != NULL) {
2132       C->gvn_replace_by(callprojs->catchall_ioproj,    ekit.i_o());
2133     }
2134 
2135     // Replace the old exception object with the newly created one
2136     if (callprojs->exobj != NULL) {
2137       C->gvn_replace_by(callprojs->exobj, ex_oop);
2138     }
2139   }
2140 
2141   // Disconnect the call from the graph
2142   call->disconnect_inputs(C);
2143   C->gvn_replace_by(call, C->top());
2144 
2145   // Clean up any MergeMems that feed other MergeMems since the
2146   // optimizer doesn't like that.
2147   while (wl.size() > 0) {
2148     _gvn.transform(wl.pop());
2149   }
2150 
2151   if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
2152     replaced_nodes.apply(C, final_ctl);
2153   }
2154   if (!ex_ctl->is_top() && do_replaced_nodes) {
2155     replaced_nodes_exception.apply(C, ex_ctl);
2156   }
2157 }
2158 
2159 
2160 //------------------------------increment_counter------------------------------
2161 // for statistics: increment a VM counter by 1
2162 
2163 void GraphKit::increment_counter(address counter_addr) {
2164   Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2165   increment_counter(adr1);
2166 }
2167 
2168 void GraphKit::increment_counter(Node* counter_addr) {
2169   int adr_type = Compile::AliasIdxRaw;
2170   Node* ctrl = control();
2171   Node* cnt  = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);

2329  *
2330  * @param n          node that the type applies to
2331  * @param exact_kls  type from profiling
2332  * @param ptr_kind   did profiling see null?
2333  *
2334  * @return           node with improved type
2335  */
2336 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2337   const Type* current_type = _gvn.type(n);
2338   assert(UseTypeSpeculation, "type speculation must be on");
2339 
2340   const TypePtr* speculative = current_type->speculative();
2341 
2342   // Should the klass from the profile be recorded in the speculative type?
2343   if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2344     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
2345     const TypeOopPtr* xtype = tklass->as_instance_type();
2346     assert(xtype->klass_is_exact(), "Should be exact");
2347     // Any reason to believe n is not null (from this profiling or a previous one)?
2348     assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2349     const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2350     // record the new speculative type's depth
2351     speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2352     speculative = speculative->with_inline_depth(jvms()->depth());
2353   } else if (current_type->would_improve_ptr(ptr_kind)) {
2354     // Profiling reports that null was never seen, so we can change the
2355     // speculative type to a non-null ptr.
2356     if (ptr_kind == ProfileAlwaysNull) {
2357       speculative = TypePtr::NULL_PTR;
2358     } else {
2359       assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2360       const TypePtr* ptr = TypePtr::NOTNULL;
2361       if (speculative != NULL) {
2362         speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2363       } else {
2364         speculative = ptr;
2365       }
2366     }
2367   }
2368 
2369   if (speculative != current_type->speculative()) {
2370     // Build a type with a speculative type (what we think we know
2371     // about the type but will need a guard when we use it)
2372     const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
2373     // We're changing the type, we need a new CheckCast node to carry
2374     // the new type. The new type depends on the control: what
2375     // profiling tells us is only valid from here as far as we can
2376     // tell.
2377     Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2378     cast = _gvn.transform(cast);
2379     replace_in_map(n, cast);
2380     n = cast;
2381   }
2382 
2383   return n;
2384 }
2385 
2386 /**
2387  * Record profiling data from receiver profiling at an invoke with the
2388  * type system so that it can propagate it (speculation)
2389  *
2390  * @param n  receiver node
2391  *
2392  * @return   node with improved type
2393  */
2394 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2395   if (!UseTypeSpeculation) {
2396     return n;
2397   }
2398   ciKlass* exact_kls = profile_has_unique_klass();
2399   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2400   if ((java_bc() == Bytecodes::_checkcast ||
2401        java_bc() == Bytecodes::_instanceof ||
2402        java_bc() == Bytecodes::_aastore) &&
2403       method()->method_data()->is_mature()) {
2404     ciProfileData* data = method()->method_data()->bci_to_data(bci());
2405     if (data != NULL) {
2406       if (java_bc() == Bytecodes::_aastore) {
2407         ciKlass* array_type = NULL;
2408         ciKlass* element_type = NULL;
2409         ProfilePtrKind element_ptr = ProfileMaybeNull;
2410         bool flat_array = true;
2411         bool null_free_array = true;
2412         method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2413         exact_kls = element_type;
2414         ptr_kind = element_ptr;
2415       } else {
2416         if (!data->as_BitData()->null_seen()) {
2417           ptr_kind = ProfileNeverNull;
2418         } else {
2419           assert(data->is_ReceiverTypeData(), "bad profile data type");
2420           ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2421           uint i = 0;
2422           for (; i < call->row_limit(); i++) {
2423             ciKlass* receiver = call->receiver(i);
2424             if (receiver != NULL) {
2425               break;
2426             }
2427           }
2428           ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2429         }

2430       }
2431     }
2432   }
2433   return record_profile_for_speculation(n, exact_kls, ptr_kind);
2434 }
2435 
2436 /**
2437  * Record profiling data from argument profiling at an invoke with the
2438  * type system so that it can propagate it (speculation)
2439  *
2440  * @param dest_method  target method for the call
2441  * @param bc           what invoke bytecode is this?
2442  */
2443 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2444   if (!UseTypeSpeculation) {
2445     return;
2446   }
2447   const TypeFunc* tf    = TypeFunc::make(dest_method);
2448   int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2449   int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2450   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2451     const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2452     if (is_reference_type(targ->basic_type())) {
2453       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2454       ciKlass* better_type = NULL;
2455       if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2456         record_profile_for_speculation(argument(j), better_type, ptr_kind);
2457       }
2458       i++;
2459     }
2460   }
2461 }
2462 
2463 /**
2464  * Record profiling data from parameter profiling at an invoke with
2465  * the type system so that it can propagate it (speculation)
2466  */
2467 void GraphKit::record_profiled_parameters_for_speculation() {
2468   if (!UseTypeSpeculation) {
2469     return;
2470   }
2471   for (int i = 0, j = 0; i < method()->arg_size() ; i++) {

2485  * the type system so that it can propagate it (speculation)
2486  */
2487 void GraphKit::record_profiled_return_for_speculation() {
2488   if (!UseTypeSpeculation) {
2489     return;
2490   }
2491   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2492   ciKlass* better_type = NULL;
2493   if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2494     // If profiling reports a single type for the return value,
2495     // feed it to the type system so it can propagate it as a
2496     // speculative type
2497     record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2498   }
2499 }
2500 
2501 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2502   if (Matcher::strict_fp_requires_explicit_rounding) {
2503     // (Note:  TypeFunc::make has a cache that makes this fast.)
2504     const TypeFunc* tf    = TypeFunc::make(dest_method);
2505     int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2506     for (int j = 0; j < nargs; j++) {
2507       const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2508       if (targ->basic_type() == T_DOUBLE) {
2509         // If any parameters are doubles, they must be rounded before
2510         // the call; dprecision_rounding does the gvn.transform
2511         Node *arg = argument(j);
2512         arg = dprecision_rounding(arg);
2513         set_argument(j, arg);
2514       }
2515     }
2516   }
2517 }
2518 
2519 // rounding for strict float precision conformance
2520 Node* GraphKit::precision_rounding(Node* n) {
2521   if (Matcher::strict_fp_requires_explicit_rounding) {
2522 #ifdef IA32
2523     if (UseSSE == 0) {
2524       return _gvn.transform(new RoundFloatNode(0, n));
2525     }
2526 #else
2527     Unimplemented();

2636                                   // The first NULL ends the list.
2637                                   Node* parm0, Node* parm1,
2638                                   Node* parm2, Node* parm3,
2639                                   Node* parm4, Node* parm5,
2640                                   Node* parm6, Node* parm7) {
2641   assert(call_addr != NULL, "must not call NULL targets");
2642 
2643   // Slow-path call
2644   bool is_leaf = !(flags & RC_NO_LEAF);
2645   bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
2646   if (call_name == NULL) {
2647     assert(!is_leaf, "must supply name for leaf");
2648     call_name = OptoRuntime::stub_name(call_addr);
2649   }
2650   CallNode* call;
2651   if (!is_leaf) {
2652     call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2653   } else if (flags & RC_NO_FP) {
2654     call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2655   } else  if (flags & RC_VECTOR){
2656     uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2657     call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2658   } else {
2659     call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2660   }
2661 
2662   // The following is similar to set_edges_for_java_call,
2663   // except that the memory effects of the call are restricted to AliasIdxRaw.
2664 
2665   // Slow path call has no side-effects, uses few values
2666   bool wide_in  = !(flags & RC_NARROW_MEM);
2667   bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2668 
2669   Node* prev_mem = NULL;
2670   if (wide_in) {
2671     prev_mem = set_predefined_input_for_runtime_call(call);
2672   } else {
2673     assert(!wide_out, "narrow in => narrow out");
2674     Node* narrow_mem = memory(adr_type);
2675     prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2676   }

2735 
2736 //-----------------------------make_native_call-------------------------------
2737 Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
2738   // Select just the actual call args to pass on
2739   // [MethodHandle fallback, long addr, HALF addr, ... args , NativeEntryPoint nep]
2740   //                                             |          |
2741   //                                             V          V
2742   //                                             [ ... args ]
2743   uint n_filtered_args = nargs - 4; // -fallback, -addr (2), -nep;
2744   ResourceMark rm;
2745   Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
2746   const Type** arg_types = TypeTuple::fields(n_filtered_args);
2747   GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad());
2748 
2749   VMReg* argRegs = nep->argMoves();
2750   {
2751     for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
2752         vm_arg_pos < n_filtered_args; vm_arg_pos++) {
2753       uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)
2754       Node* node = argument(vm_unfiltered_arg_pos);
2755       const Type* type = call_type->domain_sig()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
2756       VMReg reg = type == Type::HALF
2757         ? VMRegImpl::Bad()
2758         : argRegs[java_arg_read_pos++];
2759 
2760       argument_nodes[vm_arg_pos] = node;
2761       arg_types[TypeFunc::Parms + vm_arg_pos] = type;
2762       arg_regs.at_put(vm_arg_pos, reg);
2763     }
2764   }
2765 
2766   uint n_returns = call_type->range_sig()->cnt() - TypeFunc::Parms;
2767   GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
2768   const Type** ret_types = TypeTuple::fields(n_returns);
2769 
2770   VMReg* retRegs = nep->returnMoves();
2771   {
2772     for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
2773         vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
2774       const Type* type = call_type->range_sig()->field_at(TypeFunc::Parms + vm_ret_pos);
2775       VMReg reg = type == Type::HALF
2776         ? VMRegImpl::Bad()
2777         : retRegs[java_ret_read_pos++];
2778 
2779       ret_regs.at_put(vm_ret_pos, reg);
2780       ret_types[TypeFunc::Parms + vm_ret_pos] = type;
2781     }
2782   }
2783 
2784   const TypeFunc* new_call_type = TypeFunc::make(
2785     TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types),
2786     TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)
2787   );
2788 
2789   if (nep->need_transition()) {
2790     RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr,
2791                                                               nep->shadow_space(),
2792                                                               arg_regs, ret_regs);
2793     if (invoker == NULL) {
2794       C->record_failure("native invoker not implemented on this platform");

3083 
3084   // Now do a linear scan of the secondary super-klass array.  Again, no real
3085   // performance impact (too rare) but it's gotta be done.
3086   // Since the code is rarely used, there is no penalty for moving it
3087   // out of line, and it can only improve I-cache density.
3088   // The decision to inline or out-of-line this final check is platform
3089   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
3090   Node* psc = gvn.transform(
3091     new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
3092 
3093   IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
3094   r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
3095   r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
3096 
3097   // Return false path; set default control to true path.
3098   *ctrl = gvn.transform(r_ok_subtype);
3099   return gvn.transform(r_not_subtype);
3100 }
3101 
3102 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
3103   const Type* sub_t = _gvn.type(obj_or_subklass);
3104   if (sub_t->isa_inlinetype()) {
3105     obj_or_subklass = makecon(TypeKlassPtr::make(sub_t->inline_klass()));
3106   }
3107   bool expand_subtype_check = C->post_loop_opts_phase() ||   // macro node expansion is over
3108                               ExpandSubTypeCheckAtParseTime; // forced expansion
3109   if (expand_subtype_check) {
3110     MergeMemNode* mem = merged_memory();
3111     Node* ctrl = control();
3112     Node* subklass = obj_or_subklass;
3113     if (!sub_t->isa_klassptr() && !sub_t->isa_inlinetype()) {
3114       subklass = load_object_klass(obj_or_subklass);
3115     }

3116     Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn);
3117     set_control(ctrl);
3118     return n;
3119   }
3120 
3121   Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass));
3122   Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
3123   IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3124   set_control(_gvn.transform(new IfTrueNode(iff)));
3125   return _gvn.transform(new IfFalseNode(iff));
3126 }
3127 
3128 // Profile-driven exact type check:
3129 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
3130                                     float prob, Node* *casted_receiver) {

3131   assert(!klass->is_interface(), "no exact type check on interfaces");
3132   Node* fail = top();
3133   const Type* rec_t = _gvn.type(receiver);
3134   if (rec_t->isa_inlinetype()) {
3135     if (klass->equals(rec_t->inline_klass())) {
3136       (*casted_receiver) = receiver; // Always passes
3137     } else {
3138       (*casted_receiver) = top();    // Always fails
3139       fail = control();
3140       set_control(top());
3141     }
3142     return fail;
3143   }
3144   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
3145   Node* recv_klass = load_object_klass(receiver);
3146   fail = type_check(recv_klass, tklass, prob);





3147 
3148   if (!stopped()) {
3149     const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3150     const TypeOopPtr* recv_xtype = tklass->as_instance_type();
3151     assert(recv_xtype->klass_is_exact(), "");
3152 
3153     if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
3154       // Subsume downstream occurrences of receiver with a cast to
3155       // recv_xtype, since now we know what the type will be.
3156       Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
3157       Node* res = _gvn.transform(cast);
3158       if (recv_xtype->is_inlinetypeptr()) {
3159         assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
3160         res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass())->as_InlineTypeBase()->as_ptr(&gvn());
3161       }
3162       (*casted_receiver) = res;
3163       // (User must make the replace_in_map call.)
3164     }
3165   }
3166 
3167   return fail;
3168 }
3169 
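     //--------------------------------type_check-----------------------------------
     // Compare 'recv_klass' against the constant klass 'tklass'. Control is set to
     // the path where the klasses are equal; the returned node is the control
     // projection of the failing path.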
3170 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
3171                            float prob) {
3172   Node* want_klass = makecon(tklass);
3173   Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
3174   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3175   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
3176   set_control(_gvn.transform(new IfTrueNode (iff)));
3177   Node* fail = _gvn.transform(new IfFalseNode(iff));
3178   return fail;
3179 }
3180 
3181 //------------------------------subtype_check_receiver-------------------------
3182 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3183                                        Node** casted_receiver) {
3184   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
3185   Node* want_klass = makecon(tklass);
3186 
3187   Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3188 
3189   // Ignore interface type information until interface types are properly tracked.
3190   if (!stopped() && !klass->is_interface()) {
3191     const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3192     const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3193     if (receiver_type != NULL && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3194       Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
3195       (*casted_receiver) = _gvn.transform(cast);
3196     }
3197   }
3198 
3199   return slow_ctl;
3200 }
3201 
3202 //------------------------------seems_never_null-------------------------------
3203 // Use null_seen information if it is available from the profile.
3204 // If we see an unexpected null at a type check we record it and force a
3205 // recompile; the offending check will be recompiled to handle NULLs.
3206 // If we see several offending BCIs, then all checks in the
3207 // method will be recompiled.
3208 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3209   speculating = !_gvn.type(obj)->speculative_maybe_null();
3210   Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3211   if (UncommonNullCast               // Cutout for this technique
3212       && obj != null()               // And not the -Xcomp stupid case?
3213       && !too_many_traps(reason)
3214       ) {
3215     if (speculating) {
3216       return true;
3217     }
3218     if (data == NULL)
3219       // Edge case:  no mature data.  Be optimistic here.
3220       return true;
3221     // If the profile has not seen a null, assume it won't happen.
3222     assert(java_bc() == Bytecodes::_checkcast ||
3223            java_bc() == Bytecodes::_instanceof ||
3224            java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
3225     if (java_bc() == Bytecodes::_aastore) {
3226       return ((ciArrayLoadStoreData*)data->as_ArrayLoadStoreData())->element()->ptr_kind() == ProfileNeverNull;
3227     }
3228     return !data->as_BitData()->null_seen();
3229   }
3230   speculating = false;
3231   return false;
3232 }
3233 
3234 void GraphKit::guard_klass_being_initialized(Node* klass) {
3235   int init_state_off = in_bytes(InstanceKlass::init_state_offset());
3236   Node* adr = basic_plus_adr(top(), klass, init_state_off);
3237   Node* init_state = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3238                                     adr->bottom_type()->is_ptr(), TypeInt::BYTE,
3239                                     T_BYTE, MemNode::unordered);
3240   init_state = _gvn.transform(init_state);
3241 
3242   Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized));
3243 
3244   Node* chk = _gvn.transform(new CmpINode(being_initialized_state, init_state));
3245   Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
3246 
3247   { BuildCutout unless(this, tst, PROB_MAX);

3287 
3288 //------------------------maybe_cast_profiled_receiver-------------------------
3289 // If the profile has seen exactly one type, narrow to exactly that type.
3290 // Subsequent type checks will always fold up.
3291 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3292                                              ciKlass* require_klass,
3293                                              ciKlass* spec_klass,
3294                                              bool safe_for_replace) {
3295   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
3296 
3297   Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
3298 
3299   // Make sure we haven't already deoptimized from this tactic.
3300   if (too_many_traps_or_recompiles(reason))
3301     return NULL;
3302 
3303   // (No, this isn't a call, but it's enough like a virtual call
3304   // to use the same ciMethod accessor to get the profile info...)
3305   // If we have a speculative type use it instead of profiling (which
3306   // may not help us)
3307   ciKlass* exact_kls = spec_klass;
3308   if (exact_kls == NULL) {
3309     if (java_bc() == Bytecodes::_aastore) {
3310       ciKlass* array_type = NULL;
3311       ciKlass* element_type = NULL;
3312       ProfilePtrKind element_ptr = ProfileMaybeNull;
3313       bool flat_array = true;
3314       bool null_free_array = true;
3315       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3316       exact_kls = element_type;
3317     } else {
3318       exact_kls = profile_has_unique_klass();
3319     }
3320   }
3321   if (exact_kls != NULL) {// no cast failures here
3322     if (require_klass == NULL ||
3323         C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
3324       // If we narrow the type to match what the type profile sees or
3325       // the speculative type, we can then remove the rest of the
3326       // cast.
3327       // This is a win, even if the exact_kls is very specific,
3328       // because downstream operations, such as method calls,
3329       // will often benefit from the sharper type.
3330       Node* exact_obj = not_null_obj; // will get updated in place...
3331       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
3332                                             &exact_obj);
3333       { PreserveJVMState pjvms(this);
3334         set_control(slow_ctl);
3335         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3336       }
3337       if (safe_for_replace) {
3338         replace_in_map(not_null_obj, exact_obj);
3339       }
3340       return exact_obj;

3405 // and the reflective instance-of call.
3406 Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
3407   kill_dead_locals();           // Benefit all the uncommon traps
3408   assert( !stopped(), "dead parse path should be checked in callers" );
3409   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
3410          "must check for not-null not-dead klass in callers");
3411 
3412   // Make the merge point
3413   enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
3414   RegionNode* region = new RegionNode(PATH_LIMIT);
3415   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3416   C->set_has_split_ifs(true); // Has chance for split-if optimization
3417 
3418   ciProfileData* data = NULL;
3419   if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
3420     data = method()->method_data()->bci_to_data(bci());
3421   }
3422   bool speculative_not_null = false;
3423   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
3424                          && seems_never_null(obj, data, speculative_not_null));
3425   bool is_value = obj->is_InlineType();
3426 
3427   // Null check; get casted pointer; set region slot 3
3428   Node* null_ctl = top();
3429   if (is_value) {
3430     // TODO 8284443 Enable this
3431     safe_for_replace = false;
3432     never_see_null = false;
3433   }
3434   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3435 
3436   // If not_null_obj is dead, only null-path is taken
3437   if (stopped()) {              // Doing instance-of on a NULL?
3438     set_control(null_ctl);
3439     return intcon(0);
3440   }
3441   region->init_req(_null_path, null_ctl);
3442   phi   ->init_req(_null_path, intcon(0)); // Set null path value
3443   if (null_ctl == top()) {
3444     // Do this eagerly, so that pattern matches like is_diamond_phi
3445     // will work even during parsing.
3446     assert(_null_path == PATH_LIMIT-1, "delete last");
3447     region->del_req(_null_path);
3448     phi   ->del_req(_null_path);
3449   }
3450 
3451   // Do we know the type check always succeeds?
3452   if (!is_value) {
3453     bool known_statically = false;
3454     if (_gvn.type(superklass)->singleton()) {
3455       ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
3456       ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
3457       if (subk != NULL && subk->is_loaded()) {
3458         int static_res = C->static_subtype_check(superk, subk);
3459         known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3460       }
3461     }

3462 
3463     if (!known_statically) {
3464       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3465       // We may not have profiling here or it may not help us. If we
3466       // have a speculative type use it to perform an exact cast.
3467       ciKlass* spec_obj_type = obj_type->speculative_type();
3468       if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
3469         Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
3470         if (stopped()) {            // Profile disagrees with this path.
3471           set_control(null_ctl);    // Null is the only remaining possibility.
3472           return intcon(0);
3473         }
3474         if (cast_obj != NULL) {
3475           not_null_obj = cast_obj;
3476           is_value = not_null_obj->is_InlineType();
3477         }
3478       }
3479     }
3480   }
3481 
3482   // Generate the subtype check
3483   Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3484 
3485   // Plug in the success path to the general merge in slot 1.
3486   region->init_req(_obj_path, control());
3487   phi   ->init_req(_obj_path, intcon(1));
3488 
3489   // Plug in the failing path to the general merge in slot 2.
3490   region->init_req(_fail_path, not_subtype_ctrl);
3491   phi   ->init_req(_fail_path, intcon(0));
3492 
3493   // Return final merged results
3494   set_control( _gvn.transform(region) );
3495   record_for_igvn(region);
3496 
3497   // If we know the type check always succeeds then we don't use the
3498   // profiling data at this bytecode. Don't lose it; feed it to the
3499   // type system as a speculative type.
3500   if (safe_for_replace && !is_value) {
3501     Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3502     replace_in_map(obj, casted_obj);
3503   }
3504 
3505   return _gvn.transform(phi);
3506 }
3507 
3508 //-------------------------------gen_checkcast---------------------------------
3509 // Generate a checkcast idiom.  Used by both the checkcast bytecode and the
3510 // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
3511 // uncommon-trap paths work.  Adjust stack after this call.
3512 // If failure_control is supplied and not null, it is filled in with
3513 // the control edge for the cast failure.  Otherwise, an appropriate
3514 // uncommon trap or exception is thrown.
3515 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {

3516   kill_dead_locals();           // Benefit all the uncommon traps
3517   const TypeKlassPtr* tk = _gvn.type(superklass)->is_klassptr();
3518   const TypeOopPtr* toop = TypeOopPtr::make_from_klass(tk->klass());
3519   bool safe_for_replace = (failure_control == NULL);
3520   bool from_inline = obj->is_InlineType();
3521   assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer");
3522 
3523   // Fast cutout:  Check the case that the cast is vacuously true.
3524   // This detects the common cases where the test will short-circuit
3525   // away completely.  We do this before we perform the null check,
3526   // because if the test is going to turn into zero code, we don't
3527   // want a residual null check left around.  (Causes a slowdown,
3528   // for example, in some objArray manipulations, such as a[i]=a[j].)
3529   if (tk->singleton()) {
3530     ciKlass* klass = NULL;
3531     if (obj->is_InlineTypeBase()) {
3532       klass = _gvn.type(obj)->inline_klass();
3533     } else {
3534       const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3535       if (objtp != NULL) {
3536         klass = objtp->klass();
3537       }
3538     }
3539     if (klass != NULL) {
3540       switch (C->static_subtype_check(tk->klass(), klass)) {
3541       case Compile::SSC_always_true:
3542         // If we know the type check always succeeds, then we don't use
3543         // the profiling data at this bytecode. Don't lose it; feed it
3544         // to the type system as a speculative type.
3545         if (!from_inline) {
3546           obj = record_profiled_receiver_for_speculation(obj);
3547         }
3548         if (null_free) {
3549           assert(safe_for_replace, "must be");
3550           obj = null_check(obj);
3551         }
3552         assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineTypeBase(), "should have been scalarized");
3553         return obj;
3554       case Compile::SSC_always_false:
3555         if (null_free) {
3556           assert(safe_for_replace, "must be");
3557           obj = null_check(obj);
3558         }
3559         // It needs a null check because a null will *pass* the cast check.
3560         const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3561         if (objtp != NULL && !objtp->maybe_null()) {
3562           bool is_aastore = (java_bc() == Bytecodes::_aastore);
3563           Deoptimization::DeoptReason reason = is_aastore ?
3564             Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3565           builtin_throw(reason, makecon(TypeKlassPtr::make(klass)));
3566           return top();
3567         } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3568           return null_assert(obj);
3569         }
3570         break; // Fall through to full check
3571       }
3572     }
3573   }
3574 
3575   ciProfileData* data = NULL;

3576   if (failure_control == NULL) {        // use MDO in regular case only
3577     assert(java_bc() == Bytecodes::_aastore ||
3578            java_bc() == Bytecodes::_checkcast,
3579            "interpreter profiles type checks only for these BCs");
3580     if (method()->method_data()->is_mature()) {
3581       data = method()->method_data()->bci_to_data(bci());
3582     }
3583   }
3584 
3585   // Make the merge point
3586   enum { _obj_path = 1, _null_path, PATH_LIMIT };
3587   RegionNode* region = new RegionNode(PATH_LIMIT);
3588   Node*       phi    = new PhiNode(region, toop);
3589   _gvn.set_type(region, Type::CONTROL);
3590   _gvn.set_type(phi, toop);
3591 
3592   C->set_has_split_ifs(true); // Has chance for split-if optimization
3593 
3594   // Use null-cast information if it is available
3595   bool speculative_not_null = false;
3596   bool never_see_null = ((failure_control == NULL)  // regular case only
3597                          && seems_never_null(obj, data, speculative_not_null));
3598 
3599   // Null check; get casted pointer; set region slot 2
3600   Node* null_ctl = top();
3601   Node* not_null_obj = NULL;
3602   if (null_free) {
3603     assert(safe_for_replace, "must be");
3604     not_null_obj = null_check(obj);
3605   } else if (from_inline) {
3606     // TODO 8284443 obj can be null and null should pass
3607     not_null_obj = obj;
3608   } else {
3609     not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3610   }
3611 
3612   // If not_null_obj is dead, only null-path is taken
3613   if (stopped()) {              // Casting a NULL?
3614     set_control(null_ctl);
3615     if (toop->is_inlinetypeptr()) {
3616       return InlineTypePtrNode::make_null(_gvn, toop->inline_klass());
3617     }
3618     return null();
3619   }
3620   region->init_req(_null_path, null_ctl);
3621   phi   ->init_req(_null_path, null());  // Set null path value
3622   if (null_ctl == top()) {
3623     // Do this eagerly, so that pattern matches like is_diamond_phi
3624     // will work even during parsing.
3625     assert(_null_path == PATH_LIMIT-1, "delete last");
3626     region->del_req(_null_path);
3627     phi   ->del_req(_null_path);
3628   }
3629 
3630   Node* cast_obj = NULL;
3631   if (!from_inline && tk->klass_is_exact()) {
3632     // The following optimization tries to statically cast the speculative type of the object
3633     // (for example obtained during profiling) to the type of the superklass and then do a
3634     // dynamic check that the type of the object is what we expect. To work correctly
3635     // for checkcast and aastore the type of superklass should be exact.
3636     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3637     // We may not have profiling here or it may not help us. If we have
3638     // a speculative type use it to perform an exact cast.
3639     ciKlass* spec_obj_type = obj_type->speculative_type();
3640     if (spec_obj_type != NULL || data != NULL) {
3641       cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
3642       if (cast_obj != NULL) {
3643         if (failure_control != NULL) // failure is now impossible
3644           (*failure_control) = top();
3645         // adjust the type of the phi to the exact klass:
3646         phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3647       }
3648     }
3649   }
3650 
3651   if (cast_obj == NULL) {
3652     // Generate the subtype check
3653     Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3654 
3655     // Plug in success path into the merge
3656     cast_obj = from_inline ? not_null_obj : _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3657     // Failure path ends in uncommon trap (or may be dead - failure impossible)
3658     if (failure_control == NULL) {
3659       if (not_subtype_ctrl != top()) { // If failure is possible
3660         PreserveJVMState pjvms(this);
3661         set_control(not_subtype_ctrl);
3662         Node* obj_klass = NULL;
3663         if (not_null_obj->is_InlineTypeBase()) {
3664           obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3665         } else {
3666           obj_klass = load_object_klass(not_null_obj);
3667         }
3668         bool is_aastore = (java_bc() == Bytecodes::_aastore);
3669         Deoptimization::DeoptReason reason = is_aastore ?
3670           Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3671         builtin_throw(reason, obj_klass);
3672       }
3673     } else {
3674       (*failure_control) = not_subtype_ctrl;
3675     }
3676   }
3677 
3678   region->init_req(_obj_path, control());
3679   phi   ->init_req(_obj_path, cast_obj);
3680 
3681   // A merge of NULL or Casted-NotNull obj
3682   Node* res = _gvn.transform(phi);
3683 
3684   // Note I do NOT always 'replace_in_map(obj,result)' here.
3685   //  if( tk->klass()->can_be_primary_super()  )
3686     // This means that if I successfully store an Object into an array-of-String
3687     // I 'forget' that the Object is really now known to be a String.  I have to
3688     // do this because we don't have true union types for interfaces - if I store
3689     // a Baz into an array-of-Interface and then tell the optimizer it's an
3690     // Interface, I forget that it's also a Baz and cannot do Baz-like field
3691     // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
3692   //  replace_in_map( obj, res );
3693 
3694   // Return final merged results
3695   set_control( _gvn.transform(region) );
3696   record_for_igvn(region);
3697 
3698   bool not_inline = !toop->can_be_inline_type();
3699   bool not_flattened = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flatten_array());
3700   if (EnableValhalla && not_flattened) {
3701     // Check if obj has been loaded from an array
3702     obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3703     Node* array = NULL;
3704     if (obj->isa_Load()) {
3705       Node* address = obj->in(MemNode::Address);
3706       if (address->isa_AddP()) {
3707         array = address->as_AddP()->in(AddPNode::Base);
3708       }
3709     } else if (obj->is_Phi()) {
3710       Node* region = obj->in(0);
3711       // TODO make this more robust (see JDK-8231346)
3712       if (region->req() == 3 && region->in(2) != NULL && region->in(2)->in(0) != NULL) {
3713         IfNode* iff = region->in(2)->in(0)->isa_If();
3714         if (iff != NULL) {
3715           iff->is_flat_array_check(&_gvn, &array);
3716         }
3717       }
3718     }
3719     if (array != NULL) {
3720       const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3721       if (ary_t != NULL) {
3722         if (!ary_t->is_not_null_free() && not_inline) {
3723           // Casting array element to a non-inline-type, mark array as not null-free.
3724           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3725           replace_in_map(array, cast);
3726         } else if (!ary_t->is_not_flat()) {
3727           // Casting array element to a non-flattened type, mark array as not flat.
3728           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3729           replace_in_map(array, cast);
3730         }
3731       }
3732     }
3733   }
3734 
3735   if (!stopped() && !res->is_InlineTypeBase()) {
3736     res = record_profiled_receiver_for_speculation(res);
3737     if (toop->is_inlinetypeptr()) {
3738       Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
3739       res = vt;
3740       if (safe_for_replace) {
3741         if (vt->is_InlineType() && C->inlining_incrementally()) {
3742           vt = vt->as_InlineType()->as_ptr(&_gvn);
3743         }
3744         replace_in_map(obj, vt);
3745         replace_in_map(not_null_obj, vt);
3746         replace_in_map(res, vt);
3747       }
3748     }
3749   }
3750   return res;
3751 }
3752 
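     //-----------------------------inline_type_test--------------------------------
     // Test the mark word of 'obj' against the inline type pattern. Returns a Bool
     // that is true when 'obj' is an inline type (or, if 'is_inline' is false, when
     // it is not).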
3753 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
3754   Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3755   Node* mark = make_load(NULL, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3756   Node* mask = MakeConX(markWord::inline_type_pattern);
3757   Node* masked = _gvn.transform(new AndXNode(mark, mask));
3758   Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
3759   return _gvn.transform(new BoolNode(cmp, is_inline ? BoolTest::eq : BoolTest::ne));
3760 }
3761 
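     //-------------------------------is_val_mirror---------------------------------
     // Load the secondary mirror from the java.lang.Class 'mirror' and compare it
     // with 'mirror' itself: the resulting Bool is true when 'mirror' is the val
     // (secondary) mirror of its class.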
3762 Node* GraphKit::is_val_mirror(Node* mirror) {
3763   Node* p = basic_plus_adr(mirror, java_lang_Class::secondary_mirror_offset());
3764   Node* secondary_mirror = access_load_at(mirror, p, _gvn.type(p)->is_ptr(), TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR), T_OBJECT, IN_HEAP);
3765   Node* cmp = _gvn.transform(new CmpPNode(mirror, secondary_mirror));
3766   return _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3767 }
3768 
3769 Node* GraphKit::array_lh_test(Node* klass, jint mask, jint val, bool eq) {
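       // Produces a Bool testing (Klass::layout_helper & mask) == val,
       // or != val when 'eq' is false.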
3770   Node* lh_adr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
3771   // Make sure to use immutable memory here to enable hoisting the check out of loops
3772   Node* lh_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lh_adr, lh_adr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
3773   Node* masked = _gvn.transform(new AndINode(lh_val, intcon(mask)));
3774   Node* cmp = _gvn.transform(new CmpINode(masked, intcon(val)));
3775   return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
3776 }
3777 
3778 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
3779   // We can't use immutable memory here because the mark word is mutable.
3780   // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
3781   // check is moved out of loops (mainly to enable loop unswitching).
3782   Node* mem = UseArrayMarkWordCheck ? memory(Compile::AliasIdxRaw) : immutable_memory();
3783   Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, mem, array_or_klass));
3784   record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
3785   return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
3786 }
3787 
3788 Node* GraphKit::null_free_array_test(Node* klass, bool null_free) {
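       // A null-free array klass has _lh_null_free_array_bit_inplace set in its layout
       // helper, so test (lh & bit) != 0 when null_free and (lh & bit) == 0 otherwise.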
3789   return array_lh_test(klass, Klass::_lh_null_free_array_bit_inplace, 0, !null_free);
3790 }
3791 
3792 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3793 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3794   RegionNode* region = new RegionNode(3);
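       // The region merges two paths: (1) 'val' was null and the array proved not
       // null-free, and (2) 'val' was not null.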
3795   Node* null_ctl = top();
3796   null_check_oop(val, &null_ctl);
3797   if (null_ctl != top()) {
3798     PreserveJVMState pjvms(this);
3799     set_control(null_ctl);
3800     {
3801       // Deoptimize if null-free array
3802       BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
3803       inc_sp(nargs);
3804       uncommon_trap(Deoptimization::Reason_null_check,
3805                     Deoptimization::Action_none);
3806     }
3807     region->init_req(1, control());
3808   }
3809   region->init_req(2, control());
3810   set_control(_gvn.transform(region));
3811   record_for_igvn(region);
3812   if (_gvn.type(val) == TypePtr::NULL_PTR) {
3813     // Since we just successfully stored null, the array can't be null-free.
3814     const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3815     ary_t = ary_t->cast_to_not_null_free();
3816     Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3817     if (safe_for_replace) {
3818       replace_in_map(ary, cast);
3819     }
3820     ary = cast;
3821   }
3822   return ary;
3823 }
3824 
3825 //------------------------------next_monitor-----------------------------------
3826 // What number should be given to the next monitor?
3827 int GraphKit::next_monitor() {
3828   int current = jvms()->monitor_depth()* C->sync_stack_slots();
3829   int next = current + C->sync_stack_slots();
3830   // Keep the toplevel high water mark current:
3831   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
3832   return current;
3833 }
3834 
3835 //------------------------------insert_mem_bar---------------------------------
3836 // Memory barrier to avoid floating things around
3837 // The membar serves as a pinch point between both control and all memory slices.
3838 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3839   MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3840   mb->init_req(TypeFunc::Control, control());
3841   mb->init_req(TypeFunc::Memory,  reset_memory());
3842   Node* membar = _gvn.transform(mb);

3870   }
3871   Node* membar = _gvn.transform(mb);
3872   set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3873   if (alias_idx == Compile::AliasIdxBot) {
3874     merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3875   } else {
3876     set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3877   }
3878   return membar;
3879 }
3880 
3881 //------------------------------shared_lock------------------------------------
3882 // Emit locking code.
3883 FastLockNode* GraphKit::shared_lock(Node* obj) {
3884   // bci is either a monitorenter bc or InvocationEntryBci
3885   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3886   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3887 
3888   if( !GenerateSynchronizationCode )
3889     return NULL;                // Not locking things?
3890 
3891   if (stopped())                // Dead monitor?
3892     return NULL;
3893 
3894   assert(dead_locals_are_killed(), "should kill locals before sync. point");
3895 
3896   // Box the stack location
3897   Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3898   Node* mem = reset_memory();
3899 
3900   FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3901 
3902   // Create the rtm counters for this fast lock if needed.
3903   flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3904 
3905   // Add monitor to debug info for the slow path.  If we block inside the
3906   // slow path and de-opt, we need the monitor hanging around
3907   map()->push_monitor( flock );
3908 
3909   const TypeFunc *tf = LockNode::lock_type();
3910   LockNode *lock = new LockNode(C, tf);

3939   }
3940 #endif
3941 
3942   return flock;
3943 }
3944 
3945 
3946 //------------------------------shared_unlock----------------------------------
3947 // Emit unlocking code.
3948 void GraphKit::shared_unlock(Node* box, Node* obj) {
3949   // bci is either a monitorenter bc or InvocationEntryBci
3950   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3951   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3952 
3953   if( !GenerateSynchronizationCode )
3954     return;
3955   if (stopped()) {               // Dead monitor?
3956     map()->pop_monitor();        // Kill monitor from debug info
3957     return;
3958   }
3959   assert(!obj->is_InlineTypeBase(), "should not unlock on inline type");
3960 
3961   // Memory barrier to avoid floating things down past the locked region
3962   insert_mem_bar(Op_MemBarReleaseLock);
3963 
3964   const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3965   UnlockNode *unlock = new UnlockNode(C, tf);
3966 #ifdef ASSERT
3967   unlock->set_dbg_jvms(sync_jvms());
3968 #endif
3969   uint raw_idx = Compile::AliasIdxRaw;
3970   unlock->init_req( TypeFunc::Control, control() );
3971   unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3972   unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
3973   unlock->init_req( TypeFunc::FramePtr, frameptr() );
3974   unlock->init_req( TypeFunc::ReturnAdr, top() );
3975 
3976   unlock->init_req(TypeFunc::Parms + 0, obj);
3977   unlock->init_req(TypeFunc::Parms + 1, box);
3978   unlock = _gvn.transform(unlock)->as_Unlock();
3979 
3980   Node* mem = reset_memory();
3981 
3982   // unlock has no side-effects, sets few values
3983   set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3984 
3985   // Kill monitor from debug info
3986   map()->pop_monitor( );
3987 }
3988 
3989 //-------------------------------get_layout_helper-----------------------------
3990 // If the given klass is a constant or known to be an array,
3991 // fetch the constant layout helper value into constant_value
3992 // and return (Node*)NULL.  Otherwise, load the non-constant
3993 // layout helper value, and return the node which represents it.
3994 // This two-faced routine is useful because allocation sites
3995 // almost always feature constant types.
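     // Typical use (see new_instance/new_array below):
     //   jint  layout_con = Klass::_lh_neutral_value;
     //   Node* layout_val = get_layout_helper(klass_node, layout_con);
     //   bool  layout_is_con = (layout_val == NULL);  // NULL means layout_con holds the constant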
3996 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3997   const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
3998   if (!StressReflectiveCode && inst_klass != NULL) {
3999     ciKlass* klass = inst_klass->klass();
4000     assert(klass != NULL, "klass should not be NULL");
4001     bool xklass = inst_klass->klass_is_exact();
4002     bool can_be_flattened = false;
4003     if (UseFlatArray && klass->is_obj_array_klass() && !klass->as_obj_array_klass()->is_elem_null_free()) {
4004       // The runtime type of [LMyValue might be [QMyValue due to [QMyValue <: [LMyValue.
4005       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
4006       can_be_flattened = elem->can_be_inline_klass() && (!elem->is_inlinetype() || elem->flatten_array());
4007     }
4008     if (!can_be_flattened && (xklass || klass->is_array_klass())) {
4009       jint lhelper = klass->layout_helper();
4010       if (lhelper != Klass::_lh_neutral_value) {
4011         constant_value = lhelper;
4012         return (Node*) NULL;
4013       }
4014     }
4015   }
4016   constant_value = Klass::_lh_neutral_value;  // put in a known value
4017   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
4018   return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
4019 }
4020 
4021 // We just put in an allocate/initialize with a big raw-memory effect.
4022 // Hook selected additional alias categories on the initialization.
4023 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
4024                                 MergeMemNode* init_in_merge,
4025                                 Node* init_out_raw) {
4026   DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
4027   assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
4028 
4029   Node* prevmem = kit.memory(alias_idx);
4030   init_in_merge->set_memory_at(alias_idx, prevmem);
4031   if (init_out_raw != NULL) {
4032     kit.set_memory(init_out_raw, alias_idx);
4033   }
4034 }
4035 
4036 //---------------------------set_output_for_allocation-------------------------
4037 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
4038                                           const TypeOopPtr* oop_type,
4039                                           bool deoptimize_on_exception) {
4040   int rawidx = Compile::AliasIdxRaw;
4041   alloc->set_req( TypeFunc::FramePtr, frameptr() );
4042   add_safepoint_edges(alloc);
4043   Node* allocx = _gvn.transform(alloc);
4044   set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
4045   // create memory projection for i_o
4046   set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
4047   make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
4048 
4049   // create a memory projection as for the normal control path
4050   Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
4051   set_memory(malloc, rawidx);
4052 
4053   // a normal slow-call doesn't change i_o, but an allocation does
4054   // we create a separate i_o projection for the normal control path
4055   set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
4056   Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
4057 
4058   // put in an initialization barrier
4059   InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
4060                                                  rawoop)->as_Initialize();
4061   assert(alloc->initialization() == init,  "2-way macro link must work");
4062   assert(init ->allocation()     == alloc, "2-way macro link must work");
4063   {
4064     // Extract memory strands which may participate in the new object's
4065     // initialization, and source them from the new InitializeNode.
4066     // This will allow us to observe initializations when they occur,
4067     // and link them properly (as a group) to the InitializeNode.
4068     assert(init->in(InitializeNode::Memory) == malloc, "");
4069     MergeMemNode* minit_in = MergeMemNode::make(malloc);
4070     init->set_req(InitializeNode::Memory, minit_in);
4071     record_for_igvn(minit_in); // fold it up later, if possible
4072     _gvn.set_type(minit_in, Type::MEMORY);
4073     Node* minit_out = memory(rawidx);
4074     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
4075     // Add an edge in the MergeMem for the header fields so an access
4076     // to one of those has correct memory state
4077     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
4078     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
4079     if (oop_type->isa_aryptr()) {
4080       const TypeAryPtr* arytype = oop_type->is_aryptr();
4081       if (arytype->klass()->is_flat_array_klass()) {
4082         // Initially all flattened array accesses share a single slice
4083         // but that changes after parsing. Prepare the memory graph so
4084         // it can optimize flattened array accesses properly once they
4085         // don't share a single slice.
4086         assert(C->flattened_accesses_share_alias(), "should be set at parse time");
4087         C->set_flattened_accesses_share_alias(false);
4088         ciFlatArrayKlass* vak = arytype->klass()->as_flat_array_klass();
4089         ciInlineKlass* vk = vak->element_klass()->as_inline_klass();
4090         for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
4091           ciField* field = vk->nonstatic_field_at(i);
4092           if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
4093             continue;  // do not bother to track really large numbers of fields
4094           int off_in_vt = field->offset() - vk->first_field_offset();
4095           const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
4096           int fieldidx = C->get_alias_index(adr_type, true);
4097           // Pass NULL for init_out. Hooking a memory edge for each flat array element field on the Initialize node
4098           // can result in per-field Phis being created, which confuses the logic of
4099           // Compile::adjust_flattened_array_access_aliases().
4100           hook_memory_on_init(*this, fieldidx, minit_in, NULL);
4101         }
4102         C->set_flattened_accesses_share_alias(true);
4103         hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
4104       } else {
4105         const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
4106         int            elemidx  = C->get_alias_index(telemref);
4107         hook_memory_on_init(*this, elemidx, minit_in, minit_out);
4108       }
4109     } else if (oop_type->isa_instptr()) {
4110       set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
4111       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
4112       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
4113         ciField* field = ik->nonstatic_field_at(i);
4114         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
4115           continue;  // do not bother to track really large numbers of fields
4116         // Find (or create) the alias category for this field:
4117         int fieldidx = C->alias_type(field)->index();
4118         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
4119       }
4120     }
4121   }
4122 
4123   // Cast raw oop to the real thing...
4124   Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
4125   javaoop = _gvn.transform(javaoop);
4126   C->set_recent_alloc(control(), javaoop);
4127   assert(just_allocated_object(control()) == javaoop, "just allocated");
4128 
4129 #ifdef ASSERT
4130   { // Verify that the AllocateNode::Ideal_allocation recognizers work:

4141       assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
4142     }
4143   }
4144 #endif //ASSERT
4145 
4146   return javaoop;
4147 }
4148 
4149 //---------------------------new_instance--------------------------------------
4150 // This routine takes a klass_node which may be constant (for a static type)
4151 // or may be non-constant (for reflective code).  It will work equally well
4152 // for either, and the graph will fold nicely if the optimizer later reduces
4153 // the type to a constant.
4154 // The optional arguments are for specialized use by intrinsics:
4155 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
4156 //  - If 'return_size_val' is not null, report the total object size to the caller.
4157 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
4158 Node* GraphKit::new_instance(Node* klass_node,
4159                              Node* extra_slow_test,
4160                              Node* *return_size_val,
4161                              bool deoptimize_on_exception,
4162                              InlineTypeBaseNode* inline_type_node) {
4163   // Compute size in doublewords
4164   // The size is always an integral number of doublewords, represented
4165   // as a positive bytewise size stored in the klass's layout_helper.
4166   // The layout_helper also encodes (in a low bit) the need for a slow path.
4167   jint  layout_con = Klass::_lh_neutral_value;
4168   Node* layout_val = get_layout_helper(klass_node, layout_con);
4169   bool  layout_is_con = (layout_val == NULL);
4170 
4171   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
4172   // Generate the initial go-slow test.  It's either ALWAYS (return a
4173   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
4174   // case) a computed value derived from the layout_helper.
4175   Node* initial_slow_test = NULL;
4176   if (layout_is_con) {
4177     assert(!StressReflectiveCode, "stress mode does not use these paths");
4178     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
4179     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
4180   } else {   // reflective case
4181     // This reflective path is used by Unsafe.allocateInstance.
4182     // (It may be stress-tested by specifying StressReflectiveCode.)
4183     // Basically, we want to get into the VM if there's an illegal argument.
4184     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
4185     initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
4186     if (extra_slow_test != intcon(0)) {
4187       initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
4188     }
4189     // (Macro-expander will further convert this to a Bool, if necessary.)

4200 
4201     // Clear the low bits to extract layout_helper_size_in_bytes:
4202     assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
4203     Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
4204     size = _gvn.transform( new AndXNode(size, mask) );
4205   }
4206   if (return_size_val != NULL) {
4207     (*return_size_val) = size;
4208   }
4209 
4210   // This is a precise notnull oop of the klass.
4211   // (Actually, it need not be precise if this is a reflective allocation.)
4212   // It's what we cast the result to.
4213   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
4214   if (!tklass)  tklass = TypeInstKlassPtr::OBJECT;
4215   const TypeOopPtr* oop_type = tklass->as_instance_type();
4216 
4217   // Now generate allocation code
4218 
4219   // The entire memory state is needed for slow path of the allocation
4220   // since GC and deoptimization can happen.
4221   Node *mem = reset_memory();
4222   set_all_memory(mem); // Create new memory state
4223 
4224   AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
4225                                          control(), mem, i_o(),
4226                                          size, klass_node,
4227                                          initial_slow_test, inline_type_node);
4228 
4229   return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
4230 }
4231 
4232 //-------------------------------new_array-------------------------------------
4233 // helper for newarray and anewarray
4234 // The 'length' parameter is (obviously) the length of the array.
4235 // See comments on new_instance for the meaning of the other arguments.
4236 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
4237                           Node* length,         // number of array elements
4238                           int   nargs,          // number of arguments to push back for uncommon trap
4239                           Node* *return_size_val,
4240                           bool deoptimize_on_exception) {
4241   jint  layout_con = Klass::_lh_neutral_value;
4242   Node* layout_val = get_layout_helper(klass_node, layout_con);
4243   bool  layout_is_con = (layout_val == NULL);
4244 
4245   if (!layout_is_con && !StressReflectiveCode &&
4246       !too_many_traps(Deoptimization::Reason_class_check)) {
4247     // This is a reflective array creation site.
4248     // Optimistically assume that it is a subtype of Object[],
4249     // so that we can fold up all the address arithmetic.
4250     layout_con = Klass::array_layout_helper(T_OBJECT);
4251     Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
4252     Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
4253     { BuildCutout unless(this, bol_lh, PROB_MAX);
4254       inc_sp(nargs);
4255       uncommon_trap(Deoptimization::Reason_class_check,
4256                     Deoptimization::Action_maybe_recompile);
4257     }
4258     layout_val = NULL;
4259     layout_is_con = true;
4260   }
4261 
4262   // Generate the initial go-slow test.  Make sure we do not overflow
4263   // if length is huge (near 2Gig) or negative!  We do not need
4264   // exact double-words here, just a close approximation of needed
4265   // double-words.  We can't add any offset or rounding bits, lest we
4267   // take a size of -1 bytes and make it positive.  Use an unsigned
4267   // compare, so negative sizes look hugely positive.
4268   int fast_size_limit = FastAllocateSizeLimit;
4269   if (layout_is_con) {
4270     assert(!StressReflectiveCode, "stress mode does not use these paths");
4271     // Increase the size limit if we have exact knowledge of array type.
4272     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
4273     fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
4274   }
4275 
4276   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
4277   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
4278 
4279   // --- Size Computation ---
4280   // array_size = round_to_heap(array_header + (length << elem_shift));
4281   // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
4282   // and align_to(x, y) == ((x + y-1) & ~(y-1))
4283   // The rounding mask is strength-reduced, if possible.
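       // For example, with 8-byte MinObjAlignment, a byte array with a 16-byte header
       // and length 13 needs 16 + 13 = 29 bytes, which rounds up to (29 + 7) & ~7 == 32.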
4284   int round_mask = MinObjAlignmentInBytes - 1;
4285   Node* header_size = NULL;
4286   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
4287   // (T_BYTE has the weakest alignment and size restrictions...)
4288   if (layout_is_con) {
4289     int       hsize  = Klass::layout_helper_header_size(layout_con);
4290     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
4291     bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
4292     if ((round_mask & ~right_n_bits(eshift)) == 0)
4293       round_mask = 0;  // strength-reduce it if it goes away completely
4294     assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
4295     assert(header_size_min <= hsize, "generic minimum is smallest");
4296     header_size_min = hsize;
4297     header_size = intcon(hsize + round_mask);
4298   } else {
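         // Extract the header size from the layout helper:
         //   hsize = (layout_helper >> _lh_header_size_shift) & _lh_header_size_mask
         // and add the rounding mask so the final AndX rounds the total size up.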
4299     Node* hss   = intcon(Klass::_lh_header_size_shift);
4300     Node* hsm   = intcon(Klass::_lh_header_size_mask);
4301     Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
4302     hsize       = _gvn.transform( new AndINode(hsize, hsm) );
4303     Node* mask  = intcon(round_mask);
4304     header_size = _gvn.transform( new AddINode(hsize, mask) );
4305   }
4306 
4307   Node* elem_shift = NULL;
4308   if (layout_is_con) {
4309     int eshift = Klass::layout_helper_log2_element_size(layout_con);
4310     if (eshift != 0)
4311       elem_shift = intcon(eshift);
4312   } else {
4313     // There is no need to mask or shift this value.
4314     // The semantics of LShiftINode include an implicit mask to 0x1F.

4358   // places, one where the length is sharply limited, and the other
4359   // after a successful allocation.
4360   Node* abody = lengthx;
4361   if (elem_shift != NULL)
4362     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
4363   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
4364   if (round_mask != 0) {
4365     Node* mask = MakeConX(~round_mask);
4366     size       = _gvn.transform( new AndXNode(size, mask) );
4367   }
4368   // else if round_mask == 0, the size computation is self-rounding
4369 
4370   if (return_size_val != NULL) {
4371     // This is the size
4372     (*return_size_val) = size;
4373   }
4374 
4375   // Now generate allocation code
4376 
4377   // The entire memory state is needed for slow path of the allocation
4378   // since GC and deoptimization can happen.
4379   Node *mem = reset_memory();
4380   set_all_memory(mem); // Create new memory state
4381 
4382   if (initial_slow_test->is_Bool()) {
4383     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
4384     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
4385   }
4386 
4387   const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
4388   const TypeOopPtr* ary_type = ary_klass->as_instance_type();
4389   Node* valid_length_test = _gvn.intcon(1);
4390   if (ary_type->klass()->is_array_klass()) {
4391     BasicType bt = ary_type->klass()->as_array_klass()->element_type()->basic_type();
4392     jint max = TypeAryPtr::max_array_length(bt);
4393     Node* valid_length_cmp  = _gvn.transform(new CmpUNode(length, intcon(max)));
4394     valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
4395   }
4396 
4397   const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
4398 
4399   // Inline type array variants:
4400   // - null-ok:              MyValue.ref[] (ciObjArrayKlass "[LMyValue")
4401   // - null-free:            MyValue.val[] (ciObjArrayKlass "[QMyValue")
4402   // - null-free, flattened: MyValue.val[] (ciFlatArrayKlass "[QMyValue")
4403   // Check if array is a null-free, non-flattened inline type array
4404   // that needs to be initialized with the default inline type.
4405   Node* default_value = NULL;
4406   Node* raw_default_value = NULL;
4407   if (ary_ptr != NULL && ary_ptr->klass_is_exact()) {
4408     // Array type is known
4409     if (ary_ptr->klass()->as_array_klass()->is_elem_null_free()) {
4410       ciInlineKlass* vk = ary_ptr->klass()->as_array_klass()->element_klass()->as_inline_klass();
4411       if (!vk->flatten_array()) {
4412         default_value = InlineTypeNode::default_oop(gvn(), vk);
4413       }
4414     }
4415   } else if (ary_klass->klass()->can_be_inline_array_klass()) {
4416     // Array type is not known, add runtime checks
4417     assert(!ary_klass->klass_is_exact(), "unexpected exact type");
4418     Node* r = new RegionNode(3);
4419     default_value = new PhiNode(r, TypeInstPtr::BOTTOM);
4420 
4421     Node* bol = array_lh_test(klass_node, Klass::_lh_array_tag_flat_value_bit_inplace | Klass::_lh_null_free_array_bit_inplace, Klass::_lh_null_free_array_bit_inplace);
4422     IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
4423 
4424     // Null-free, non-flattened inline type array, initialize with the default value
4425     set_control(_gvn.transform(new IfTrueNode(iff)));
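         // Load the element klass, its InlineKlass fixed block, and the offset (within
         // the element's mirror) at which the default value oop is stored, then load
         // the default oop itself from the mirror.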
4426     Node* p = basic_plus_adr(klass_node, in_bytes(ArrayKlass::element_klass_offset()));
4427     Node* eklass = _gvn.transform(LoadKlassNode::make(_gvn, control(), immutable_memory(), p, TypeInstPtr::KLASS));
4428     Node* adr_fixed_block_addr = basic_plus_adr(eklass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()));
4429     Node* adr_fixed_block = make_load(control(), adr_fixed_block_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4430     Node* default_value_offset_addr = basic_plus_adr(adr_fixed_block, in_bytes(InlineKlass::default_value_offset_offset()));
4431     Node* default_value_offset = make_load(control(), default_value_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
4432     Node* elem_mirror = load_mirror_from_klass(eklass);
4433     Node* default_value_addr = basic_plus_adr(elem_mirror, ConvI2X(default_value_offset));
4434     Node* val = access_load_at(elem_mirror, default_value_addr, _gvn.type(default_value_addr)->is_ptr(), TypeInstPtr::BOTTOM, T_OBJECT, IN_HEAP);
4435     r->init_req(1, control());
4436     default_value->init_req(1, val);
4437 
4438     // Otherwise initialize with all zeros
4439     r->init_req(2, _gvn.transform(new IfFalseNode(iff)));
4440     default_value->init_req(2, null());
4441 
4442     set_control(_gvn.transform(r));
4443     default_value = _gvn.transform(default_value);
4444   }
4445   if (default_value != NULL) {
4446     if (UseCompressedOops) {
4447       // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
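           // e.g. a narrow oop 0x0000c0de is replicated to 0x0000c0de0000c0de so that a
           // single 64-bit fill word initializes two adjacent narrow-oop elements.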
4448       default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
4449       Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
4450       Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
4451       raw_default_value = _gvn.transform(new OrLNode(lower, upper));
4452     } else {
4453       raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
4454     }
4455   }
4456 
4457   // Create the AllocateArrayNode and its result projections
4458   AllocateArrayNode* alloc = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
4459                                                    control(), mem, i_o(),
4460                                                    size, klass_node,
4461                                                    initial_slow_test,
4462                                                    length, valid_length_test,
4463                                                    default_value, raw_default_value);
4464 
4465   // Cast to correct type.  Note that the klass_node may be constant or not,
4466   // and in the latter case the actual array type will be inexact also.
4467   // (This happens via a non-constant argument to inline_native_newArray.)
4468   // In any case, the value of klass_node provides the desired array type.
4469   const TypeInt* length_type = _gvn.find_int_type(length);
4470   if (ary_type->isa_aryptr() && length_type != NULL) {
4471     // Try to get a better type than POS for the size
4472     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4473   }
4474 
4475   Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
4476 
4477   array_ideal_length(alloc, ary_type, true);
4478   return javaoop;
4479 }
4480 
4481 // The following "Ideal_foo" functions are placed here because they recognize
4482 // the graph shapes created by the functions immediately above.
4483 

4598   set_all_memory(ideal.merged_memory());
4599   set_i_o(ideal.i_o());
4600   set_control(ideal.ctrl());
4601 }
4602 
4603 void GraphKit::final_sync(IdealKit& ideal) {
4604   // Final sync IdealKit and graphKit.
4605   sync_kit(ideal);
4606 }
4607 
4608 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4609   Node* len = load_array_length(load_String_value(str, set_ctrl));
4610   Node* coder = load_String_coder(str, set_ctrl);
4611   // Divide length by 2 if coder is UTF16
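       // (coder is 0 for LATIN1 and 1 for UTF16, so 'len >> coder' yields the char count)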
4612   return _gvn.transform(new RShiftINode(len, coder));
4613 }
4614 
4615 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4616   int value_offset = java_lang_String::value_offset();
4617   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4618                                                      false, NULL, Type::Offset(0));
4619   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4620   const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4621                                                   TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, true, true),
4622                                                   ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
4623   Node* p = basic_plus_adr(str, str, value_offset);
4624   Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4625                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4626   return load;
4627 }
4628 
4629 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4630   if (!CompactStrings) {
4631     return intcon(java_lang_String::CODER_UTF16);
4632   }
4633   int coder_offset = java_lang_String::coder_offset();
4634   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4635                                                      false, NULL, Type::Offset(0));
4636   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4637 
4638   Node* p = basic_plus_adr(str, str, coder_offset);
4639   Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4640                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4641   return load;
4642 }
4643 
4644 void GraphKit::store_String_value(Node* str, Node* value) {
4645   int value_offset = java_lang_String::value_offset();
4646   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4647                                                      false, NULL, Type::Offset(0));
4648   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4649 
4650   access_store_at(str,  basic_plus_adr(str, value_offset), value_field_type,
4651                   value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4652 }
4653 
4654 void GraphKit::store_String_coder(Node* str, Node* value) {
4655   int coder_offset = java_lang_String::coder_offset();
4656   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4657                                                      false, NULL, Type::Offset(0));
4658   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4659 
4660   access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4661                   value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4662 }
4663 
4664 // Capture src and dst memory state with a MergeMemNode
4665 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4666   if (src_type == dst_type) {
4667     // Types are equal, we don't need a MergeMemNode
4668     return memory(src_type);
4669   }
4670   MergeMemNode* merge = MergeMemNode::make(map()->memory());
4671   record_for_igvn(merge); // fold it up later, if possible
4672   int src_idx = C->get_alias_index(src_type);
4673   int dst_idx = C->get_alias_index(dst_type);
4674   merge->set_memory_at(src_idx, memory(src_idx));
4675   merge->set_memory_at(dst_idx, memory(dst_idx));
4676   return merge;
4677 }

4750   i_char->init_req(2, AddI(i_char, intcon(2)));
4751 
4752   set_control(IfFalse(iff));
4753   set_memory(st, TypeAryPtr::BYTES);
4754 }
4755 
4756 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4757   if (!field->is_constant()) {
4758     return NULL; // Field not marked as constant.
4759   }
4760   ciInstance* holder = NULL;
4761   if (!field->is_static()) {
4762     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4763     if (const_oop != NULL && const_oop->is_instance()) {
4764       holder = const_oop->as_instance();
4765     }
4766   }
4767   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4768                                                         /*is_unsigned_load=*/false);
4769   if (con_type != NULL) {
4770     Node* con = makecon(con_type);
4771     if (field->type()->is_inlinetype()) {
4772       con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
4773     } else if (con_type->is_inlinetypeptr()) {
4774       con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
4775     }
4776     return con;
4777   }
4778   return NULL;
4779 }
4780 
4781 //---------------------------load_mirror_from_klass----------------------------
4782 // Given a klass oop, load its java mirror (a java.lang.Class oop).
4783 Node* GraphKit::load_mirror_from_klass(Node* klass) {
4784   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
4785   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4786   // mirror = ((OopHandle)mirror)->resolve();
4787   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
4788 }