1 /*
   2  * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "ci/ciInlineKlass.hpp"
  28 #include "ci/ciUtilities.hpp"
  29 #include "classfile/javaClasses.hpp"
  30 #include "ci/ciNativeEntryPoint.hpp"
  31 #include "ci/ciObjArray.hpp"
  32 #include "asm/register.hpp"
  33 #include "compiler/compileLog.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "opto/addnode.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/convertnode.hpp"
  41 #include "opto/graphKit.hpp"
  42 #include "opto/idealKit.hpp"
  43 #include "opto/inlinetypenode.hpp"
  44 #include "opto/intrinsicnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"
  49 #include "opto/parse.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subtypenode.hpp"
  53 #include "runtime/deoptimization.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "utilities/bitMap.inline.hpp"
  56 #include "utilities/powerOfTwo.hpp"
  57 #include "utilities/growableArray.hpp"
  58 
  59 //----------------------------GraphKit-----------------------------------------
  60 // Main utility constructor.
  61 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
  62   : Phase(Phase::Parser),
  63     _env(C->env()),
  64     _gvn((gvn != NULL) ? *gvn : *C->initial_gvn()),
  65     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  66 {
  67   assert(gvn == NULL || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
  68   _exceptions = jvms->map()->next_exception();
  69   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  70   set_jvms(jvms);
  71 #ifdef ASSERT
  72   if (_gvn.is_IterGVN() != NULL) {
  73     assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
  74     // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
  75     _worklist_size = _gvn.C->for_igvn()->size();
  76   }
  77 #endif
  78 }
  79 
  80 // Private constructor for parser.
  81 GraphKit::GraphKit()
  82   : Phase(Phase::Parser),
  83     _env(C->env()),
  84     _gvn(*C->initial_gvn()),
  85     _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
  86 {
  87   _exceptions = NULL;
  88   set_map(NULL);
  89   debug_only(_sp = -99);
  90   debug_only(set_bci(-99));
  91 }
  92 
  93 
  94 
  95 //---------------------------clean_stack---------------------------------------
  96 // Clear away rubbish from the stack area of the JVM state.
  97 // This destroys any arguments that may be waiting on the stack.
  98 void GraphKit::clean_stack(int from_sp) {
  99   SafePointNode* map      = this->map();
 100   JVMState*      jvms     = this->jvms();
 101   int            stk_size = jvms->stk_size();
 102   int            stkoff   = jvms->stkoff();
 103   Node*          top      = this->top();
 104   for (int i = from_sp; i < stk_size; i++) {
 105     if (map->in(stkoff + i) != top) {
 106       map->set_req(stkoff + i, top);
 107     }
 108   }
 109 }
 110 
 111 
 112 //--------------------------------sync_jvms-----------------------------------
 113 // Make sure our current jvms agrees with our parse state.
 114 JVMState* GraphKit::sync_jvms() const {
 115   JVMState* jvms = this->jvms();
 116   jvms->set_bci(bci());       // Record the new bci in the JVMState
 117   jvms->set_sp(sp());         // Record the new sp in the JVMState
 118   assert(jvms_in_sync(), "jvms is now in sync");
 119   return jvms;
 120 }
 121 
 122 //--------------------------------sync_jvms_for_reexecute---------------------
 123 // Make sure our current jvms agrees with our parse state.  This version
 124 // uses the reexecute_sp for reexecuting bytecodes.
 125 JVMState* GraphKit::sync_jvms_for_reexecute() {
 126   JVMState* jvms = this->jvms();
 127   jvms->set_bci(bci());          // Record the new bci in the JVMState
 128   jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
 129   return jvms;
 130 }
 131 
 132 #ifdef ASSERT
 133 bool GraphKit::jvms_in_sync() const {
 134   Parse* parse = is_Parse();
 135   if (parse == NULL) {
 136     if (bci() !=      jvms()->bci())          return false;
 137     if (sp()  != (int)jvms()->sp())           return false;
 138     return true;
 139   }
 140   if (jvms()->method() != parse->method())    return false;
 141   if (jvms()->bci()    != parse->bci())       return false;
 142   int jvms_sp = jvms()->sp();
 143   if (jvms_sp          != parse->sp())        return false;
 144   int jvms_depth = jvms()->depth();
 145   if (jvms_depth       != parse->depth())     return false;
 146   return true;
 147 }
 148 
 149 // Local helper checks for special internal merge points
 150 // used to accumulate and merge exception states.
 151 // They are marked by the region's in(0) edge being the map itself.
 152 // Such merge points must never "escape" into the parser at large,
 153 // until they have been handed to gvn.transform.
 154 static bool is_hidden_merge(Node* reg) {
 155   if (reg == NULL)  return false;
 156   if (reg->is_Phi()) {
 157     reg = reg->in(0);
 158     if (reg == NULL)  return false;
 159   }
 160   return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
 161 }
 162 
 163 void GraphKit::verify_map() const {
 164   if (map() == NULL)  return;  // null map is OK
 165   assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
 166   assert(!map()->has_exceptions(),    "call add_exception_states_from 1st");
 167   assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
 168 }
 169 
 170 void GraphKit::verify_exception_state(SafePointNode* ex_map) {
 171   assert(ex_map->next_exception() == NULL, "not already part of a chain");
 172   assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
 173 }
 174 #endif
 175 
 176 //---------------------------stop_and_kill_map---------------------------------
 177 // Set _map to NULL, signalling a stop to further bytecode execution.
 178 // First smash the current map's control to a constant, to mark it dead.
 179 void GraphKit::stop_and_kill_map() {
 180   SafePointNode* dead_map = stop();
 181   if (dead_map != NULL) {
 182     dead_map->disconnect_inputs(C); // Mark the map as killed.
 183     assert(dead_map->is_killed(), "must be so marked");
 184   }
 185 }
 186 
 187 
 188 //--------------------------------stopped--------------------------------------
 189 // Tell if _map is NULL, or control is top.
 190 bool GraphKit::stopped() {
 191   if (map() == NULL)           return true;
 192   else if (control() == top()) return true;
 193   else                         return false;
 194 }
 195 
 196 
 197 //-----------------------------has_ex_handler----------------------------------
 198 // Tell if this method or any caller method has exception handlers.
 199 bool GraphKit::has_ex_handler() {
 200   for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
 201     if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
 202       return true;
 203     }
 204   }
 205   return false;
 206 }
 207 
 208 //------------------------------save_ex_oop------------------------------------
 209 // Save an exception without blowing stack contents or other JVM state.
 210 void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
 211   assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
 212   ex_map->add_req(ex_oop);
 213   debug_only(verify_exception_state(ex_map));
 214 }
 215 
 216 inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
 217   assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
 218   Node* ex_oop = ex_map->in(ex_map->req()-1);
 219   if (clear_it)  ex_map->del_req(ex_map->req()-1);
 220   return ex_oop;
 221 }
 222 
 223 //-----------------------------saved_ex_oop------------------------------------
 224 // Recover a saved exception from its map.
 225 Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
 226   return common_saved_ex_oop(ex_map, false);
 227 }
 228 
 229 //--------------------------clear_saved_ex_oop---------------------------------
 230 // Erase a previously saved exception from its map.
 231 Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
 232   return common_saved_ex_oop(ex_map, true);
 233 }
 234 
 235 #ifdef ASSERT
 236 //---------------------------has_saved_ex_oop----------------------------------
 237 // Erase a previously saved exception from its map.
 238 bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
 239   return ex_map->req() == ex_map->jvms()->endoff()+1;
 240 }
 241 #endif
 242 
 243 //-------------------------make_exception_state--------------------------------
 244 // Turn the current JVM state into an exception state, appending the ex_oop.
 245 SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
 246   sync_jvms();
 247   SafePointNode* ex_map = stop();  // do not manipulate this map any more
 248   set_saved_ex_oop(ex_map, ex_oop);
 249   return ex_map;
 250 }
 251 
 252 
 253 //--------------------------add_exception_state--------------------------------
 254 // Add an exception to my list of exceptions.
 255 void GraphKit::add_exception_state(SafePointNode* ex_map) {
 256   if (ex_map == NULL || ex_map->control() == top()) {
 257     return;
 258   }
 259 #ifdef ASSERT
 260   verify_exception_state(ex_map);
 261   if (has_exceptions()) {
 262     assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
 263   }
 264 #endif
 265 
 266   // If there is already an exception of exactly this type, merge with it.
 267   // In particular, null-checks and other low-level exceptions common up here.
 268   Node*       ex_oop  = saved_ex_oop(ex_map);
 269   const Type* ex_type = _gvn.type(ex_oop);
 270   if (ex_oop == top()) {
 271     // No action needed.
 272     return;
 273   }
 274   assert(ex_type->isa_instptr(), "exception must be an instance");
 275   for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
 276     const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
 277     // We check sp also because call bytecodes can generate exceptions
 278     // both before and after arguments are popped!
 279     if (ex_type2 == ex_type
 280         && e2->_jvms->sp() == ex_map->_jvms->sp()) {
 281       combine_exception_states(ex_map, e2);
 282       return;
 283     }
 284   }
 285 
 286   // No pre-existing exception of the same type.  Chain it on the list.
 287   push_exception_state(ex_map);
 288 }
 289 
 290 //-----------------------add_exception_states_from-----------------------------
 291 void GraphKit::add_exception_states_from(JVMState* jvms) {
 292   SafePointNode* ex_map = jvms->map()->next_exception();
 293   if (ex_map != NULL) {
 294     jvms->map()->set_next_exception(NULL);
 295     for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
 296       next_map = ex_map->next_exception();
 297       ex_map->set_next_exception(NULL);
 298       add_exception_state(ex_map);
 299     }
 300   }
 301 }
 302 
 303 //-----------------------transfer_exceptions_into_jvms-------------------------
 304 JVMState* GraphKit::transfer_exceptions_into_jvms() {
 305   if (map() == NULL) {
 306     // We need a JVMS to carry the exceptions, but the map has gone away.
 307     // Create a scratch JVMS, cloned from any of the exception states...
 308     if (has_exceptions()) {
 309       _map = _exceptions;
 310       _map = clone_map();
 311       _map->set_next_exception(NULL);
 312       clear_saved_ex_oop(_map);
 313       debug_only(verify_map());
 314     } else {
 315       // ...or created from scratch
 316       JVMState* jvms = new (C) JVMState(_method, NULL);
 317       jvms->set_bci(_bci);
 318       jvms->set_sp(_sp);
 319       jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
 320       set_jvms(jvms);
 321       for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
 322       set_all_memory(top());
 323       while (map()->req() < jvms->endoff())  map()->add_req(top());
 324     }
 325     // (This is a kludge, in case you didn't notice.)
 326     set_control(top());
 327   }
 328   JVMState* jvms = sync_jvms();
 329   assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
 330   jvms->map()->set_next_exception(_exceptions);
 331   _exceptions = NULL;   // done with this set of exceptions
 332   return jvms;
 333 }
 334 
 335 static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
 336   assert(is_hidden_merge(dstphi), "must be a special merge node");
 337   assert(is_hidden_merge(srcphi), "must be a special merge node");
 338   uint limit = srcphi->req();
 339   for (uint i = PhiNode::Input; i < limit; i++) {
 340     dstphi->add_req(srcphi->in(i));
 341   }
 342 }
 343 static inline void add_one_req(Node* dstphi, Node* src) {
 344   assert(is_hidden_merge(dstphi), "must be a special merge node");
 345   assert(!is_hidden_merge(src), "must not be a special merge node");
 346   dstphi->add_req(src);
 347 }
 348 
 349 //-----------------------combine_exception_states------------------------------
 350 // This helper function combines exception states by building phis on a
 351 // specially marked state-merging region.  These regions and phis are
 352 // untransformed, and can build up gradually.  The region is marked by
 353 // having a control input of its exception map, rather than NULL.  Such
 354 // regions do not appear except in this function, and in use_exception_state.
 355 void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
 356   if (failing())  return;  // dying anyway...
 357   JVMState* ex_jvms = ex_map->_jvms;
 358   assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
 359   assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
 360   assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
 361   assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
 362   assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
 363   assert(ex_map->req() == phi_map->req(), "matching maps");
 364   uint tos = ex_jvms->stkoff() + ex_jvms->sp();
 365   Node*         hidden_merge_mark = root();
 366   Node*         region  = phi_map->control();
 367   MergeMemNode* phi_mem = phi_map->merged_memory();
 368   MergeMemNode* ex_mem  = ex_map->merged_memory();
 369   if (region->in(0) != hidden_merge_mark) {
 370     // The control input is not (yet) a specially-marked region in phi_map.
 371     // Make it so, and build some phis.
 372     region = new RegionNode(2);
 373     _gvn.set_type(region, Type::CONTROL);
 374     region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
 375     region->init_req(1, phi_map->control());
 376     phi_map->set_control(region);
 377     Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
 378     record_for_igvn(io_phi);
 379     _gvn.set_type(io_phi, Type::ABIO);
 380     phi_map->set_i_o(io_phi);
 381     for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
 382       Node* m = mms.memory();
 383       Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
 384       record_for_igvn(m_phi);
 385       _gvn.set_type(m_phi, Type::MEMORY);
 386       mms.set_memory(m_phi);
 387     }
 388   }
 389 
 390   // Either or both of phi_map and ex_map might already be converted into phis.
 391   Node* ex_control = ex_map->control();
 392   // if there is special marking on ex_map also, we add multiple edges from src
 393   bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
 394   // how wide was the destination phi_map, originally?
 395   uint orig_width = region->req();
 396 
 397   if (add_multiple) {
 398     add_n_reqs(region, ex_control);
 399     add_n_reqs(phi_map->i_o(), ex_map->i_o());
 400   } else {
 401     // ex_map has no merges, so we just add single edges everywhere
 402     add_one_req(region, ex_control);
 403     add_one_req(phi_map->i_o(), ex_map->i_o());
 404   }
 405   for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
 406     if (mms.is_empty()) {
 407       // get a copy of the base memory, and patch some inputs into it
 408       const TypePtr* adr_type = mms.adr_type(C);
 409       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
 410       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
 411       mms.set_memory(phi);
 412       // Prepare to append interesting stuff onto the newly sliced phi:
 413       while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
 414     }
 415     // Append stuff from ex_map:
 416     if (add_multiple) {
 417       add_n_reqs(mms.memory(), mms.memory2());
 418     } else {
 419       add_one_req(mms.memory(), mms.memory2());
 420     }
 421   }
 422   uint limit = ex_map->req();
 423   for (uint i = TypeFunc::Parms; i < limit; i++) {
 424     // Skip everything in the JVMS after tos.  (The ex_oop follows.)
 425     if (i == tos)  i = ex_jvms->monoff();
 426     Node* src = ex_map->in(i);
 427     Node* dst = phi_map->in(i);
 428     if (src != dst) {
 429       PhiNode* phi;
 430       if (dst->in(0) != region) {
 431         dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
 432         record_for_igvn(phi);
 433         _gvn.set_type(phi, phi->type());
 434         phi_map->set_req(i, dst);
 435         // Prepare to append interesting stuff onto the new phi:
 436         while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
 437       } else {
 438         assert(dst->is_Phi(), "nobody else uses a hidden region");
 439         phi = dst->as_Phi();
 440       }
 441       if (add_multiple && src->in(0) == ex_control) {
 442         // Both are phis.
 443         add_n_reqs(dst, src);
 444       } else {
 445         while (dst->req() < region->req())  add_one_req(dst, src);
 446       }
 447       const Type* srctype = _gvn.type(src);
 448       if (phi->type() != srctype) {
 449         const Type* dsttype = phi->type()->meet_speculative(srctype);
 450         if (phi->type() != dsttype) {
 451           phi->set_type(dsttype);
 452           _gvn.set_type(phi, dsttype);
 453         }
 454       }
 455     }
 456   }
 457   phi_map->merge_replaced_nodes_with(ex_map);
 458 }
 459 
 460 //--------------------------use_exception_state--------------------------------
 461 Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
 462   if (failing()) { stop(); return top(); }
 463   Node* region = phi_map->control();
 464   Node* hidden_merge_mark = root();
 465   assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
 466   Node* ex_oop = clear_saved_ex_oop(phi_map);
 467   if (region->in(0) == hidden_merge_mark) {
 468     // Special marking for internal ex-states.  Process the phis now.
 469     region->set_req(0, region);  // now it's an ordinary region
 470     set_jvms(phi_map->jvms());   // ...so now we can use it as a map
 471     // Note: Setting the jvms also sets the bci and sp.
 472     set_control(_gvn.transform(region));
 473     uint tos = jvms()->stkoff() + sp();
 474     for (uint i = 1; i < tos; i++) {
 475       Node* x = phi_map->in(i);
 476       if (x->in(0) == region) {
 477         assert(x->is_Phi(), "expected a special phi");
 478         phi_map->set_req(i, _gvn.transform(x));
 479       }
 480     }
 481     for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
 482       Node* x = mms.memory();
 483       if (x->in(0) == region) {
 484         assert(x->is_Phi(), "nobody else uses a hidden region");
 485         mms.set_memory(_gvn.transform(x));
 486       }
 487     }
 488     if (ex_oop->in(0) == region) {
 489       assert(ex_oop->is_Phi(), "expected a special phi");
 490       ex_oop = _gvn.transform(ex_oop);
 491     }
 492   } else {
 493     set_jvms(phi_map->jvms());
 494   }
 495 
 496   assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
 497   assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
 498   return ex_oop;
 499 }
 500 
 501 //---------------------------------java_bc-------------------------------------
 502 Bytecodes::Code GraphKit::java_bc() const {
 503   ciMethod* method = this->method();
 504   int       bci    = this->bci();
 505   if (method != NULL && bci != InvocationEntryBci)
 506     return method->java_code_at_bci(bci);
 507   else
 508     return Bytecodes::_illegal;
 509 }
 510 
 511 void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
 512                                                           bool must_throw) {
 513     // if the exception capability is set, then we will generate code
 514     // to check the JavaThread.should_post_on_exceptions flag to see
 515     // if we actually need to report exception events (for this
 516     // thread).  If we don't need to report exception events, we will
 517     // take the normal fast path provided by add_exception_events.  If
 518     // exception event reporting is enabled for this thread, we will
 519     // take the uncommon_trap in the BuildCutout below.
 520 
 521     // first must access the should_post_on_exceptions_flag in this thread's JavaThread
 522     Node* jthread = _gvn.transform(new ThreadLocalNode());
 523     Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
 524     Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
 525 
 526     // Test the should_post_on_exceptions_flag vs. 0
 527     Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
 528     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 529 
 530     // Branch to slow_path if should_post_on_exceptions_flag was true
 531     { BuildCutout unless(this, tst, PROB_MAX);
 532       // Do not try anything fancy if we're notifying the VM on every throw.
 533       // Cf. case Bytecodes::_athrow in parse2.cpp.
 534       uncommon_trap(reason, Deoptimization::Action_none,
 535                     (ciKlass*)NULL, (char*)NULL, must_throw);
 536     }
 537 
 538 }
 539 
 540 //------------------------------builtin_throw----------------------------------
 541 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
 542   bool must_throw = true;
 543 
 544   // If this particular condition has not yet happened at this
 545   // bytecode, then use the uncommon trap mechanism, and allow for
 546   // a future recompilation if several traps occur here.
 547   // If the throw is hot, try to use a more complicated inline mechanism
 548   // which keeps execution inside the compiled code.
 549   bool treat_throw_as_hot = false;
 550   ciMethodData* md = method()->method_data();
 551 
 552   if (ProfileTraps) {
 553     if (too_many_traps(reason)) {
 554       treat_throw_as_hot = true;
 555     }
 556     // (If there is no MDO at all, assume it is early in
 557     // execution, and that any deopts are part of the
 558     // startup transient, and don't need to be remembered.)
 559 
 560     // Also, if there is a local exception handler, treat all throws
 561     // as hot if there has been at least one in this method.
 562     if (C->trap_count(reason) != 0
 563         && method()->method_data()->trap_count(reason) != 0
 564         && has_ex_handler()) {
 565         treat_throw_as_hot = true;
 566     }
 567   }
 568 
 569   // If this throw happens frequently, an uncommon trap might cause
 570   // a performance pothole.  If there is a local exception handler,
 571   // and if this particular bytecode appears to be deoptimizing often,
 572   // let us handle the throw inline, with a preconstructed instance.
 573   // Note:   If the deopt count has blown up, the uncommon trap
 574   // runtime is going to flush this nmethod, not matter what.
 575   if (treat_throw_as_hot && method()->can_omit_stack_trace()) {
 576     // If the throw is local, we use a pre-existing instance and
 577     // punt on the backtrace.  This would lead to a missing backtrace
 578     // (a repeat of 4292742) if the backtrace object is ever asked
 579     // for its backtrace.
 580     // Fixing this remaining case of 4292742 requires some flavor of
 581     // escape analysis.  Leave that for the future.
 582     ciInstance* ex_obj = NULL;
 583     switch (reason) {
 584     case Deoptimization::Reason_null_check:
 585       ex_obj = env()->NullPointerException_instance();
 586       break;
 587     case Deoptimization::Reason_div0_check:
 588       ex_obj = env()->ArithmeticException_instance();
 589       break;
 590     case Deoptimization::Reason_range_check:
 591       ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
 592       break;
 593     case Deoptimization::Reason_class_check:
 594       ex_obj = env()->ClassCastException_instance();
 595       break;
 596     case Deoptimization::Reason_array_check:
 597       ex_obj = env()->ArrayStoreException_instance();
 598       break;
 599     default:
 600       break;
 601     }
 602     if (failing()) { stop(); return; }  // exception allocation might fail
 603     if (ex_obj != NULL) {
 604       if (env()->jvmti_can_post_on_exceptions()) {
 605         // check if we must post exception events, take uncommon trap if so
 606         uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
 607         // here if should_post_on_exceptions is false
 608         // continue on with the normal codegen
 609       }
 610 
 611       // Cheat with a preallocated exception object.
 612       if (C->log() != NULL)
 613         C->log()->elem("hot_throw preallocated='1' reason='%s'",
 614                        Deoptimization::trap_reason_name(reason));
 615       const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
 616       Node*              ex_node = _gvn.transform(ConNode::make(ex_con));
 617 
 618       // Clear the detail message of the preallocated exception object.
 619       // Weblogic sometimes mutates the detail message of exceptions
 620       // using reflection.
 621       int offset = java_lang_Throwable::get_detailMessage_offset();
 622       const TypePtr* adr_typ = ex_con->add_offset(offset);
 623 
 624       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
 625       const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
 626       Node *store = access_store_at(ex_node, adr, adr_typ, null(), val_type, T_OBJECT, IN_HEAP);
 627 
 628       if (!method()->has_exception_handlers()) {
 629         // We don't need to preserve the stack if there's no handler as the entire frame is going to be popped anyway.
 630         // This prevents issues with exception handling and late inlining.
 631         set_sp(0);
 632         clean_stack(0);
 633       }
 634 
 635       add_exception_state(make_exception_state(ex_node));
 636       return;
 637     }
 638   }
 639 
 640   // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
 641   // It won't be much cheaper than bailing to the interp., since we'll
 642   // have to pass up all the debug-info, and the runtime will have to
 643   // create the stack trace.
 644 
 645   // Usual case:  Bail to interpreter.
 646   // Reserve the right to recompile if we haven't seen anything yet.
 647 
 648   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
 649   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
 650   if (treat_throw_as_hot
 651       && (method()->method_data()->trap_recompiled_at(bci(), m)
 652           || C->too_many_traps(reason))) {
 653     // We cannot afford to take more traps here.  Suffer in the interpreter.
 654     if (C->log() != NULL)
 655       C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
 656                      Deoptimization::trap_reason_name(reason),
 657                      C->trap_count(reason));
 658     action = Deoptimization::Action_none;
 659   }
 660 
 661   // "must_throw" prunes the JVM state to include only the stack, if there
 662   // are no local exception handlers.  This should cut down on register
 663   // allocation time and code size, by drastically reducing the number
 664   // of in-edges on the call to the uncommon trap.
 665 
 666   uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
 667 }
 668 
 669 
 670 //----------------------------PreserveJVMState---------------------------------
 671 PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
 672   debug_only(kit->verify_map());
 673   _kit    = kit;
 674   _map    = kit->map();   // preserve the map
 675   _sp     = kit->sp();
 676   kit->set_map(clone_map ? kit->clone_map() : NULL);
 677 #ifdef ASSERT
 678   _bci    = kit->bci();
 679   Parse* parser = kit->is_Parse();
 680   int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
 681   _block  = block;
 682 #endif
 683 }
 684 PreserveJVMState::~PreserveJVMState() {
 685   GraphKit* kit = _kit;
 686 #ifdef ASSERT
 687   assert(kit->bci() == _bci, "bci must not shift");
 688   Parse* parser = kit->is_Parse();
 689   int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
 690   assert(block == _block,    "block must not shift");
 691 #endif
 692   kit->set_map(_map);
 693   kit->set_sp(_sp);
 694 }
 695 
 696 
 697 //-----------------------------BuildCutout-------------------------------------
 698 BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
 699   : PreserveJVMState(kit)
 700 {
 701   assert(p->is_Con() || p->is_Bool(), "test must be a bool");
 702   SafePointNode* outer_map = _map;   // preserved map is caller's
 703   SafePointNode* inner_map = kit->map();
 704   IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
 705   outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
 706   inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
 707 }
 708 BuildCutout::~BuildCutout() {
 709   GraphKit* kit = _kit;
 710   assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
 711 }
 712 
 713 //---------------------------PreserveReexecuteState----------------------------
 714 PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
 715   assert(!kit->stopped(), "must call stopped() before");
 716   _kit    =    kit;
 717   _sp     =    kit->sp();
 718   _reexecute = kit->jvms()->_reexecute;
 719 }
 720 PreserveReexecuteState::~PreserveReexecuteState() {
 721   if (_kit->stopped()) return;
 722   _kit->jvms()->_reexecute = _reexecute;
 723   _kit->set_sp(_sp);
 724 }
 725 
 726 //------------------------------clone_map--------------------------------------
 727 // Implementation of PreserveJVMState
 728 //
 729 // Only clone_map(...) here. If this function is only used in the
 730 // PreserveJVMState class we may want to get rid of this extra
 731 // function eventually and do it all there.
 732 
 733 SafePointNode* GraphKit::clone_map() {
 734   if (map() == NULL)  return NULL;
 735 
 736   // Clone the memory edge first
 737   Node* mem = MergeMemNode::make(map()->memory());
 738   gvn().set_type_bottom(mem);
 739 
 740   SafePointNode *clonemap = (SafePointNode*)map()->clone();
 741   JVMState* jvms = this->jvms();
 742   JVMState* clonejvms = jvms->clone_shallow(C);
 743   clonemap->set_memory(mem);
 744   clonemap->set_jvms(clonejvms);
 745   clonejvms->set_map(clonemap);
 746   record_for_igvn(clonemap);
 747   gvn().set_type_bottom(clonemap);
 748   return clonemap;
 749 }
 750 
 751 
 752 //-----------------------------set_map_clone-----------------------------------
 753 void GraphKit::set_map_clone(SafePointNode* m) {
 754   _map = m;
 755   _map = clone_map();
 756   _map->set_next_exception(NULL);
 757   debug_only(verify_map());
 758 }
 759 
 760 
 761 //----------------------------kill_dead_locals---------------------------------
 762 // Detect any locals which are known to be dead, and force them to top.
 763 void GraphKit::kill_dead_locals() {
 764   // Consult the liveness information for the locals.  If any
 765   // of them are unused, then they can be replaced by top().  This
 766   // should help register allocation time and cut down on the size
 767   // of the deoptimization information.
 768 
 769   // This call is made from many of the bytecode handling
 770   // subroutines called from the Big Switch in do_one_bytecode.
 771   // Every bytecode which might include a slow path is responsible
 772   // for killing its dead locals.  The more consistent we
 773   // are about killing deads, the fewer useless phis will be
 774   // constructed for them at various merge points.
 775 
 776   // bci can be -1 (InvocationEntryBci).  We return the entry
 777   // liveness for the method.
 778 
 779   if (method() == NULL || method()->code_size() == 0) {
 780     // We are building a graph for a call to a native method.
 781     // All locals are live.
 782     return;
 783   }
 784 
 785   ResourceMark rm;
 786 
 787   // Consult the liveness information for the locals.  If any
 788   // of them are unused, then they can be replaced by top().  This
 789   // should help register allocation time and cut down on the size
 790   // of the deoptimization information.
 791   MethodLivenessResult live_locals = method()->liveness_at_bci(bci());
 792 
 793   int len = (int)live_locals.size();
 794   assert(len <= jvms()->loc_size(), "too many live locals");
 795   for (int local = 0; local < len; local++) {
 796     if (!live_locals.at(local)) {
 797       set_local(local, top());
 798     }
 799   }
 800 }
 801 
 802 #ifdef ASSERT
 803 //-------------------------dead_locals_are_killed------------------------------
 804 // Return true if all dead locals are set to top in the map.
 805 // Used to assert "clean" debug info at various points.
 806 bool GraphKit::dead_locals_are_killed() {
 807   if (method() == NULL || method()->code_size() == 0) {
 808     // No locals need to be dead, so all is as it should be.
 809     return true;
 810   }
 811 
 812   // Make sure somebody called kill_dead_locals upstream.
 813   ResourceMark rm;
 814   for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
 815     if (jvms->loc_size() == 0)  continue;  // no locals to consult
 816     SafePointNode* map = jvms->map();
 817     ciMethod* method = jvms->method();
 818     int       bci    = jvms->bci();
 819     if (jvms == this->jvms()) {
 820       bci = this->bci();  // it might not yet be synched
 821     }
 822     MethodLivenessResult live_locals = method->liveness_at_bci(bci);
 823     int len = (int)live_locals.size();
 824     if (!live_locals.is_valid() || len == 0)
 825       // This method is trivial, or is poisoned by a breakpoint.
 826       return true;
 827     assert(len == jvms->loc_size(), "live map consistent with locals map");
 828     for (int local = 0; local < len; local++) {
 829       if (!live_locals.at(local) && map->local(jvms, local) != top()) {
 830         if (PrintMiscellaneous && (Verbose || WizardMode)) {
 831           tty->print_cr("Zombie local %d: ", local);
 832           jvms->dump();
 833         }
 834         return false;
 835       }
 836     }
 837   }
 838   return true;
 839 }
 840 
 841 #endif //ASSERT
 842 
 843 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
 844 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
 845   ciMethod* cur_method = jvms->method();
 846   int       cur_bci   = jvms->bci();
 847   if (cur_method != NULL && cur_bci != InvocationEntryBci) {
 848     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
 849     return Interpreter::bytecode_should_reexecute(code) ||
 850            (is_anewarray && (code == Bytecodes::_multianewarray));
 851     // Reexecute _multianewarray bytecode which was replaced with
 852     // sequence of [a]newarray. See Parse::do_multianewarray().
 853     //
 854     // Note: interpreter should not have it set since this optimization
 855     // is limited by dimensions and guarded by flag so in some cases
 856     // multianewarray() runtime calls will be generated and
 857     // the bytecode should not be reexecutes (stack will not be reset).
 858   } else {
 859     return false;
 860   }
 861 }
 862 
 863 // Helper function for adding JVMState and debug information to node
 864 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
 865   // Add the safepoint edges to the call (or other safepoint).
 866 
 867   // Make sure dead locals are set to top.  This
 868   // should help register allocation time and cut down on the size
 869   // of the deoptimization information.
 870   assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
 871 
 872   // Walk the inline list to fill in the correct set of JVMState's
 873   // Also fill in the associated edges for each JVMState.
 874 
 875   // If the bytecode needs to be reexecuted we need to put
 876   // the arguments back on the stack.
 877   const bool should_reexecute = jvms()->should_reexecute();
 878   JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();
 879 
 880   // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
 881   // undefined if the bci is different.  This is normal for Parse but it
 882   // should not happen for LibraryCallKit because only one bci is processed.
 883   assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
 884          "in LibraryCallKit the reexecute bit should not change");
 885 
 886   // If we are guaranteed to throw, we can prune everything but the
 887   // input to the current bytecode.
 888   bool can_prune_locals = false;
 889   uint stack_slots_not_pruned = 0;
 890   int inputs = 0, depth = 0;
 891   if (must_throw) {
 892     assert(method() == youngest_jvms->method(), "sanity");
 893     if (compute_stack_effects(inputs, depth)) {
 894       can_prune_locals = true;
 895       stack_slots_not_pruned = inputs;
 896     }
 897   }
 898 
 899   if (env()->should_retain_local_variables()) {
 900     // At any safepoint, this method can get breakpointed, which would
 901     // then require an immediate deoptimization.
 902     can_prune_locals = false;  // do not prune locals
 903     stack_slots_not_pruned = 0;
 904   }
 905 
 906   // do not scribble on the input jvms
 907   JVMState* out_jvms = youngest_jvms->clone_deep(C);
 908   call->set_jvms(out_jvms); // Start jvms list for call node
 909 
 910   // For a known set of bytecodes, the interpreter should reexecute them if
 911   // deoptimization happens. We set the reexecute state for them here
 912   if (out_jvms->is_reexecute_undefined() && //don't change if already specified
 913       should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
 914 #ifdef ASSERT
 915     int inputs = 0, not_used; // initialized by GraphKit::compute_stack_effects()
 916     assert(method() == youngest_jvms->method(), "sanity");
 917     assert(compute_stack_effects(inputs, not_used), "unknown bytecode: %s", Bytecodes::name(java_bc()));
 918     assert(out_jvms->sp() >= (uint)inputs, "not enough operands for reexecution");
 919 #endif // ASSERT
 920     out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
 921   }
 922 
 923   // Presize the call:
 924   DEBUG_ONLY(uint non_debug_edges = call->req());
 925   call->add_req_batch(top(), youngest_jvms->debug_depth());
 926   assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
 927 
 928   // Set up edges so that the call looks like this:
 929   //  Call [state:] ctl io mem fptr retadr
 930   //       [parms:] parm0 ... parmN
 931   //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
 932   //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
 933   //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
 934   // Note that caller debug info precedes callee debug info.
 935 
 936   // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
 937   uint debug_ptr = call->req();
 938 
 939   // Loop over the map input edges associated with jvms, add them
 940   // to the call node, & reset all offsets to match call node array.
 941   for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
 942     uint debug_end   = debug_ptr;
 943     uint debug_start = debug_ptr - in_jvms->debug_size();
 944     debug_ptr = debug_start;  // back up the ptr
 945 
 946     uint p = debug_start;  // walks forward in [debug_start, debug_end)
 947     uint j, k, l;
 948     SafePointNode* in_map = in_jvms->map();
 949     out_jvms->set_map(call);
 950 
 951     if (can_prune_locals) {
 952       assert(in_jvms->method() == out_jvms->method(), "sanity");
 953       // If the current throw can reach an exception handler in this JVMS,
 954       // then we must keep everything live that can reach that handler.
 955       // As a quick and dirty approximation, we look for any handlers at all.
 956       if (in_jvms->method()->has_exception_handlers()) {
 957         can_prune_locals = false;
 958       }
 959     }
 960 
 961     // Add the Locals
 962     k = in_jvms->locoff();
 963     l = in_jvms->loc_size();
 964     out_jvms->set_locoff(p);
 965     if (!can_prune_locals) {
 966       for (j = 0; j < l; j++)
 967         call->set_req(p++, in_map->in(k+j));
 968     } else {
 969       p += l;  // already set to top above by add_req_batch
 970     }
 971 
 972     // Add the Expression Stack
 973     k = in_jvms->stkoff();
 974     l = in_jvms->sp();
 975     out_jvms->set_stkoff(p);
 976     if (!can_prune_locals) {
 977       for (j = 0; j < l; j++)
 978         call->set_req(p++, in_map->in(k+j));
 979     } else if (can_prune_locals && stack_slots_not_pruned != 0) {
 980       // Divide stack into {S0,...,S1}, where S0 is set to top.
 981       uint s1 = stack_slots_not_pruned;
 982       stack_slots_not_pruned = 0;  // for next iteration
 983       if (s1 > l)  s1 = l;
 984       uint s0 = l - s1;
 985       p += s0;  // skip the tops preinstalled by add_req_batch
 986       for (j = s0; j < l; j++)
 987         call->set_req(p++, in_map->in(k+j));
 988     } else {
 989       p += l;  // already set to top above by add_req_batch
 990     }
 991 
 992     // Add the Monitors
 993     k = in_jvms->monoff();
 994     l = in_jvms->mon_size();
 995     out_jvms->set_monoff(p);
 996     for (j = 0; j < l; j++)
 997       call->set_req(p++, in_map->in(k+j));
 998 
 999     // Copy any scalar object fields.
1000     k = in_jvms->scloff();
1001     l = in_jvms->scl_size();
1002     out_jvms->set_scloff(p);
1003     for (j = 0; j < l; j++)
1004       call->set_req(p++, in_map->in(k+j));
1005 
1006     // Finish the new jvms.
1007     out_jvms->set_endoff(p);
1008 
1009     assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
1010     assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
1011     assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
1012     assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
1013     assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
1014     assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
1015 
1016     // Update the two tail pointers in parallel.
1017     out_jvms = out_jvms->caller();
1018     in_jvms  = in_jvms->caller();
1019   }
1020 
1021   assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1022 
1023   // Test the correctness of JVMState::debug_xxx accessors:
1024   assert(call->jvms()->debug_start() == non_debug_edges, "");
1025   assert(call->jvms()->debug_end()   == call->req(), "");
1026   assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1027 }
1028 
1029 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
1030   Bytecodes::Code code = java_bc();
1031   if (code == Bytecodes::_wide) {
1032     code = method()->java_code_at_bci(bci() + 1);
1033   }
1034 
1035   BasicType rtype = T_ILLEGAL;
1036   int       rsize = 0;
1037 
1038   if (code != Bytecodes::_illegal) {
1039     depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1040     rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
1041     if (rtype < T_CONFLICT)
1042       rsize = type2size[rtype];
1043   }
1044 
1045   switch (code) {
1046   case Bytecodes::_illegal:
1047     return false;
1048 
1049   case Bytecodes::_ldc:
1050   case Bytecodes::_ldc_w:
1051   case Bytecodes::_ldc2_w:
1052     inputs = 0;
1053     break;
1054 
1055   case Bytecodes::_dup:         inputs = 1;  break;
1056   case Bytecodes::_dup_x1:      inputs = 2;  break;
1057   case Bytecodes::_dup_x2:      inputs = 3;  break;
1058   case Bytecodes::_dup2:        inputs = 2;  break;
1059   case Bytecodes::_dup2_x1:     inputs = 3;  break;
1060   case Bytecodes::_dup2_x2:     inputs = 4;  break;
1061   case Bytecodes::_swap:        inputs = 2;  break;
1062   case Bytecodes::_arraylength: inputs = 1;  break;
1063 
1064   case Bytecodes::_getstatic:
1065   case Bytecodes::_putstatic:
1066   case Bytecodes::_getfield:
1067   case Bytecodes::_putfield:
1068     {
1069       bool ignored_will_link;
1070       ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
1071       int      size  = field->type()->size();
1072       bool is_get = (depth >= 0), is_static = (depth & 1);
1073       inputs = (is_static ? 0 : 1);
1074       if (is_get) {
1075         depth = size - inputs;
1076       } else {
1077         inputs += size;        // putxxx pops the value from the stack
1078         depth = - inputs;
1079       }
1080     }
1081     break;
1082 
1083   case Bytecodes::_invokevirtual:
1084   case Bytecodes::_invokespecial:
1085   case Bytecodes::_invokestatic:
1086   case Bytecodes::_invokedynamic:
1087   case Bytecodes::_invokeinterface:
1088     {
1089       bool ignored_will_link;
1090       ciSignature* declared_signature = NULL;
1091       ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
1092       assert(declared_signature != NULL, "cannot be null");
1093       inputs   = declared_signature->arg_size_for_bc(code);
1094       int size = declared_signature->return_type()->size();
1095       depth = size - inputs;
1096     }
1097     break;
1098 
1099   case Bytecodes::_multianewarray:
1100     {
1101       ciBytecodeStream iter(method());
1102       iter.reset_to_bci(bci());
1103       iter.next();
1104       inputs = iter.get_dimensions();
1105       assert(rsize == 1, "");
1106       depth = rsize - inputs;
1107     }
1108     break;
1109 
1110   case Bytecodes::_withfield: {
1111     bool ignored_will_link;
1112     ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
1113     int      size  = field->type()->size();
1114     inputs = size+1;
1115     depth = rsize - inputs;
1116     break;
1117   }
1118 
1119   case Bytecodes::_ireturn:
1120   case Bytecodes::_lreturn:
1121   case Bytecodes::_freturn:
1122   case Bytecodes::_dreturn:
1123   case Bytecodes::_areturn:
1124     assert(rsize == -depth, "");
1125     inputs = rsize;
1126     break;
1127 
1128   case Bytecodes::_jsr:
1129   case Bytecodes::_jsr_w:
1130     inputs = 0;
1131     depth  = 1;                  // S.B. depth=1, not zero
1132     break;
1133 
1134   default:
1135     // bytecode produces a typed result
1136     inputs = rsize - depth;
1137     assert(inputs >= 0, "");
1138     break;
1139   }
1140 
1141 #ifdef ASSERT
1142   // spot check
1143   int outputs = depth + inputs;
1144   assert(outputs >= 0, "sanity");
1145   switch (code) {
1146   case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
1147   case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
1148   case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
1149   case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
1150   case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
1151   default:                    break;
1152   }
1153 #endif //ASSERT
1154 
1155   return true;
1156 }
1157 
1158 
1159 
1160 //------------------------------basic_plus_adr---------------------------------
1161 Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
1162   // short-circuit a common case
1163   if (offset == intcon(0))  return ptr;
1164   return _gvn.transform( new AddPNode(base, ptr, offset) );
1165 }
1166 
1167 Node* GraphKit::ConvI2L(Node* offset) {
1168   // short-circuit a common case
1169   jint offset_con = find_int_con(offset, Type::OffsetBot);
1170   if (offset_con != Type::OffsetBot) {
1171     return longcon((jlong) offset_con);
1172   }
1173   return _gvn.transform( new ConvI2LNode(offset));
1174 }
1175 
1176 Node* GraphKit::ConvI2UL(Node* offset) {
1177   juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
1178   if (offset_con != (juint) Type::OffsetBot) {
1179     return longcon((julong) offset_con);
1180   }
1181   Node* conv = _gvn.transform( new ConvI2LNode(offset));
1182   Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1183   return _gvn.transform( new AndLNode(conv, mask) );
1184 }
1185 
1186 Node* GraphKit::ConvL2I(Node* offset) {
1187   // short-circuit a common case
1188   jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1189   if (offset_con != (jlong)Type::OffsetBot) {
1190     return intcon((int) offset_con);
1191   }
1192   return _gvn.transform( new ConvL2INode(offset));
1193 }
1194 
1195 //-------------------------load_object_klass-----------------------------------
1196 Node* GraphKit::load_object_klass(Node* obj) {
1197   // Special-case a fresh allocation to avoid building nodes:
1198   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1199   if (akls != NULL)  return akls;
1200   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1201   return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
1202 }
1203 
1204 //-------------------------load_array_length-----------------------------------
1205 Node* GraphKit::load_array_length(Node* array) {
1206   // Special-case a fresh allocation to avoid building nodes:
1207   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
1208   Node *alen;
1209   if (alloc == NULL) {
1210     Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1211     alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1212   } else {
1213     alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1214   }
1215   return alen;
1216 }
1217 
1218 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1219                                    const TypeOopPtr* oop_type,
1220                                    bool replace_length_in_map) {
1221   Node* length = alloc->Ideal_length();
1222   if (replace_length_in_map == false || map()->find_edge(length) >= 0) {
1223     Node* ccast = alloc->make_ideal_length(oop_type, &_gvn);
1224     if (ccast != length) {
1225       // do not transfrom ccast here, it might convert to top node for
1226       // negative array length and break assumptions in parsing stage.
1227       _gvn.set_type_bottom(ccast);
1228       record_for_igvn(ccast);
1229       if (replace_length_in_map) {
1230         replace_in_map(length, ccast);
1231       }
1232       return ccast;
1233     }
1234   }
1235   return length;
1236 }
1237 
1238 //------------------------------do_null_check----------------------------------
1239 // Helper function to do a NULL pointer check.  Returned value is
1240 // the incoming address with NULL casted away.  You are allowed to use the
1241 // not-null value only if you are control dependent on the test.
1242 #ifndef PRODUCT
1243 extern int explicit_null_checks_inserted,
1244            explicit_null_checks_elided;
1245 #endif
1246 Node* GraphKit::null_check_common(Node* value, BasicType type,
1247                                   // optional arguments for variations:
1248                                   bool assert_null,
1249                                   Node* *null_control,
1250                                   bool speculative,
1251                                   bool is_init_check) {
1252   assert(!assert_null || null_control == NULL, "not both at once");
1253   if (stopped())  return top();
1254   NOT_PRODUCT(explicit_null_checks_inserted++);
1255 
1256   if (value->is_InlineType()) {
1257     InlineTypeNode* vt = value->as_InlineType();
1258     null_check_common(vt->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1259     if (stopped()) {
1260       return top();
1261     }
1262     if (assert_null) {
1263       // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1264       // vt = InlineTypeNode::make_null(_gvn, vt->type()->inline_klass());
1265       // replace_in_map(value, vt);
1266       // return vt;
1267       return null();
1268     }
1269     bool do_replace_in_map = (null_control == NULL || (*null_control) == top());
1270     return cast_not_null(value, do_replace_in_map);
1271   } else if (value->is_InlineTypePtr()) {
1272     // Null checking a scalarized but nullable inline type. Check the IsInit
1273     // input instead of the oop input to avoid keeping buffer allocations alive.
1274     InlineTypePtrNode* vtptr = value->as_InlineTypePtr();
1275     while (vtptr->get_oop()->is_InlineTypePtr()) {
1276       vtptr = vtptr->get_oop()->as_InlineTypePtr();
1277     }
1278     null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1279     if (stopped()) {
1280       return top();
1281     }
1282     if (assert_null) {
1283       // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1284       // vtptr = InlineTypePtrNode::make_null(_gvn, vtptr->type()->inline_klass());
1285       // replace_in_map(value, vtptr);
1286       // return vtptr;
1287       return null();
1288     }
1289     bool do_replace_in_map = (null_control == NULL || (*null_control) == top());
1290     return cast_not_null(value, do_replace_in_map);
1291   }
1292 
1293   // Construct NULL check
1294   Node *chk = NULL;
1295   switch(type) {
1296     case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1297     case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
1298     case T_PRIMITIVE_OBJECT : // fall through
1299     case T_ARRAY  : // fall through
1300       type = T_OBJECT;  // simplify further tests
1301     case T_OBJECT : {
1302       const Type *t = _gvn.type( value );
1303 
1304       const TypeOopPtr* tp = t->isa_oopptr();
1305       if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
1306           // Only for do_null_check, not any of its siblings:
1307           && !assert_null && null_control == NULL) {
1308         // Usually, any field access or invocation on an unloaded oop type
1309         // will simply fail to link, since the statically linked class is
1310         // likely also to be unloaded.  However, in -Xcomp mode, sometimes
1311         // the static class is loaded but the sharper oop type is not.
1312         // Rather than checking for this obscure case in lots of places,
1313         // we simply observe that a null check on an unloaded class
1314         // will always be followed by a nonsense operation, so we
1315         // can just issue the uncommon trap here.
1316         // Our access to the unloaded class will only be correct
1317         // after it has been loaded and initialized, which requires
1318         // a trip through the interpreter.
1319 #ifndef PRODUCT
1320         if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
1321 #endif
1322         uncommon_trap(Deoptimization::Reason_unloaded,
1323                       Deoptimization::Action_reinterpret,
1324                       tp->klass(), "!loaded");
1325         return top();
1326       }
1327 
1328       if (assert_null) {
1329         // See if the type is contained in NULL_PTR.
1330         // If so, then the value is already null.
1331         if (t->higher_equal(TypePtr::NULL_PTR)) {
1332           NOT_PRODUCT(explicit_null_checks_elided++);
1333           return value;           // Elided null assert quickly!
1334         }
1335       } else {
1336         // See if mixing in the NULL pointer changes type.
1337         // If so, then the NULL pointer was not allowed in the original
1338         // type.  In other words, "value" was not-null.
1339         if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
1340           // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
1341           NOT_PRODUCT(explicit_null_checks_elided++);
1342           return value;           // Elided null check quickly!
1343         }
1344       }
1345       chk = new CmpPNode( value, null() );
1346       break;
1347     }
1348 
1349     default:
1350       fatal("unexpected type: %s", type2name(type));
1351   }
1352   assert(chk != NULL, "sanity check");
1353   chk = _gvn.transform(chk);
1354 
1355   BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
1356   BoolNode *btst = new BoolNode( chk, btest);
1357   Node   *tst = _gvn.transform( btst );
1358 
1359   //-----------
1360   // if peephole optimizations occurred, a prior test existed.
1361   // If a prior test existed, maybe it dominates as we can avoid this test.
1362   if (tst != btst && type == T_OBJECT) {
1363     // At this point we want to scan up the CFG to see if we can
1364     // find an identical test (and so avoid this test altogether).
1365     Node *cfg = control();
1366     int depth = 0;
1367     while( depth < 16 ) {       // Limit search depth for speed
1368       if( cfg->Opcode() == Op_IfTrue &&
1369           cfg->in(0)->in(1) == tst ) {
1370         // Found prior test.  Use "cast_not_null" to construct an identical
1371         // CastPP (and hence hash to) as already exists for the prior test.
1372         // Return that casted value.
1373         if (assert_null) {
1374           replace_in_map(value, null());
1375           return null();  // do not issue the redundant test
1376         }
1377         Node *oldcontrol = control();
1378         set_control(cfg);
1379         Node *res = cast_not_null(value);
1380         set_control(oldcontrol);
1381         NOT_PRODUCT(explicit_null_checks_elided++);
1382         return res;
1383       }
1384       cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1385       if (cfg == NULL)  break;  // Quit at region nodes
1386       depth++;
1387     }
1388   }
1389 
1390   //-----------
1391   // Branch to failure if null
1392   float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
1393   Deoptimization::DeoptReason reason;
1394   if (assert_null) {
1395     reason = Deoptimization::reason_null_assert(speculative);
1396   } else if (type == T_OBJECT || is_init_check) {
1397     reason = Deoptimization::reason_null_check(speculative);
1398   } else {
1399     reason = Deoptimization::Reason_div0_check;
1400   }
1401   // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1402   // ciMethodData::has_trap_at will return a conservative -1 if any
1403   // must-be-null assertion has failed.  This could cause performance
1404   // problems for a method after its first do_null_assert failure.
1405   // Consider using 'Reason_class_check' instead?
1406 
1407   // To cause an implicit null check, we set the not-null probability
1408   // to the maximum (PROB_MAX).  For an explicit check the probability
1409   // is set to a smaller value.
1410   if (null_control != NULL || too_many_traps(reason)) {
1411     // probability is less likely
1412     ok_prob =  PROB_LIKELY_MAG(3);
1413   } else if (!assert_null &&
1414              (ImplicitNullCheckThreshold > 0) &&
1415              method() != NULL &&
1416              (method()->method_data()->trap_count(reason)
1417               >= (uint)ImplicitNullCheckThreshold)) {
1418     ok_prob =  PROB_LIKELY_MAG(3);
1419   }
1420 
1421   if (null_control != NULL) {
1422     IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
1423     Node* null_true = _gvn.transform( new IfFalseNode(iff));
1424     set_control(      _gvn.transform( new IfTrueNode(iff)));
1425 #ifndef PRODUCT
1426     if (null_true == top()) {
1427       explicit_null_checks_elided++;
1428     }
1429 #endif
1430     (*null_control) = null_true;
1431   } else {
1432     BuildCutout unless(this, tst, ok_prob);
1433     // Check for optimizer eliding test at parse time
1434     if (stopped()) {
1435       // Failure not possible; do not bother making uncommon trap.
1436       NOT_PRODUCT(explicit_null_checks_elided++);
1437     } else if (assert_null) {
1438       uncommon_trap(reason,
1439                     Deoptimization::Action_make_not_entrant,
1440                     NULL, "assert_null");
1441     } else {
1442       replace_in_map(value, zerocon(type));
1443       builtin_throw(reason);
1444     }
1445   }
1446 
1447   // Must throw exception, fall-thru not possible?
1448   if (stopped()) {
1449     return top();               // No result
1450   }
1451 
1452   if (assert_null) {
1453     // Cast obj to null on this path.
1454     replace_in_map(value, zerocon(type));
1455     return zerocon(type);
1456   }
1457 
1458   // Cast obj to not-null on this path, if there is no null_control.
1459   // (If there is a null_control, a non-null value may come back to haunt us.)
1460   if (type == T_OBJECT) {
1461     Node* cast = cast_not_null(value, false);
1462     if (null_control == NULL || (*null_control) == top())
1463       replace_in_map(value, cast);
1464     value = cast;
1465   }
1466 
1467   return value;
1468 }
1469 
1470 //------------------------------cast_not_null----------------------------------
1471 // Cast obj to not-null on this path
1472 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1473   if (obj->is_InlineType()) {
1474     InlineTypeNode* vt = obj->clone()->as_InlineType();
1475     vt->set_is_init(_gvn);
1476     vt = _gvn.transform(vt)->as_InlineType();
1477     if (do_replace_in_map) {
1478       replace_in_map(obj, vt);
1479     }
1480     return vt;
1481   } else if (obj->is_InlineTypePtr()) {
1482     // Cast oop input instead
1483     Node* cast = cast_not_null(obj->as_InlineTypePtr()->get_oop(), do_replace_in_map);
1484     if (cast->is_top()) {
1485       // Always null
1486       return top();
1487     }
1488     // Create a new node with the casted oop input and is_init set
1489     InlineTypeBaseNode* vt = obj->clone()->as_InlineTypePtr();
1490     vt->set_oop(cast);
1491     vt->set_is_init(_gvn);
1492     vt = _gvn.transform(vt)->as_InlineTypePtr();
1493     if (do_replace_in_map) {
1494       replace_in_map(obj, vt);
1495     }
1496     return vt;
1497   }
1498   const Type *t = _gvn.type(obj);
1499   const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1500   // Object is already not-null?
1501   if( t == t_not_null ) return obj;
1502 
1503   Node *cast = new CastPPNode(obj,t_not_null);
1504   cast->init_req(0, control());
1505   cast = _gvn.transform( cast );
1506 
1507   // Scan for instances of 'obj' in the current JVM mapping.
1508   // These instances are known to be not-null after the test.
1509   if (do_replace_in_map)
1510     replace_in_map(obj, cast);
1511 
1512   return cast;                  // Return casted value
1513 }
1514 
1515 // Sometimes in intrinsics, we implicitly know an object is not null
1516 // (there's no actual null check) so we can cast it to not null. In
1517 // the course of optimizations, the input to the cast can become null.
1518 // In that case that data path will die and we need the control path
1519 // to become dead as well to keep the graph consistent. So we have to
1520 // add a check for null for which one branch can't be taken. It uses
1521 // an Opaque4 node that will cause the check to be removed after loop
1522 // opts so the test goes away and the compiled code doesn't execute a
1523 // useless check.
1524 Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
1525   if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(value))) {
1526     return value;
1527   }
1528   Node* chk = _gvn.transform(new CmpPNode(value, null()));
1529   Node *tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
1530   Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1)));
1531   IfNode *iff = new IfNode(control(), opaq, PROB_MAX, COUNT_UNKNOWN);
1532   _gvn.set_type(iff, iff->Value(&_gvn));
1533   Node *if_f = _gvn.transform(new IfFalseNode(iff));
1534   Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
1535   Node* halt = _gvn.transform(new HaltNode(if_f, frame, "unexpected null in intrinsic"));
1536   C->root()->add_req(halt);
1537   Node *if_t = _gvn.transform(new IfTrueNode(iff));
1538   set_control(if_t);
1539   return cast_not_null(value, do_replace_in_map);
1540 }
1541 
1542 
1543 //--------------------------replace_in_map-------------------------------------
1544 void GraphKit::replace_in_map(Node* old, Node* neww) {
1545   if (old == neww) {
1546     return;
1547   }
1548 
1549   map()->replace_edge(old, neww);
1550 
1551   // Note: This operation potentially replaces any edge
1552   // on the map.  This includes locals, stack, and monitors
1553   // of the current (innermost) JVM state.
1554 
1555   // don't let inconsistent types from profiling escape this
1556   // method
1557 
1558   const Type* told = _gvn.type(old);
1559   const Type* tnew = _gvn.type(neww);
1560 
1561   if (!tnew->higher_equal(told)) {
1562     return;
1563   }
1564 
1565   map()->record_replaced_node(old, neww);
1566 }
1567 
1568 
1569 //=============================================================================
1570 //--------------------------------memory---------------------------------------
1571 Node* GraphKit::memory(uint alias_idx) {
1572   MergeMemNode* mem = merged_memory();
1573   Node* p = mem->memory_at(alias_idx);
1574   assert(p != mem->empty_memory(), "empty");
1575   _gvn.set_type(p, Type::MEMORY);  // must be mapped
1576   return p;
1577 }
1578 
1579 //-----------------------------reset_memory------------------------------------
1580 Node* GraphKit::reset_memory() {
1581   Node* mem = map()->memory();
1582   // do not use this node for any more parsing!
1583   debug_only( map()->set_memory((Node*)NULL) );
1584   return _gvn.transform( mem );
1585 }
1586 
1587 //------------------------------set_all_memory---------------------------------
1588 void GraphKit::set_all_memory(Node* newmem) {
1589   Node* mergemem = MergeMemNode::make(newmem);
1590   gvn().set_type_bottom(mergemem);
1591   map()->set_memory(mergemem);
1592 }
1593 
1594 //------------------------------set_all_memory_call----------------------------
1595 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1596   Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1597   set_all_memory(newmem);
1598 }
1599 
1600 //=============================================================================
1601 //
1602 // parser factory methods for MemNodes
1603 //
1604 // These are layered on top of the factory methods in LoadNode and StoreNode,
1605 // and integrate with the parser's memory state and _gvn engine.
1606 //
1607 
1608 // factory methods in "int adr_idx"
1609 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1610                           int adr_idx,
1611                           MemNode::MemOrd mo,
1612                           LoadNode::ControlDependency control_dependency,
1613                           bool require_atomic_access,
1614                           bool unaligned,
1615                           bool mismatched,
1616                           bool unsafe,
1617                           uint8_t barrier_data) {
1618   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1619   const TypePtr* adr_type = NULL; // debug-mode-only argument
1620   debug_only(adr_type = C->get_adr_type(adr_idx));
1621   Node* mem = memory(adr_idx);
1622   Node* ld;
1623   if (require_atomic_access && bt == T_LONG) {
1624     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1625   } else if (require_atomic_access && bt == T_DOUBLE) {
1626     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1627   } else {
1628     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1629   }
1630   ld = _gvn.transform(ld);
1631 
1632   if (((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1633     // Improve graph before escape analysis and boxing elimination.
1634     record_for_igvn(ld);
1635   }
1636   return ld;
1637 }
1638 
1639 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1640                                 int adr_idx,
1641                                 MemNode::MemOrd mo,
1642                                 bool require_atomic_access,
1643                                 bool unaligned,
1644                                 bool mismatched,
1645                                 bool unsafe) {
1646   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1647   const TypePtr* adr_type = NULL;
1648   debug_only(adr_type = C->get_adr_type(adr_idx));
1649   Node *mem = memory(adr_idx);
1650   Node* st;
1651   if (require_atomic_access && bt == T_LONG) {
1652     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1653   } else if (require_atomic_access && bt == T_DOUBLE) {
1654     st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1655   } else {
1656     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1657   }
1658   if (unaligned) {
1659     st->as_Store()->set_unaligned_access();
1660   }
1661   if (mismatched) {
1662     st->as_Store()->set_mismatched_access();
1663   }
1664   if (unsafe) {
1665     st->as_Store()->set_unsafe_access();
1666   }
1667   st = _gvn.transform(st);
1668   set_memory(st, adr_idx);
1669   // Back-to-back stores can only remove intermediate store with DU info
1670   // so push on worklist for optimizer.
1671   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1672     record_for_igvn(st);
1673 
1674   return st;
1675 }
1676 
1677 Node* GraphKit::access_store_at(Node* obj,
1678                                 Node* adr,
1679                                 const TypePtr* adr_type,
1680                                 Node* val,
1681                                 const Type* val_type,
1682                                 BasicType bt,
1683                                 DecoratorSet decorators,
1684                                 bool safe_for_replace) {
1685   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1686   // could be delayed during Parse (for example, in adjust_map_after_if()).
1687   // Execute transformation here to avoid barrier generation in such case.
1688   if (_gvn.type(val) == TypePtr::NULL_PTR) {
1689     val = _gvn.makecon(TypePtr::NULL_PTR);
1690   }
1691 
1692   if (stopped()) {
1693     return top(); // Dead path ?
1694   }
1695 
1696   assert(val != NULL, "not dead path");
1697   if (val->is_InlineType()) {
1698     // Store to non-flattened field. Buffer the inline type and make sure
1699     // the store is re-executed if the allocation triggers deoptimization.
1700     PreserveReexecuteState preexecs(this);
1701     jvms()->set_should_reexecute(true);
1702     val = val->as_InlineType()->buffer(this, safe_for_replace);
1703   }
1704 
1705   C2AccessValuePtr addr(adr, adr_type);
1706   C2AccessValue value(val, val_type);
1707   C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1708   if (access.is_raw()) {
1709     return _barrier_set->BarrierSetC2::store_at(access, value);
1710   } else {
1711     return _barrier_set->store_at(access, value);
1712   }
1713 }
1714 
1715 Node* GraphKit::access_load_at(Node* obj,   // containing obj
1716                                Node* adr,   // actual adress to store val at
1717                                const TypePtr* adr_type,
1718                                const Type* val_type,
1719                                BasicType bt,
1720                                DecoratorSet decorators,
1721                                Node* ctl) {
1722   if (stopped()) {
1723     return top(); // Dead path ?
1724   }
1725 
1726   C2AccessValuePtr addr(adr, adr_type);
1727   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1728   if (access.is_raw()) {
1729     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1730   } else {
1731     return _barrier_set->load_at(access, val_type);
1732   }
1733 }
1734 
1735 Node* GraphKit::access_load(Node* adr,   // actual adress to load val at
1736                             const Type* val_type,
1737                             BasicType bt,
1738                             DecoratorSet decorators) {
1739   if (stopped()) {
1740     return top(); // Dead path ?
1741   }
1742 
1743   C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1744   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
1745   if (access.is_raw()) {
1746     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1747   } else {
1748     return _barrier_set->load_at(access, val_type);
1749   }
1750 }
1751 
1752 Node* GraphKit::access_atomic_cmpxchg_val_at(Node* obj,
1753                                              Node* adr,
1754                                              const TypePtr* adr_type,
1755                                              int alias_idx,
1756                                              Node* expected_val,
1757                                              Node* new_val,
1758                                              const Type* value_type,
1759                                              BasicType bt,
1760                                              DecoratorSet decorators) {
1761   C2AccessValuePtr addr(adr, adr_type);
1762   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
1763                         bt, obj, addr, alias_idx);
1764   if (access.is_raw()) {
1765     return _barrier_set->BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
1766   } else {
1767     return _barrier_set->atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
1768   }
1769 }
1770 
1771 Node* GraphKit::access_atomic_cmpxchg_bool_at(Node* obj,
1772                                               Node* adr,
1773                                               const TypePtr* adr_type,
1774                                               int alias_idx,
1775                                               Node* expected_val,
1776                                               Node* new_val,
1777                                               const Type* value_type,
1778                                               BasicType bt,
1779                                               DecoratorSet decorators) {
1780   C2AccessValuePtr addr(adr, adr_type);
1781   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
1782                         bt, obj, addr, alias_idx);
1783   if (access.is_raw()) {
1784     return _barrier_set->BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
1785   } else {
1786     return _barrier_set->atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
1787   }
1788 }
1789 
1790 Node* GraphKit::access_atomic_xchg_at(Node* obj,
1791                                       Node* adr,
1792                                       const TypePtr* adr_type,
1793                                       int alias_idx,
1794                                       Node* new_val,
1795                                       const Type* value_type,
1796                                       BasicType bt,
1797                                       DecoratorSet decorators) {
1798   C2AccessValuePtr addr(adr, adr_type);
1799   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
1800                         bt, obj, addr, alias_idx);
1801   if (access.is_raw()) {
1802     return _barrier_set->BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
1803   } else {
1804     return _barrier_set->atomic_xchg_at(access, new_val, value_type);
1805   }
1806 }
1807 
1808 Node* GraphKit::access_atomic_add_at(Node* obj,
1809                                      Node* adr,
1810                                      const TypePtr* adr_type,
1811                                      int alias_idx,
1812                                      Node* new_val,
1813                                      const Type* value_type,
1814                                      BasicType bt,
1815                                      DecoratorSet decorators) {
1816   C2AccessValuePtr addr(adr, adr_type);
1817   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1818   if (access.is_raw()) {
1819     return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1820   } else {
1821     return _barrier_set->atomic_add_at(access, new_val, value_type);
1822   }
1823 }
1824 
1825 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1826   return _barrier_set->clone(this, src, dst, size, is_array);
1827 }
1828 
1829 //-------------------------array_element_address-------------------------
1830 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1831                                       const TypeInt* sizetype, Node* ctrl) {
1832   uint shift  = exact_log2(type2aelembytes(elembt));
1833   ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass();
1834   if (arytype_klass != NULL && arytype_klass->is_flat_array_klass()) {
1835     ciFlatArrayKlass* vak = arytype_klass->as_flat_array_klass();
1836     shift = vak->log2_element_size();
1837   }
1838   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1839 
1840   // short-circuit a common case (saves lots of confusing waste motion)
1841   jint idx_con = find_int_con(idx, -1);
1842   if (idx_con >= 0) {
1843     intptr_t offset = header + ((intptr_t)idx_con << shift);
1844     return basic_plus_adr(ary, offset);
1845   }
1846 
1847   // must be correct type for alignment purposes
1848   Node* base  = basic_plus_adr(ary, header);
1849   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1850   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1851   return basic_plus_adr(ary, base, scale);
1852 }
1853 
1854 //-------------------------load_array_element-------------------------
1855 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1856   const Type* elemtype = arytype->elem();
1857   BasicType elembt = elemtype->array_element_basic_type();
1858   assert(elembt != T_PRIMITIVE_OBJECT, "inline types are not supported by this method");
1859   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1860   if (elembt == T_NARROWOOP) {
1861     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1862   }
1863   Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1864                             IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1865   return ld;
1866 }
1867 
1868 //-------------------------set_arguments_for_java_call-------------------------
1869 // Arguments (pre-popped from the stack) are taken from the JVMS.
1870 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1871   PreserveReexecuteState preexecs(this);
1872   if (EnableValhalla) {
1873     // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
1874     // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
1875     jvms()->set_should_reexecute(true);
1876     int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1877     inc_sp(arg_size);
1878   }
1879   // Add the call arguments
1880   const TypeTuple* domain = call->tf()->domain_sig();
1881   uint nargs = domain->cnt();
1882   int arg_num = 0;
1883   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1884     Node* arg = argument(i-TypeFunc::Parms);
1885     const Type* t = domain->field_at(i);
1886     if (t->is_inlinetypeptr() && call->method()->is_scalarized_arg(arg_num)) {
1887       // We don't pass inline type arguments by reference but instead pass each field of the inline type
1888       if (!arg->is_InlineTypeBase()) {
1889         assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
1890         arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
1891       }
1892       InlineTypeBaseNode* vt = arg->as_InlineTypeBase();
1893       vt->pass_fields(this, call, idx, true, !t->maybe_null());
1894       // If an inline type argument is passed as fields, attach the Method* to the call site
1895       // to be able to access the extended signature later via attached_method_before_pc().
1896       // For example, see CompiledMethod::preserve_callee_argument_oops().
1897       call->set_override_symbolic_info(true);
1898       arg_num++;
1899       continue;
1900     } else if (arg->is_InlineType()) {
1901       // Pass inline type argument via oop to callee
1902       arg = arg->as_InlineType()->buffer(this);
1903       if (!is_late_inline) {
1904         arg = arg->as_InlineTypePtr()->get_oop();
1905       }
1906     }
1907     if (t != Type::HALF) {
1908       arg_num++;
1909     }
1910     call->init_req(idx++, arg);
1911   }
1912 }
1913 
1914 //---------------------------set_edges_for_java_call---------------------------
1915 // Connect a newly created call into the current JVMS.
1916 // A return value node (if any) is returned from set_edges_for_java_call.
1917 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1918 
1919   // Add the predefined inputs:
1920   call->init_req( TypeFunc::Control, control() );
1921   call->init_req( TypeFunc::I_O    , i_o() );
1922   call->init_req( TypeFunc::Memory , reset_memory() );
1923   call->init_req( TypeFunc::FramePtr, frameptr() );
1924   call->init_req( TypeFunc::ReturnAdr, top() );
1925 
1926   add_safepoint_edges(call, must_throw);
1927 
1928   Node* xcall = _gvn.transform(call);
1929 
1930   if (xcall == top()) {
1931     set_control(top());
1932     return;
1933   }
1934   assert(xcall == call, "call identity is stable");
1935 
1936   // Re-use the current map to produce the result.
1937 
1938   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1939   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1940   set_all_memory_call(xcall, separate_io_proj);
1941 
1942   //return xcall;   // no need, caller already has it
1943 }
1944 
1945 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1946   if (stopped())  return top();  // maybe the call folded up?
1947 
1948   // Note:  Since any out-of-line call can produce an exception,
1949   // we always insert an I_O projection from the call into the result.
1950 
1951   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1952 
1953   if (separate_io_proj) {
1954     // The caller requested separate projections be used by the fall
1955     // through and exceptional paths, so replace the projections for
1956     // the fall through path.
1957     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1958     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1959   }
1960 
1961   // Capture the return value, if any.
1962   Node* ret;
1963   if (call->method() == NULL || call->method()->return_type()->basic_type() == T_VOID) {
1964     ret = top();
1965   } else if (call->tf()->returns_inline_type_as_fields()) {
1966     // Return of multiple values (inline type fields): we create a
1967     // InlineType node, each field is a projection from the call.
1968     ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
1969     uint base_input = TypeFunc::Parms;
1970     ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, call->method()->signature()->returns_null_free_inline_type());
1971   } else {
1972     ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1973   }
1974 
1975   return ret;
1976 }
1977 
1978 //--------------------set_predefined_input_for_runtime_call--------------------
1979 // Reading and setting the memory state is way conservative here.
1980 // The real problem is that I am not doing real Type analysis on memory,
1981 // so I cannot distinguish card mark stores from other stores.  Across a GC
1982 // point the Store Barrier and the card mark memory has to agree.  I cannot
1983 // have a card mark store and its barrier split across the GC point from
1984 // either above or below.  Here I get that to happen by reading ALL of memory.
1985 // A better answer would be to separate out card marks from other memory.
1986 // For now, return the input memory state, so that it can be reused
1987 // after the call, if this call has restricted memory effects.
1988 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1989   // Set fixed predefined input arguments
1990   Node* memory = reset_memory();
1991   Node* m = narrow_mem == NULL ? memory : narrow_mem;
1992   call->init_req( TypeFunc::Control,   control()  );
1993   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1994   call->init_req( TypeFunc::Memory,    m          ); // may gc ptrs
1995   call->init_req( TypeFunc::FramePtr,  frameptr() );
1996   call->init_req( TypeFunc::ReturnAdr, top()      );
1997   return memory;
1998 }
1999 
2000 //-------------------set_predefined_output_for_runtime_call--------------------
2001 // Set control and memory (not i_o) from the call.
2002 // If keep_mem is not NULL, use it for the output state,
2003 // except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM.
2004 // If hook_mem is NULL, this call produces no memory effects at all.
2005 // If hook_mem is a Java-visible memory slice (such as arraycopy operands),
2006 // then only that memory slice is taken from the call.
2007 // In the last case, we must put an appropriate memory barrier before
2008 // the call, so as to create the correct anti-dependencies on loads
2009 // preceding the call.
2010 void GraphKit::set_predefined_output_for_runtime_call(Node* call,
2011                                                       Node* keep_mem,
2012                                                       const TypePtr* hook_mem) {
2013   // no i/o
2014   set_control(_gvn.transform( new ProjNode(call,TypeFunc::Control) ));
2015   if (keep_mem) {
2016     // First clone the existing memory state
2017     set_all_memory(keep_mem);
2018     if (hook_mem != NULL) {
2019       // Make memory for the call
2020       Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) );
2021       // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
2022       // We also use hook_mem to extract specific effects from arraycopy stubs.
2023       set_memory(mem, hook_mem);
2024     }
2025     // ...else the call has NO memory effects.
2026 
2027     // Make sure the call advertises its memory effects precisely.
2028     // This lets us build accurate anti-dependences in gcm.cpp.
2029     assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
2030            "call node must be constructed correctly");
2031   } else {
2032     assert(hook_mem == NULL, "");
2033     // This is not a "slow path" call; all memory comes from the call.
2034     set_all_memory_call(call);
2035   }
2036 }
2037 
2038 // Keep track of MergeMems feeding into other MergeMems
2039 static void add_mergemem_users_to_worklist(Unique_Node_List& wl, Node* mem) {
2040   if (!mem->is_MergeMem()) {
2041     return;
2042   }
2043   for (SimpleDUIterator i(mem); i.has_next(); i.next()) {
2044     Node* use = i.get();
2045     if (use->is_MergeMem()) {
2046       wl.push(use);
2047     }
2048   }
2049 }
2050 
2051 // Replace the call with the current state of the kit.
2052 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
2053   JVMState* ejvms = NULL;
2054   if (has_exceptions()) {
2055     ejvms = transfer_exceptions_into_jvms();
2056   }
2057 
2058   ReplacedNodes replaced_nodes = map()->replaced_nodes();
2059   ReplacedNodes replaced_nodes_exception;
2060   Node* ex_ctl = top();
2061 
2062   SafePointNode* final_state = stop();
2063 
2064   // Find all the needed outputs of this call
2065   CallProjections* callprojs = call->extract_projections(true);
2066 
2067   Unique_Node_List wl;
2068   Node* init_mem = call->in(TypeFunc::Memory);
2069   Node* final_mem = final_state->in(TypeFunc::Memory);
2070   Node* final_ctl = final_state->in(TypeFunc::Control);
2071   Node* final_io = final_state->in(TypeFunc::I_O);
2072 
2073   // Replace all the old call edges with the edges from the inlining result
2074   if (callprojs->fallthrough_catchproj != NULL) {
2075     C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2076   }
2077   if (callprojs->fallthrough_memproj != NULL) {
2078     if (final_mem->is_MergeMem()) {
2079       // Parser's exits MergeMem was not transformed but may be optimized
2080       final_mem = _gvn.transform(final_mem);
2081     }
2082     C->gvn_replace_by(callprojs->fallthrough_memproj,   final_mem);
2083     add_mergemem_users_to_worklist(wl, final_mem);
2084   }
2085   if (callprojs->fallthrough_ioproj != NULL) {
2086     C->gvn_replace_by(callprojs->fallthrough_ioproj,    final_io);
2087   }
2088 
2089   // Replace the result with the new result if it exists and is used
2090   if (callprojs->resproj[0] != NULL && result != NULL) {
2091     // If the inlined code is dead, the result projections for an inline type returned as
2092     // fields have not been replaced. They will go away once the call is replaced by TOP below.
2093     assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
2094            "unexpected number of results");
2095     C->gvn_replace_by(callprojs->resproj[0], result);
2096   }
2097 
2098   if (ejvms == NULL) {
2099     // No exception edges to simply kill off those paths
2100     if (callprojs->catchall_catchproj != NULL) {
2101       C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2102     }
2103     if (callprojs->catchall_memproj != NULL) {
2104       C->gvn_replace_by(callprojs->catchall_memproj,   C->top());
2105     }
2106     if (callprojs->catchall_ioproj != NULL) {
2107       C->gvn_replace_by(callprojs->catchall_ioproj,    C->top());
2108     }
2109     // Replace the old exception object with top
2110     if (callprojs->exobj != NULL) {
2111       C->gvn_replace_by(callprojs->exobj, C->top());
2112     }
2113   } else {
2114     GraphKit ekit(ejvms);
2115 
2116     // Load my combined exception state into the kit, with all phis transformed:
2117     SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2118     replaced_nodes_exception = ex_map->replaced_nodes();
2119 
2120     Node* ex_oop = ekit.use_exception_state(ex_map);
2121 
2122     if (callprojs->catchall_catchproj != NULL) {
2123       C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2124       ex_ctl = ekit.control();
2125     }
2126     if (callprojs->catchall_memproj != NULL) {
2127       Node* ex_mem = ekit.reset_memory();
2128       C->gvn_replace_by(callprojs->catchall_memproj,   ex_mem);
2129       add_mergemem_users_to_worklist(wl, ex_mem);
2130     }
2131     if (callprojs->catchall_ioproj != NULL) {
2132       C->gvn_replace_by(callprojs->catchall_ioproj,    ekit.i_o());
2133     }
2134 
2135     // Replace the old exception object with the newly created one
2136     if (callprojs->exobj != NULL) {
2137       C->gvn_replace_by(callprojs->exobj, ex_oop);
2138     }
2139   }
2140 
2141   // Disconnect the call from the graph
2142   call->disconnect_inputs(C);
2143   C->gvn_replace_by(call, C->top());
2144 
2145   // Clean up any MergeMems that feed other MergeMems since the
2146   // optimizer doesn't like that.
2147   while (wl.size() > 0) {
2148     _gvn.transform(wl.pop());
2149   }
2150 
2151   if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
2152     replaced_nodes.apply(C, final_ctl);
2153   }
2154   if (!ex_ctl->is_top() && do_replaced_nodes) {
2155     replaced_nodes_exception.apply(C, ex_ctl);
2156   }
2157 }
2158 
2159 
2160 //------------------------------increment_counter------------------------------
2161 // for statistics: increment a VM counter by 1
2162 
2163 void GraphKit::increment_counter(address counter_addr) {
2164   Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2165   increment_counter(adr1);
2166 }
2167 
2168 void GraphKit::increment_counter(Node* counter_addr) {
2169   int adr_type = Compile::AliasIdxRaw;
2170   Node* ctrl = control();
2171   Node* cnt  = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
2172   Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
2173   store_to_memory(ctrl, counter_addr, incr, T_LONG, adr_type, MemNode::unordered);
2174 }
2175 
2176 
2177 //------------------------------uncommon_trap----------------------------------
2178 // Bail out to the interpreter in mid-method.  Implemented by calling the
2179 // uncommon_trap blob.  This helper function inserts a runtime call with the
2180 // right debug info.
2181 void GraphKit::uncommon_trap(int trap_request,
2182                              ciKlass* klass, const char* comment,
2183                              bool must_throw,
2184                              bool keep_exact_action) {
2185   if (failing())  stop();
2186   if (stopped())  return; // trap reachable?
2187 
2188   // Note:  If ProfileTraps is true, and if a deopt. actually
2189   // occurs here, the runtime will make sure an MDO exists.  There is
2190   // no need to call method()->ensure_method_data() at this point.
2191 
2192   // Set the stack pointer to the right value for reexecution:
2193   set_sp(reexecute_sp());
2194 
2195 #ifdef ASSERT
2196   if (!must_throw) {
2197     // Make sure the stack has at least enough depth to execute
2198     // the current bytecode.
2199     int inputs, ignored_depth;
2200     if (compute_stack_effects(inputs, ignored_depth)) {
2201       assert(sp() >= inputs, "must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
2202              Bytecodes::name(java_bc()), sp(), inputs);
2203     }
2204   }
2205 #endif
2206 
2207   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
2208   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
2209 
2210   switch (action) {
2211   case Deoptimization::Action_maybe_recompile:
2212   case Deoptimization::Action_reinterpret:
2213     // Temporary fix for 6529811 to allow virtual calls to be sure they
2214     // get the chance to go from mono->bi->mega
2215     if (!keep_exact_action &&
2216         Deoptimization::trap_request_index(trap_request) < 0 &&
2217         too_many_recompiles(reason)) {
2218       // This BCI is causing too many recompilations.
2219       if (C->log() != NULL) {
2220         C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",
2221                 Deoptimization::trap_reason_name(reason),
2222                 Deoptimization::trap_action_name(action));
2223       }
2224       action = Deoptimization::Action_none;
2225       trap_request = Deoptimization::make_trap_request(reason, action);
2226     } else {
2227       C->set_trap_can_recompile(true);
2228     }
2229     break;
2230   case Deoptimization::Action_make_not_entrant:
2231     C->set_trap_can_recompile(true);
2232     break;
2233   case Deoptimization::Action_none:
2234   case Deoptimization::Action_make_not_compilable:
2235     break;
2236   default:
2237 #ifdef ASSERT
2238     fatal("unknown action %d: %s", action, Deoptimization::trap_action_name(action));
2239 #endif
2240     break;
2241   }
2242 
2243   if (TraceOptoParse) {
2244     char buf[100];
2245     tty->print_cr("Uncommon trap %s at bci:%d",
2246                   Deoptimization::format_trap_request(buf, sizeof(buf),
2247                                                       trap_request), bci());
2248   }
2249 
2250   CompileLog* log = C->log();
2251   if (log != NULL) {
2252     int kid = (klass == NULL)? -1: log->identify(klass);
2253     log->begin_elem("uncommon_trap bci='%d'", bci());
2254     char buf[100];
2255     log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),
2256                                                           trap_request));
2257     if (kid >= 0)         log->print(" klass='%d'", kid);
2258     if (comment != NULL)  log->print(" comment='%s'", comment);
2259     log->end_elem();
2260   }
2261 
2262   // Make sure any guarding test views this path as very unlikely
2263   Node *i0 = control()->in(0);
2264   if (i0 != NULL && i0->is_If()) {        // Found a guarding if test?
2265     IfNode *iff = i0->as_If();
2266     float f = iff->_prob;   // Get prob
2267     if (control()->Opcode() == Op_IfTrue) {
2268       if (f > PROB_UNLIKELY_MAG(4))
2269         iff->_prob = PROB_MIN;
2270     } else {
2271       if (f < PROB_LIKELY_MAG(4))
2272         iff->_prob = PROB_MAX;
2273     }
2274   }
2275 
2276   // Clear out dead values from the debug info.
2277   kill_dead_locals();
2278 
2279   // Now insert the uncommon trap subroutine call
2280   address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
2281   const TypePtr* no_memory_effects = NULL;
2282   // Pass the index of the class to be loaded
2283   Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
2284                                  (must_throw ? RC_MUST_THROW : 0),
2285                                  OptoRuntime::uncommon_trap_Type(),
2286                                  call_addr, "uncommon_trap", no_memory_effects,
2287                                  intcon(trap_request));
2288   assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
2289          "must extract request correctly from the graph");
2290   assert(trap_request != 0, "zero value reserved by uncommon_trap_request");
2291 
2292   call->set_req(TypeFunc::ReturnAdr, returnadr());
2293   // The debug info is the only real input to this call.
2294 
2295   // Halt-and-catch fire here.  The above call should never return!
2296   HaltNode* halt = new HaltNode(control(), frameptr(), "uncommon trap returned which should never happen"
2297                                                        PRODUCT_ONLY(COMMA /*reachable*/false));
2298   _gvn.set_type_bottom(halt);
2299   root()->add_req(halt);
2300 
2301   stop_and_kill_map();
2302 }
2303 
2304 
2305 //--------------------------just_allocated_object------------------------------
2306 // Report the object that was just allocated.
2307 // It must be the case that there are no intervening safepoints.
2308 // We use this to determine if an object is so "fresh" that
2309 // it does not require card marks.
2310 Node* GraphKit::just_allocated_object(Node* current_control) {
2311   Node* ctrl = current_control;
2312   // Object::<init> is invoked after allocation, most of invoke nodes
2313   // will be reduced, but a region node is kept in parse time, we check
2314   // the pattern and skip the region node if it degraded to a copy.
2315   if (ctrl != NULL && ctrl->is_Region() && ctrl->req() == 2 &&
2316       ctrl->as_Region()->is_copy()) {
2317     ctrl = ctrl->as_Region()->is_copy();
2318   }
2319   if (C->recent_alloc_ctl() == ctrl) {
2320    return C->recent_alloc_obj();
2321   }
2322   return NULL;
2323 }
2324 
2325 
2326 /**
2327  * Record profiling data exact_kls for Node n with the type system so
2328  * that it can propagate it (speculation)
2329  *
2330  * @param n          node that the type applies to
2331  * @param exact_kls  type from profiling
2332  * @param maybe_null did profiling see null?
2333  *
2334  * @return           node with improved type
2335  */
2336 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2337   const Type* current_type = _gvn.type(n);
2338   assert(UseTypeSpeculation, "type speculation must be on");
2339 
2340   const TypePtr* speculative = current_type->speculative();
2341 
2342   // Should the klass from the profile be recorded in the speculative type?
2343   if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2344     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
2345     const TypeOopPtr* xtype = tklass->as_instance_type();
2346     assert(xtype->klass_is_exact(), "Should be exact");
2347     // Any reason to believe n is not null (from this profiling or a previous one)?
2348     assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2349     const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2350     // record the new speculative type's depth
2351     speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2352     speculative = speculative->with_inline_depth(jvms()->depth());
2353   } else if (current_type->would_improve_ptr(ptr_kind)) {
2354     // Profiling report that null was never seen so we can change the
2355     // speculative type to non null ptr.
2356     if (ptr_kind == ProfileAlwaysNull) {
2357       speculative = TypePtr::NULL_PTR;
2358     } else {
2359       assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2360       const TypePtr* ptr = TypePtr::NOTNULL;
2361       if (speculative != NULL) {
2362         speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2363       } else {
2364         speculative = ptr;
2365       }
2366     }
2367   }
2368 
2369   if (speculative != current_type->speculative()) {
2370     // Build a type with a speculative type (what we think we know
2371     // about the type but will need a guard when we use it)
2372     const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
2373     // We're changing the type, we need a new CheckCast node to carry
2374     // the new type. The new type depends on the control: what
2375     // profiling tells us is only valid from here as far as we can
2376     // tell.
2377     Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2378     cast = _gvn.transform(cast);
2379     replace_in_map(n, cast);
2380     n = cast;
2381   }
2382 
2383   return n;
2384 }
2385 
2386 /**
2387  * Record profiling data from receiver profiling at an invoke with the
2388  * type system so that it can propagate it (speculation)
2389  *
2390  * @param n  receiver node
2391  *
2392  * @return   node with improved type
2393  */
2394 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2395   if (!UseTypeSpeculation) {
2396     return n;
2397   }
2398   ciKlass* exact_kls = profile_has_unique_klass();
2399   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2400   if ((java_bc() == Bytecodes::_checkcast ||
2401        java_bc() == Bytecodes::_instanceof ||
2402        java_bc() == Bytecodes::_aastore) &&
2403       method()->method_data()->is_mature()) {
2404     ciProfileData* data = method()->method_data()->bci_to_data(bci());
2405     if (data != NULL) {
2406       if (java_bc() == Bytecodes::_aastore) {
2407         ciKlass* array_type = NULL;
2408         ciKlass* element_type = NULL;
2409         ProfilePtrKind element_ptr = ProfileMaybeNull;
2410         bool flat_array = true;
2411         bool null_free_array = true;
2412         method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2413         exact_kls = element_type;
2414         ptr_kind = element_ptr;
2415       } else {
2416         if (!data->as_BitData()->null_seen()) {
2417           ptr_kind = ProfileNeverNull;
2418         } else {
2419           assert(data->is_ReceiverTypeData(), "bad profile data type");
2420           ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2421           uint i = 0;
2422           for (; i < call->row_limit(); i++) {
2423             ciKlass* receiver = call->receiver(i);
2424             if (receiver != NULL) {
2425               break;
2426             }
2427           }
2428           ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2429         }
2430       }
2431     }
2432   }
2433   return record_profile_for_speculation(n, exact_kls, ptr_kind);
2434 }
2435 
2436 /**
2437  * Record profiling data from argument profiling at an invoke with the
2438  * type system so that it can propagate it (speculation)
2439  *
2440  * @param dest_method  target method for the call
2441  * @param bc           what invoke bytecode is this?
2442  */
2443 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2444   if (!UseTypeSpeculation) {
2445     return;
2446   }
2447   const TypeFunc* tf    = TypeFunc::make(dest_method);
2448   int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2449   int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2450   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2451     const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2452     if (is_reference_type(targ->basic_type())) {
2453       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2454       ciKlass* better_type = NULL;
2455       if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2456         record_profile_for_speculation(argument(j), better_type, ptr_kind);
2457       }
2458       i++;
2459     }
2460   }
2461 }
2462 
2463 /**
2464  * Record profiling data from parameter profiling at an invoke with
2465  * the type system so that it can propagate it (speculation)
2466  */
2467 void GraphKit::record_profiled_parameters_for_speculation() {
2468   if (!UseTypeSpeculation) {
2469     return;
2470   }
2471   for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2472     if (_gvn.type(local(i))->isa_oopptr()) {
2473       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2474       ciKlass* better_type = NULL;
2475       if (method()->parameter_profiled_type(j, better_type, ptr_kind)) {
2476         record_profile_for_speculation(local(i), better_type, ptr_kind);
2477       }
2478       j++;
2479     }
2480   }
2481 }
2482 
2483 /**
2484  * Record profiling data from return value profiling at an invoke with
2485  * the type system so that it can propagate it (speculation)
2486  */
2487 void GraphKit::record_profiled_return_for_speculation() {
2488   if (!UseTypeSpeculation) {
2489     return;
2490   }
2491   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2492   ciKlass* better_type = NULL;
2493   if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2494     // If profiling reports a single type for the return value,
2495     // feed it to the type system so it can propagate it as a
2496     // speculative type
2497     record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2498   }
2499 }
2500 
2501 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2502   if (Matcher::strict_fp_requires_explicit_rounding) {
2503     // (Note:  TypeFunc::make has a cache that makes this fast.)
2504     const TypeFunc* tf    = TypeFunc::make(dest_method);
2505     int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2506     for (int j = 0; j < nargs; j++) {
2507       const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2508       if (targ->basic_type() == T_DOUBLE) {
2509         // If any parameters are doubles, they must be rounded before
2510         // the call, dprecision_rounding does gvn.transform
2511         Node *arg = argument(j);
2512         arg = dprecision_rounding(arg);
2513         set_argument(j, arg);
2514       }
2515     }
2516   }
2517 }
2518 
2519 // rounding for strict float precision conformance
2520 Node* GraphKit::precision_rounding(Node* n) {
2521   if (Matcher::strict_fp_requires_explicit_rounding) {
2522 #ifdef IA32
2523     if (UseSSE == 0) {
2524       return _gvn.transform(new RoundFloatNode(0, n));
2525     }
2526 #else
2527     Unimplemented();
2528 #endif // IA32
2529   }
2530   return n;
2531 }
2532 
2533 // rounding for strict double precision conformance
2534 Node* GraphKit::dprecision_rounding(Node *n) {
2535   if (Matcher::strict_fp_requires_explicit_rounding) {
2536 #ifdef IA32
2537     if (UseSSE < 2) {
2538       return _gvn.transform(new RoundDoubleNode(0, n));
2539     }
2540 #else
2541     Unimplemented();
2542 #endif // IA32
2543   }
2544   return n;
2545 }
2546 
2547 //=============================================================================
2548 // Generate a fast path/slow path idiom.  Graph looks like:
2549 // [foo] indicates that 'foo' is a parameter
2550 //
2551 //              [in]     NULL
2552 //                 \    /
2553 //                  CmpP
2554 //                  Bool ne
2555 //                   If
2556 //                  /  \
2557 //              True    False-<2>
2558 //              / |
2559 //             /  cast_not_null
2560 //           Load  |    |   ^
2561 //        [fast_test]   |   |
2562 // gvn to   opt_test    |   |
2563 //          /    \      |  <1>
2564 //      True     False  |
2565 //        |         \\  |
2566 //   [slow_call]     \[fast_result]
2567 //    Ctl   Val       \      \
2568 //     |               \      \
2569 //    Catch       <1>   \      \
2570 //   /    \        ^     \      \
2571 //  Ex    No_Ex    |      \      \
2572 //  |       \   \  |       \ <2>  \
2573 //  ...      \  [slow_res] |  |    \   [null_result]
2574 //            \         \--+--+---  |  |
2575 //             \           | /    \ | /
2576 //              --------Region     Phi
2577 //
2578 //=============================================================================
2579 // Code is structured as a series of driver functions all called 'do_XXX' that
2580 // call a set of helper functions.  Helper functions first, then drivers.
2581 
2582 //------------------------------null_check_oop---------------------------------
2583 // Null check oop.  Set null-path control into Region in slot 3.
2584 // Make a cast-not-nullness use the other not-null control.  Return cast.
2585 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
2586                                bool never_see_null,
2587                                bool safe_for_replace,
2588                                bool speculative) {
2589   // Initial NULL check taken path
2590   (*null_control) = top();
2591   Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);
2592 
2593   // Generate uncommon_trap:
2594   if (never_see_null && (*null_control) != top()) {
2595     // If we see an unexpected null at a check-cast we record it and force a
2596     // recompile; the offending check-cast will be compiled to handle NULLs.
2597     // If we see more than one offending BCI, then all checkcasts in the
2598     // method will be compiled to handle NULLs.
2599     PreserveJVMState pjvms(this);
2600     set_control(*null_control);
2601     replace_in_map(value, null());
2602     Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);
2603     uncommon_trap(reason,
2604                   Deoptimization::Action_make_not_entrant);
2605     (*null_control) = top();    // NULL path is dead
2606   }
2607   if ((*null_control) == top() && safe_for_replace) {
2608     replace_in_map(value, cast);
2609   }
2610 
2611   // Cast away null-ness on the result
2612   return cast;
2613 }
2614 
2615 //------------------------------opt_iff----------------------------------------
2616 // Optimize the fast-check IfNode.  Set the fast-path region slot 2.
2617 // Return slow-path control.
2618 Node* GraphKit::opt_iff(Node* region, Node* iff) {
2619   IfNode *opt_iff = _gvn.transform(iff)->as_If();
2620 
2621   // Fast path taken; set region slot 2
2622   Node *fast_taken = _gvn.transform( new IfFalseNode(opt_iff) );
2623   region->init_req(2,fast_taken); // Capture fast-control
2624 
2625   // Fast path not-taken, i.e. slow path
2626   Node *slow_taken = _gvn.transform( new IfTrueNode(opt_iff) );
2627   return slow_taken;
2628 }
2629 
2630 //-----------------------------make_runtime_call-------------------------------
2631 Node* GraphKit::make_runtime_call(int flags,
2632                                   const TypeFunc* call_type, address call_addr,
2633                                   const char* call_name,
2634                                   const TypePtr* adr_type,
2635                                   // The following parms are all optional.
2636                                   // The first NULL ends the list.
2637                                   Node* parm0, Node* parm1,
2638                                   Node* parm2, Node* parm3,
2639                                   Node* parm4, Node* parm5,
2640                                   Node* parm6, Node* parm7) {
2641   assert(call_addr != NULL, "must not call NULL targets");
2642 
2643   // Slow-path call
2644   bool is_leaf = !(flags & RC_NO_LEAF);
2645   bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
2646   if (call_name == NULL) {
2647     assert(!is_leaf, "must supply name for leaf");
2648     call_name = OptoRuntime::stub_name(call_addr);
2649   }
2650   CallNode* call;
2651   if (!is_leaf) {
2652     call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2653   } else if (flags & RC_NO_FP) {
2654     call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2655   } else  if (flags & RC_VECTOR){
2656     uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2657     call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2658   } else {
2659     call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2660   }
2661 
2662   // The following is similar to set_edges_for_java_call,
2663   // except that the memory effects of the call are restricted to AliasIdxRaw.
2664 
2665   // Slow path call has no side-effects, uses few values
2666   bool wide_in  = !(flags & RC_NARROW_MEM);
2667   bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2668 
2669   Node* prev_mem = NULL;
2670   if (wide_in) {
2671     prev_mem = set_predefined_input_for_runtime_call(call);
2672   } else {
2673     assert(!wide_out, "narrow in => narrow out");
2674     Node* narrow_mem = memory(adr_type);
2675     prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2676   }
2677 
2678   // Hook each parm in order.  Stop looking at the first NULL.
2679   if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
2680   if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
2681   if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
2682   if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
2683   if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
2684   if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
2685   if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
2686   if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
2687   /* close each nested if ===> */  } } } } } } } }
2688   assert(call->in(call->req()-1) != NULL, "must initialize all parms");
2689 
2690   if (!is_leaf) {
2691     // Non-leaves can block and take safepoints:
2692     add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0));
2693   }
2694   // Non-leaves can throw exceptions:
2695   if (has_io) {
2696     call->set_req(TypeFunc::I_O, i_o());
2697   }
2698 
2699   if (flags & RC_UNCOMMON) {
2700     // Set the count to a tiny probability.  Cf. Estimate_Block_Frequency.
2701     // (An "if" probability corresponds roughly to an unconditional count.
2702     // Sort of.)
2703     call->set_cnt(PROB_UNLIKELY_MAG(4));
2704   }
2705 
2706   Node* c = _gvn.transform(call);
2707   assert(c == call, "cannot disappear");
2708 
2709   if (wide_out) {
2710     // Slow path call has full side-effects.
2711     set_predefined_output_for_runtime_call(call);
2712   } else {
2713     // Slow path call has few side-effects, and/or sets few values.
2714     set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
2715   }
2716 
2717   if (has_io) {
2718     set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2719   }
2720   return call;
2721 
2722 }
2723 
2724 // i2b
2725 Node* GraphKit::sign_extend_byte(Node* in) {
2726   Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2727   return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2728 }
2729 
2730 // i2s
2731 Node* GraphKit::sign_extend_short(Node* in) {
2732   Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2733   return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2734 }
2735 
2736 //-----------------------------make_native_call-------------------------------
2737 Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
2738   // Select just the actual call args to pass on
2739   // [MethodHandle fallback, long addr, HALF addr, ... args , NativeEntryPoint nep]
2740   //                                             |          |
2741   //                                             V          V
2742   //                                             [ ... args ]
2743   uint n_filtered_args = nargs - 4; // -fallback, -addr (2), -nep;
2744   ResourceMark rm;
2745   Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
2746   const Type** arg_types = TypeTuple::fields(n_filtered_args);
2747   GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad());
2748 
2749   VMReg* argRegs = nep->argMoves();
2750   {
2751     for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
2752         vm_arg_pos < n_filtered_args; vm_arg_pos++) {
2753       uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)
2754       Node* node = argument(vm_unfiltered_arg_pos);
2755       const Type* type = call_type->domain_sig()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
2756       VMReg reg = type == Type::HALF
2757         ? VMRegImpl::Bad()
2758         : argRegs[java_arg_read_pos++];
2759 
2760       argument_nodes[vm_arg_pos] = node;
2761       arg_types[TypeFunc::Parms + vm_arg_pos] = type;
2762       arg_regs.at_put(vm_arg_pos, reg);
2763     }
2764   }
2765 
2766   uint n_returns = call_type->range_sig()->cnt() - TypeFunc::Parms;
2767   GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
2768   const Type** ret_types = TypeTuple::fields(n_returns);
2769 
2770   VMReg* retRegs = nep->returnMoves();
2771   {
2772     for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
2773         vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
2774       const Type* type = call_type->range_sig()->field_at(TypeFunc::Parms + vm_ret_pos);
2775       VMReg reg = type == Type::HALF
2776         ? VMRegImpl::Bad()
2777         : retRegs[java_ret_read_pos++];
2778 
2779       ret_regs.at_put(vm_ret_pos, reg);
2780       ret_types[TypeFunc::Parms + vm_ret_pos] = type;
2781     }
2782   }
2783 
2784   const TypeFunc* new_call_type = TypeFunc::make(
2785     TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types),
2786     TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)
2787   );
2788 
2789   if (nep->need_transition()) {
2790     RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr,
2791                                                               nep->shadow_space(),
2792                                                               arg_regs, ret_regs);
2793     if (invoker == NULL) {
2794       C->record_failure("native invoker not implemented on this platform");
2795       return NULL;
2796     }
2797     C->add_native_invoker(invoker);
2798     call_addr = invoker->code_begin();
2799   }
2800   assert(call_addr != NULL, "sanity");
2801 
2802   CallNativeNode* call = new CallNativeNode(new_call_type, call_addr, nep->name(), TypePtr::BOTTOM,
2803                                             arg_regs,
2804                                             ret_regs,
2805                                             nep->shadow_space(),
2806                                             nep->need_transition());
2807 
2808   if (call->_need_transition) {
2809     add_safepoint_edges(call);
2810   }
2811 
2812   set_predefined_input_for_runtime_call(call);
2813 
2814   for (uint i = 0; i < n_filtered_args; i++) {
2815     call->init_req(i + TypeFunc::Parms, argument_nodes[i]);
2816   }
2817 
2818   Node* c = gvn().transform(call);
2819   assert(c == call, "cannot disappear");
2820 
2821   set_predefined_output_for_runtime_call(call);
2822 
2823   Node* ret;
2824   if (method() == NULL || method()->return_type()->basic_type() == T_VOID) {
2825     ret = top();
2826   } else {
2827     ret =  gvn().transform(new ProjNode(call, TypeFunc::Parms));
2828     // Unpack native results if needed
2829     // Need this method type since it's unerased
2830     switch (nep->method_type()->rtype()->basic_type()) {
2831       case T_CHAR:
2832         ret = _gvn.transform(new AndINode(ret, _gvn.intcon(0xFFFF)));
2833         break;
2834       case T_BYTE:
2835         ret = sign_extend_byte(ret);
2836         break;
2837       case T_SHORT:
2838         ret = sign_extend_short(ret);
2839         break;
2840       default: // do nothing
2841         break;
2842     }
2843   }
2844 
2845   push_node(method()->return_type()->basic_type(), ret);
2846 
2847   return call;
2848 }
2849 
2850 //------------------------------merge_memory-----------------------------------
2851 // Merge memory from one path into the current memory state.
2852 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2853   for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2854     Node* old_slice = mms.force_memory();
2855     Node* new_slice = mms.memory2();
2856     if (old_slice != new_slice) {
2857       PhiNode* phi;
2858       if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2859         if (mms.is_empty()) {
2860           // clone base memory Phi's inputs for this memory slice
2861           assert(old_slice == mms.base_memory(), "sanity");
2862           phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C));
2863           _gvn.set_type(phi, Type::MEMORY);
2864           for (uint i = 1; i < phi->req(); i++) {
2865             phi->init_req(i, old_slice->in(i));
2866           }
2867         } else {
2868           phi = old_slice->as_Phi(); // Phi was generated already
2869         }
2870       } else {
2871         phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
2872         _gvn.set_type(phi, Type::MEMORY);
2873       }
2874       phi->set_req(new_path, new_slice);
2875       mms.set_memory(phi);
2876     }
2877   }
2878 }
2879 
2880 //------------------------------make_slow_call_ex------------------------------
2881 // Make the exception handler hookups for the slow call
2882 void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) {
2883   if (stopped())  return;
2884 
2885   // Make a catch node with just two handlers:  fall-through and catch-all
2886   Node* i_o  = _gvn.transform( new ProjNode(call, TypeFunc::I_O, separate_io_proj) );
2887   Node* catc = _gvn.transform( new CatchNode(control(), i_o, 2) );
2888   Node* norm = new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci);
2889   _gvn.set_type_bottom(norm);
2890   C->record_for_igvn(norm);
2891   Node* excp = _gvn.transform( new CatchProjNode(catc, CatchProjNode::catch_all_index,    CatchProjNode::no_handler_bci) );
2892 
2893   { PreserveJVMState pjvms(this);
2894     set_control(excp);
2895     set_i_o(i_o);
2896 
2897     if (excp != top()) {
2898       if (deoptimize) {
2899         // Deoptimize if an exception is caught. Don't construct exception state in this case.
2900         uncommon_trap(Deoptimization::Reason_unhandled,
2901                       Deoptimization::Action_none);
2902       } else {
2903         // Create an exception state also.
2904         // Use an exact type if the caller has a specific exception.
2905         const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
2906         Node*       ex_oop  = new CreateExNode(ex_type, control(), i_o);
2907         add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
2908       }
2909     }
2910   }
2911 
2912   // Get the no-exception control from the CatchNode.
2913   set_control(norm);
2914 }
2915 
2916 static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN& gvn, BasicType bt) {
2917   Node* cmp = NULL;
2918   switch(bt) {
2919   case T_INT: cmp = new CmpINode(in1, in2); break;
2920   case T_ADDRESS: cmp = new CmpPNode(in1, in2); break;
2921   default: fatal("unexpected comparison type %s", type2name(bt));
2922   }
2923   gvn.transform(cmp);
2924   Node* bol = gvn.transform(new BoolNode(cmp, test));
2925   IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN);
2926   gvn.transform(iff);
2927   if (!bol->is_Con()) gvn.record_for_igvn(iff);
2928   return iff;
2929 }
2930 
2931 //-------------------------------gen_subtype_check-----------------------------
2932 // Generate a subtyping check.  Takes as input the subtype and supertype.
2933 // Returns 2 values: sets the default control() to the true path and returns
2934 // the false path.  Only reads invariant memory; sets no (visible) memory.
2935 // The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding
2936 // but that's not exposed to the optimizer.  This call also doesn't take in an
2937 // Object; if you wish to check an Object you need to load the Object's class
2938 // prior to coming here.
2939 Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, Node* mem, PhaseGVN& gvn) {
2940   Compile* C = gvn.C;
2941   if ((*ctrl)->is_top()) {
2942     return C->top();
2943   }
2944 
2945   // Fast check for identical types, perhaps identical constants.
2946   // The types can even be identical non-constants, in cases
2947   // involving Array.newInstance, Object.clone, etc.
2948   if (subklass == superklass)
2949     return C->top();             // false path is dead; no test needed.
2950 
2951   if (gvn.type(superklass)->singleton()) {
2952     ciKlass* superk = gvn.type(superklass)->is_klassptr()->klass();
2953     ciKlass* subk   = gvn.type(subklass)->is_klassptr()->klass();
2954 
2955     // In the common case of an exact superklass, try to fold up the
2956     // test before generating code.  You may ask, why not just generate
2957     // the code and then let it fold up?  The answer is that the generated
2958     // code will necessarily include null checks, which do not always
2959     // completely fold away.  If they are also needless, then they turn
2960     // into a performance loss.  Example:
2961     //    Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
2962     // Here, the type of 'fa' is often exact, so the store check
2963     // of fa[1]=x will fold up, without testing the nullness of x.
2964     switch (C->static_subtype_check(superk, subk)) {
2965     case Compile::SSC_always_false:
2966       {
2967         Node* always_fail = *ctrl;
2968         *ctrl = gvn.C->top();
2969         return always_fail;
2970       }
2971     case Compile::SSC_always_true:
2972       return C->top();
2973     case Compile::SSC_easy_test:
2974       {
2975         // Just do a direct pointer compare and be done.
2976         IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
2977         *ctrl = gvn.transform(new IfTrueNode(iff));
2978         return gvn.transform(new IfFalseNode(iff));
2979       }
2980     case Compile::SSC_full_test:
2981       break;
2982     default:
2983       ShouldNotReachHere();
2984     }
2985   }
2986 
2987   // %%% Possible further optimization:  Even if the superklass is not exact,
2988   // if the subklass is the unique subtype of the superklass, the check
2989   // will always succeed.  We could leave a dependency behind to ensure this.
2990 
2991   // First load the super-klass's check-offset
2992   Node *p1 = gvn.transform(new AddPNode(superklass, superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));
2993   Node* m = C->immutable_memory();
2994   Node *chk_off = gvn.transform(new LoadINode(NULL, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
2995   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
2996   bool might_be_cache = (gvn.find_int_con(chk_off, cacheoff_con) == cacheoff_con);
2997 
2998   // Load from the sub-klass's super-class display list, or a 1-word cache of
2999   // the secondary superclass list, or a failing value with a sentinel offset
3000   // if the super-klass is an interface or exceptionally deep in the Java
3001   // hierarchy and we have to scan the secondary superclass list the hard way.
3002   // Worst-case type is a little odd: NULL is allowed as a result (usually
3003   // klass loads can never produce a NULL).
3004   Node *chk_off_X = chk_off;
3005 #ifdef _LP64
3006   chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X));
3007 #endif
3008   Node *p2 = gvn.transform(new AddPNode(subklass,subklass,chk_off_X));
3009   // For some types like interfaces the following loadKlass is from a 1-word
3010   // cache which is mutable so can't use immutable memory.  Other
3011   // types load from the super-class display table which is immutable.
3012   Node *kmem = C->immutable_memory();
3013   // secondary_super_cache is not immutable but can be treated as such because:
3014   // - no ideal node writes to it in a way that could cause an
3015   //   incorrect/missed optimization of the following Load.
3016   // - it's a cache so, worse case, not reading the latest value
3017   //   wouldn't cause incorrect execution
3018   if (might_be_cache && mem != NULL) {
3019     kmem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(C->get_alias_index(gvn.type(p2)->is_ptr())) : mem;
3020   }
3021   Node *nkls = gvn.transform(LoadKlassNode::make(gvn, NULL, kmem, p2, gvn.type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL));
3022 
3023   // Compile speed common case: ARE a subtype and we canNOT fail
3024   if( superklass == nkls )
3025     return C->top();             // false path is dead; no test needed.
3026 
3027   // See if we get an immediate positive hit.  Happens roughly 83% of the
3028   // time.  Test to see if the value loaded just previously from the subklass
3029   // is exactly the superklass.
3030   IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS);
3031   Node *iftrue1 = gvn.transform( new IfTrueNode (iff1));
3032   *ctrl = gvn.transform(new IfFalseNode(iff1));
3033 
3034   // Compile speed common case: Check for being deterministic right now.  If
3035   // chk_off is a constant and not equal to cacheoff then we are NOT a
3036   // subklass.  In this case we need exactly the 1 test above and we can
3037   // return those results immediately.
3038   if (!might_be_cache) {
3039     Node* not_subtype_ctrl = *ctrl;
3040     *ctrl = iftrue1; // We need exactly the 1 test above
3041     return not_subtype_ctrl;
3042   }
3043 
3044   // Gather the various success & failures here
3045   RegionNode *r_ok_subtype = new RegionNode(4);
3046   gvn.record_for_igvn(r_ok_subtype);
3047   RegionNode *r_not_subtype = new RegionNode(3);
3048   gvn.record_for_igvn(r_not_subtype);
3049 
3050   r_ok_subtype->init_req(1, iftrue1);
3051 
3052   // Check for immediate negative hit.  Happens roughly 11% of the time (which
3053   // is roughly 63% of the remaining cases).  Test to see if the loaded
3054   // check-offset points into the subklass display list or the 1-element
3055   // cache.  If it points to the display (and NOT the cache) and the display
3056   // missed then it's not a subtype.
3057   Node *cacheoff = gvn.intcon(cacheoff_con);
3058   IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT);
3059   r_not_subtype->init_req(1, gvn.transform(new IfTrueNode (iff2)));
3060   *ctrl = gvn.transform(new IfFalseNode(iff2));
3061 
3062   // Check for self.  Very rare to get here, but it is taken 1/3 the time.
3063   // No performance impact (too rare) but allows sharing of secondary arrays
3064   // which has some footprint reduction.
3065   IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS);
3066   r_ok_subtype->init_req(2, gvn.transform(new IfTrueNode(iff3)));
3067   *ctrl = gvn.transform(new IfFalseNode(iff3));
3068 
3069   // -- Roads not taken here: --
3070   // We could also have chosen to perform the self-check at the beginning
3071   // of this code sequence, as the assembler does.  This would not pay off
3072   // the same way, since the optimizer, unlike the assembler, can perform
3073   // static type analysis to fold away many successful self-checks.
3074   // Non-foldable self checks work better here in second position, because
3075   // the initial primary superclass check subsumes a self-check for most
3076   // types.  An exception would be a secondary type like array-of-interface,
3077   // which does not appear in its own primary supertype display.
3078   // Finally, we could have chosen to move the self-check into the
3079   // PartialSubtypeCheckNode, and from there out-of-line in a platform
3080   // dependent manner.  But it is worthwhile to have the check here,
3081   // where it can be perhaps be optimized.  The cost in code space is
3082   // small (register compare, branch).
3083 
3084   // Now do a linear scan of the secondary super-klass array.  Again, no real
3085   // performance impact (too rare) but it's gotta be done.
3086   // Since the code is rarely used, there is no penalty for moving it
3087   // out of line, and it can only improve I-cache density.
3088   // The decision to inline or out-of-line this final check is platform
3089   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
3090   Node* psc = gvn.transform(
3091     new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
3092 
3093   IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
3094   r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
3095   r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
3096 
3097   // Return false path; set default control to true path.
3098   *ctrl = gvn.transform(r_ok_subtype);
3099   return gvn.transform(r_not_subtype);
3100 }
3101 
3102 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
3103   const Type* sub_t = _gvn.type(obj_or_subklass);
3104   if (sub_t->isa_inlinetype()) {
3105     obj_or_subklass = makecon(TypeKlassPtr::make(sub_t->inline_klass()));
3106   }
3107   bool expand_subtype_check = C->post_loop_opts_phase() ||   // macro node expansion is over
3108                               ExpandSubTypeCheckAtParseTime; // forced expansion
3109   if (expand_subtype_check) {
3110     MergeMemNode* mem = merged_memory();
3111     Node* ctrl = control();
3112     Node* subklass = obj_or_subklass;
3113     if (!sub_t->isa_klassptr() && !sub_t->isa_inlinetype()) {
3114       subklass = load_object_klass(obj_or_subklass);
3115     }
3116     Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn);
3117     set_control(ctrl);
3118     return n;
3119   }
3120 
3121   Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass));
3122   Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
3123   IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3124   set_control(_gvn.transform(new IfTrueNode(iff)));
3125   return _gvn.transform(new IfFalseNode(iff));
3126 }
3127 
3128 // Profile-driven exact type check:
3129 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
3130                                     float prob, Node* *casted_receiver) {
3131   assert(!klass->is_interface(), "no exact type check on interfaces");
3132   Node* fail = top();
3133   const Type* rec_t = _gvn.type(receiver);
3134   if (rec_t->isa_inlinetype()) {
3135     if (klass->equals(rec_t->inline_klass())) {
3136       (*casted_receiver) = receiver; // Always passes
3137     } else {
3138       (*casted_receiver) = top();    // Always fails
3139       fail = control();
3140       set_control(top());
3141     }
3142     return fail;
3143   }
3144   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
3145   Node* recv_klass = load_object_klass(receiver);
3146   fail = type_check(recv_klass, tklass, prob);
3147 
3148   if (!stopped()) {
3149     const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3150     const TypeOopPtr* recv_xtype = tklass->as_instance_type();
3151     assert(recv_xtype->klass_is_exact(), "");
3152 
3153     if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
3154       // Subsume downstream occurrences of receiver with a cast to
3155       // recv_xtype, since now we know what the type will be.
3156       Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
3157       Node* res = _gvn.transform(cast);
3158       if (recv_xtype->is_inlinetypeptr()) {
3159         assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
3160         res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass())->as_InlineTypeBase()->as_ptr(&gvn());
3161       }
3162       (*casted_receiver) = res;
3163       // (User must make the replace_in_map call.)
3164     }
3165   }
3166 
3167   return fail;
3168 }
3169 
3170 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
3171                            float prob) {
3172   Node* want_klass = makecon(tklass);
3173   Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
3174   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3175   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
3176   set_control(_gvn.transform(new IfTrueNode (iff)));
3177   Node* fail = _gvn.transform(new IfFalseNode(iff));
3178   return fail;
3179 }
3180 
3181 //------------------------------subtype_check_receiver-------------------------
3182 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3183                                        Node** casted_receiver) {
3184   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
3185   Node* want_klass = makecon(tklass);
3186 
3187   Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3188 
3189   // Ignore interface type information until interface types are properly tracked.
3190   if (!stopped() && !klass->is_interface()) {
3191     const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3192     const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3193     if (receiver_type != NULL && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3194       Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
3195       (*casted_receiver) = _gvn.transform(cast);
3196     }
3197   }
3198 
3199   return slow_ctl;
3200 }
3201 
3202 //------------------------------seems_never_null-------------------------------
3203 // Use null_seen information if it is available from the profile.
3204 // If we see an unexpected null at a type check we record it and force a
3205 // recompile; the offending check will be recompiled to handle NULLs.
3206 // If we see several offending BCIs, then all checks in the
3207 // method will be recompiled.
3208 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3209   speculating = !_gvn.type(obj)->speculative_maybe_null();
3210   Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3211   if (UncommonNullCast               // Cutout for this technique
3212       && obj != null()               // And not the -Xcomp stupid case?
3213       && !too_many_traps(reason)
3214       ) {
3215     if (speculating) {
3216       return true;
3217     }
3218     if (data == NULL)
3219       // Edge case:  no mature data.  Be optimistic here.
3220       return true;
3221     // If the profile has not seen a null, assume it won't happen.
3222     assert(java_bc() == Bytecodes::_checkcast ||
3223            java_bc() == Bytecodes::_instanceof ||
3224            java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
3225     if (java_bc() == Bytecodes::_aastore) {
3226       return ((ciArrayLoadStoreData*)data->as_ArrayLoadStoreData())->element()->ptr_kind() == ProfileNeverNull;
3227     }
3228     return !data->as_BitData()->null_seen();
3229   }
3230   speculating = false;
3231   return false;
3232 }
3233 
3234 void GraphKit::guard_klass_being_initialized(Node* klass) {
3235   int init_state_off = in_bytes(InstanceKlass::init_state_offset());
3236   Node* adr = basic_plus_adr(top(), klass, init_state_off);
3237   Node* init_state = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3238                                     adr->bottom_type()->is_ptr(), TypeInt::BYTE,
3239                                     T_BYTE, MemNode::unordered);
3240   init_state = _gvn.transform(init_state);
3241 
3242   Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized));
3243 
3244   Node* chk = _gvn.transform(new CmpINode(being_initialized_state, init_state));
3245   Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
3246 
3247   { BuildCutout unless(this, tst, PROB_MAX);
3248     uncommon_trap(Deoptimization::Reason_initialized, Deoptimization::Action_reinterpret);
3249   }
3250 }
3251 
3252 void GraphKit::guard_init_thread(Node* klass) {
3253   int init_thread_off = in_bytes(InstanceKlass::init_thread_offset());
3254   Node* adr = basic_plus_adr(top(), klass, init_thread_off);
3255 
3256   Node* init_thread = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3257                                      adr->bottom_type()->is_ptr(), TypePtr::NOTNULL,
3258                                      T_ADDRESS, MemNode::unordered);
3259   init_thread = _gvn.transform(init_thread);
3260 
3261   Node* cur_thread = _gvn.transform(new ThreadLocalNode());
3262 
3263   Node* chk = _gvn.transform(new CmpPNode(cur_thread, init_thread));
3264   Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
3265 
3266   { BuildCutout unless(this, tst, PROB_MAX);
3267     uncommon_trap(Deoptimization::Reason_uninitialized, Deoptimization::Action_none);
3268   }
3269 }
3270 
3271 void GraphKit::clinit_barrier(ciInstanceKlass* ik, ciMethod* context) {
3272   if (ik->is_being_initialized()) {
3273     if (C->needs_clinit_barrier(ik, context)) {
3274       Node* klass = makecon(TypeKlassPtr::make(ik));
3275       guard_klass_being_initialized(klass);
3276       guard_init_thread(klass);
3277       insert_mem_bar(Op_MemBarCPUOrder);
3278     }
3279   } else if (ik->is_initialized()) {
3280     return; // no barrier needed
3281   } else {
3282     uncommon_trap(Deoptimization::Reason_uninitialized,
3283                   Deoptimization::Action_reinterpret,
3284                   NULL);
3285   }
3286 }
3287 
3288 //------------------------maybe_cast_profiled_receiver-------------------------
3289 // If the profile has seen exactly one type, narrow to exactly that type.
3290 // Subsequent type checks will always fold up.
3291 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3292                                              ciKlass* require_klass,
3293                                              ciKlass* spec_klass,
3294                                              bool safe_for_replace) {
3295   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
3296 
3297   Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
3298 
3299   // Make sure we haven't already deoptimized from this tactic.
3300   if (too_many_traps_or_recompiles(reason))
3301     return NULL;
3302 
3303   // (No, this isn't a call, but it's enough like a virtual call
3304   // to use the same ciMethod accessor to get the profile info...)
3305   // If we have a speculative type use it instead of profiling (which
3306   // may not help us)
3307   ciKlass* exact_kls = spec_klass;
3308   if (exact_kls == NULL) {
3309     if (java_bc() == Bytecodes::_aastore) {
3310       ciKlass* array_type = NULL;
3311       ciKlass* element_type = NULL;
3312       ProfilePtrKind element_ptr = ProfileMaybeNull;
3313       bool flat_array = true;
3314       bool null_free_array = true;
3315       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3316       exact_kls = element_type;
3317     } else {
3318       exact_kls = profile_has_unique_klass();
3319     }
3320   }
3321   if (exact_kls != NULL) {// no cast failures here
3322     if (require_klass == NULL ||
3323         C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
3324       // If we narrow the type to match what the type profile sees or
3325       // the speculative type, we can then remove the rest of the
3326       // cast.
3327       // This is a win, even if the exact_kls is very specific,
3328       // because downstream operations, such as method calls,
3329       // will often benefit from the sharper type.
3330       Node* exact_obj = not_null_obj; // will get updated in place...
3331       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
3332                                             &exact_obj);
3333       { PreserveJVMState pjvms(this);
3334         set_control(slow_ctl);
3335         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3336       }
3337       if (safe_for_replace) {
3338         replace_in_map(not_null_obj, exact_obj);
3339       }
3340       return exact_obj;
3341     }
3342     // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us.
3343   }
3344 
3345   return NULL;
3346 }
3347 
3348 /**
3349  * Cast obj to type and emit guard unless we had too many traps here
3350  * already
3351  *
3352  * @param obj       node being casted
3353  * @param type      type to cast the node to
3354  * @param not_null  true if we know node cannot be null
3355  */
3356 Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
3357                                         ciKlass* type,
3358                                         bool not_null) {
3359   if (stopped()) {
3360     return obj;
3361   }
3362 
3363   // type == NULL if profiling tells us this object is always null
3364   if (type != NULL) {
3365     Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
3366     Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
3367 
3368     if (!too_many_traps_or_recompiles(null_reason) &&
3369         !too_many_traps_or_recompiles(class_reason)) {
3370       Node* not_null_obj = NULL;
3371       // not_null is true if we know the object is not null and
3372       // there's no need for a null check
3373       if (!not_null) {
3374         Node* null_ctl = top();
3375         not_null_obj = null_check_oop(obj, &null_ctl, true, true, true);
3376         assert(null_ctl->is_top(), "no null control here");
3377       } else {
3378         not_null_obj = obj;
3379       }
3380 
3381       Node* exact_obj = not_null_obj;
3382       ciKlass* exact_kls = type;
3383       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
3384                                             &exact_obj);
3385       {
3386         PreserveJVMState pjvms(this);
3387         set_control(slow_ctl);
3388         uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
3389       }
3390       replace_in_map(not_null_obj, exact_obj);
3391       obj = exact_obj;
3392     }
3393   } else {
3394     if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3395       Node* exact_obj = null_assert(obj);
3396       replace_in_map(obj, exact_obj);
3397       obj = exact_obj;
3398     }
3399   }
3400   return obj;
3401 }
3402 
3403 //-------------------------------gen_instanceof--------------------------------
3404 // Generate an instance-of idiom.  Used by both the instance-of bytecode
3405 // and the reflective instance-of call.
3406 Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
3407   kill_dead_locals();           // Benefit all the uncommon traps
3408   assert( !stopped(), "dead parse path should be checked in callers" );
3409   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
3410          "must check for not-null not-dead klass in callers");
3411 
3412   // Make the merge point
3413   enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
3414   RegionNode* region = new RegionNode(PATH_LIMIT);
3415   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3416   C->set_has_split_ifs(true); // Has chance for split-if optimization
3417 
3418   ciProfileData* data = NULL;
3419   if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
3420     data = method()->method_data()->bci_to_data(bci());
3421   }
3422   bool speculative_not_null = false;
3423   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
3424                          && seems_never_null(obj, data, speculative_not_null));
3425   bool is_value = obj->is_InlineType();
3426 
3427   // Null check; get casted pointer; set region slot 3
3428   Node* null_ctl = top();
3429   if (is_value) {
3430     // TODO 8284443 Enable this
3431     safe_for_replace = false;
3432     never_see_null = false;
3433   }
3434   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3435 
3436   // If not_null_obj is dead, only null-path is taken
3437   if (stopped()) {              // Doing instance-of on a NULL?
3438     set_control(null_ctl);
3439     return intcon(0);
3440   }
3441   region->init_req(_null_path, null_ctl);
3442   phi   ->init_req(_null_path, intcon(0)); // Set null path value
3443   if (null_ctl == top()) {
3444     // Do this eagerly, so that pattern matches like is_diamond_phi
3445     // will work even during parsing.
3446     assert(_null_path == PATH_LIMIT-1, "delete last");
3447     region->del_req(_null_path);
3448     phi   ->del_req(_null_path);
3449   }
3450 
3451   // Do we know the type check always succeed?
3452   if (!is_value) {
3453     bool known_statically = false;
3454     if (_gvn.type(superklass)->singleton()) {
3455       ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
3456       ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
3457       if (subk != NULL && subk->is_loaded()) {
3458         int static_res = C->static_subtype_check(superk, subk);
3459         known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3460       }
3461     }
3462 
3463     if (!known_statically) {
3464       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3465       // We may not have profiling here or it may not help us. If we
3466       // have a speculative type use it to perform an exact cast.
3467       ciKlass* spec_obj_type = obj_type->speculative_type();
3468       if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
3469         Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
3470         if (stopped()) {            // Profile disagrees with this path.
3471           set_control(null_ctl);    // Null is the only remaining possibility.
3472           return intcon(0);
3473         }
3474         if (cast_obj != NULL) {
3475           not_null_obj = cast_obj;
3476           is_value = not_null_obj->is_InlineType();
3477         }
3478       }
3479     }
3480   }
3481 
3482   // Generate the subtype check
3483   Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3484 
3485   // Plug in the success path to the general merge in slot 1.
3486   region->init_req(_obj_path, control());
3487   phi   ->init_req(_obj_path, intcon(1));
3488 
3489   // Plug in the failing path to the general merge in slot 2.
3490   region->init_req(_fail_path, not_subtype_ctrl);
3491   phi   ->init_req(_fail_path, intcon(0));
3492 
3493   // Return final merged results
3494   set_control( _gvn.transform(region) );
3495   record_for_igvn(region);
3496 
3497   // If we know the type check always succeeds then we don't use the
3498   // profiling data at this bytecode. Don't lose it, feed it to the
3499   // type system as a speculative type.
3500   if (safe_for_replace && !is_value) {
3501     Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3502     replace_in_map(obj, casted_obj);
3503   }
3504 
3505   return _gvn.transform(phi);
3506 }
3507 
3508 //-------------------------------gen_checkcast---------------------------------
3509 // Generate a checkcast idiom.  Used by both the checkcast bytecode and the
3510 // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
3511 // uncommon-trap paths work.  Adjust stack after this call.
3512 // If failure_control is supplied and not null, it is filled in with
3513 // the control edge for the cast failure.  Otherwise, an appropriate
3514 // uncommon trap or exception is thrown.
3515 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {
3516   kill_dead_locals();           // Benefit all the uncommon traps
3517   const TypeKlassPtr* tk = _gvn.type(superklass)->is_klassptr();
3518   const TypeOopPtr* toop = TypeOopPtr::make_from_klass(tk->klass());
3519   bool safe_for_replace = (failure_control == NULL);
3520   bool from_inline = obj->is_InlineType();
3521   assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer");
3522 
3523   // Fast cutout:  Check the case that the cast is vacuously true.
3524   // This detects the common cases where the test will short-circuit
3525   // away completely.  We do this before we perform the null check,
3526   // because if the test is going to turn into zero code, we don't
3527   // want a residual null check left around.  (Causes a slowdown,
3528   // for example, in some objArray manipulations, such as a[i]=a[j].)
3529   if (tk->singleton()) {
3530     ciKlass* klass = NULL;
3531     if (obj->is_InlineTypeBase()) {
3532       klass = _gvn.type(obj)->inline_klass();
3533     } else {
3534       const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3535       if (objtp != NULL) {
3536         klass = objtp->klass();
3537       }
3538     }
3539     if (klass != NULL) {
3540       switch (C->static_subtype_check(tk->klass(), klass)) {
3541       case Compile::SSC_always_true:
3542         // If we know the type check always succeed then we don't use
3543         // the profiling data at this bytecode. Don't lose it, feed it
3544         // to the type system as a speculative type.
3545         if (!from_inline) {
3546           obj = record_profiled_receiver_for_speculation(obj);
3547         }
3548         if (null_free) {
3549           assert(safe_for_replace, "must be");
3550           obj = null_check(obj);
3551         }
3552         assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineTypeBase(), "should have been scalarized");
3553         return obj;
3554       case Compile::SSC_always_false:
3555         if (null_free) {
3556           assert(safe_for_replace, "must be");
3557           obj = null_check(obj);
3558         }
3559         // It needs a null check because a null will *pass* the cast check.
3560         const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3561         if (objtp != NULL && !objtp->maybe_null()) {
3562           bool is_aastore = (java_bc() == Bytecodes::_aastore);
3563           Deoptimization::DeoptReason reason = is_aastore ?
3564             Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3565           builtin_throw(reason, makecon(TypeKlassPtr::make(klass)));
3566           return top();
3567         } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3568           return null_assert(obj);
3569         }
3570         break; // Fall through to full check
3571       }
3572     }
3573   }
3574 
3575   ciProfileData* data = NULL;
3576   if (failure_control == NULL) {        // use MDO in regular case only
3577     assert(java_bc() == Bytecodes::_aastore ||
3578            java_bc() == Bytecodes::_checkcast,
3579            "interpreter profiles type checks only for these BCs");
3580     if (method()->method_data()->is_mature()) {
3581       data = method()->method_data()->bci_to_data(bci());
3582     }
3583   }
3584 
3585   // Make the merge point
3586   enum { _obj_path = 1, _null_path, PATH_LIMIT };
3587   RegionNode* region = new RegionNode(PATH_LIMIT);
3588   Node*       phi    = new PhiNode(region, toop);
3589   _gvn.set_type(region, Type::CONTROL);
3590   _gvn.set_type(phi, toop);
3591 
3592   C->set_has_split_ifs(true); // Has chance for split-if optimization
3593 
3594   // Use null-cast information if it is available
3595   bool speculative_not_null = false;
3596   bool never_see_null = ((failure_control == NULL)  // regular case only
3597                          && seems_never_null(obj, data, speculative_not_null));
3598 
3599   // Null check; get casted pointer; set region slot 3
3600   Node* null_ctl = top();
3601   Node* not_null_obj = NULL;
3602   if (null_free) {
3603     assert(safe_for_replace, "must be");
3604     not_null_obj = null_check(obj);
3605   } else if (from_inline) {
3606     // TODO 8284443 obj can be null and null should pass
3607     not_null_obj = obj;
3608   } else {
3609     not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3610   }
3611 
3612   // If not_null_obj is dead, only null-path is taken
3613   if (stopped()) {              // Doing instance-of on a NULL?
3614     set_control(null_ctl);
3615     if (toop->is_inlinetypeptr()) {
3616       return InlineTypePtrNode::make_null(_gvn, toop->inline_klass());
3617     }
3618     return null();
3619   }
3620   region->init_req(_null_path, null_ctl);
3621   phi   ->init_req(_null_path, null());  // Set null path value
3622   if (null_ctl == top()) {
3623     // Do this eagerly, so that pattern matches like is_diamond_phi
3624     // will work even during parsing.
3625     assert(_null_path == PATH_LIMIT-1, "delete last");
3626     region->del_req(_null_path);
3627     phi   ->del_req(_null_path);
3628   }
3629 
3630   Node* cast_obj = NULL;
3631   if (!from_inline && tk->klass_is_exact()) {
3632     // The following optimization tries to statically cast the speculative type of the object
3633     // (for example obtained during profiling) to the type of the superklass and then do a
3634     // dynamic check that the type of the object is what we expect. To work correctly
3635     // for checkcast and aastore the type of superklass should be exact.
3636     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3637     // We may not have profiling here or it may not help us. If we have
3638     // a speculative type use it to perform an exact cast.
3639     ciKlass* spec_obj_type = obj_type->speculative_type();
3640     if (spec_obj_type != NULL || data != NULL) {
3641       cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
3642       if (cast_obj != NULL) {
3643         if (failure_control != NULL) // failure is now impossible
3644           (*failure_control) = top();
3645         // adjust the type of the phi to the exact klass:
3646         phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3647       }
3648     }
3649   }
3650 
3651   if (cast_obj == NULL) {
3652     // Generate the subtype check
3653     Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3654 
3655     // Plug in success path into the merge
3656     cast_obj = from_inline ? not_null_obj : _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3657     // Failure path ends in uncommon trap (or may be dead - failure impossible)
3658     if (failure_control == NULL) {
3659       if (not_subtype_ctrl != top()) { // If failure is possible
3660         PreserveJVMState pjvms(this);
3661         set_control(not_subtype_ctrl);
3662         Node* obj_klass = NULL;
3663         if (not_null_obj->is_InlineTypeBase()) {
3664           obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3665         } else {
3666           obj_klass = load_object_klass(not_null_obj);
3667         }
3668         bool is_aastore = (java_bc() == Bytecodes::_aastore);
3669         Deoptimization::DeoptReason reason = is_aastore ?
3670           Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3671         builtin_throw(reason, obj_klass);
3672       }
3673     } else {
3674       (*failure_control) = not_subtype_ctrl;
3675     }
3676   }
3677 
3678   region->init_req(_obj_path, control());
3679   phi   ->init_req(_obj_path, cast_obj);
3680 
3681   // A merge of NULL or Casted-NotNull obj
3682   Node* res = _gvn.transform(phi);
3683 
3684   // Note I do NOT always 'replace_in_map(obj,result)' here.
3685   //  if( tk->klass()->can_be_primary_super()  )
3686     // This means that if I successfully store an Object into an array-of-String
3687     // I 'forget' that the Object is really now known to be a String.  I have to
3688     // do this because we don't have true union types for interfaces - if I store
3689     // a Baz into an array-of-Interface and then tell the optimizer it's an
3690     // Interface, I forget that it's also a Baz and cannot do Baz-like field
3691     // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
3692   //  replace_in_map( obj, res );
3693 
3694   // Return final merged results
3695   set_control( _gvn.transform(region) );
3696   record_for_igvn(region);
3697 
3698   bool not_inline = !toop->can_be_inline_type();
3699   bool not_flattened = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flatten_array());
3700   if (EnableValhalla && not_flattened) {
3701     // Check if obj has been loaded from an array
3702     obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3703     Node* array = NULL;
3704     if (obj->isa_Load()) {
3705       Node* address = obj->in(MemNode::Address);
3706       if (address->isa_AddP()) {
3707         array = address->as_AddP()->in(AddPNode::Base);
3708       }
3709     } else if (obj->is_Phi()) {
3710       Node* region = obj->in(0);
3711       // TODO make this more robust (see JDK-8231346)
3712       if (region->req() == 3 && region->in(2) != NULL && region->in(2)->in(0) != NULL) {
3713         IfNode* iff = region->in(2)->in(0)->isa_If();
3714         if (iff != NULL) {
3715           iff->is_flat_array_check(&_gvn, &array);
3716         }
3717       }
3718     }
3719     if (array != NULL) {
3720       const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3721       if (ary_t != NULL) {
3722         if (!ary_t->is_not_null_free() && not_inline) {
3723           // Casting array element to a non-inline-type, mark array as not null-free.
3724           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3725           replace_in_map(array, cast);
3726         } else if (!ary_t->is_not_flat()) {
3727           // Casting array element to a non-flattened type, mark array as not flat.
3728           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3729           replace_in_map(array, cast);
3730         }
3731       }
3732     }
3733   }
3734 
3735   if (!stopped() && !res->is_InlineTypeBase()) {
3736     res = record_profiled_receiver_for_speculation(res);
3737     if (toop->is_inlinetypeptr()) {
3738       Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
3739       res = vt;
3740       if (safe_for_replace) {
3741         if (vt->is_InlineType() && C->inlining_incrementally()) {
3742           vt = vt->as_InlineType()->as_ptr(&_gvn);
3743         }
3744         replace_in_map(obj, vt);
3745         replace_in_map(not_null_obj, vt);
3746         replace_in_map(res, vt);
3747       }
3748     }
3749   }
3750   return res;
3751 }
3752 
3753 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
3754   Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3755   Node* mark = make_load(NULL, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3756   Node* mask = MakeConX(markWord::inline_type_pattern);
3757   Node* masked = _gvn.transform(new AndXNode(mark, mask));
3758   Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
3759   return _gvn.transform(new BoolNode(cmp, is_inline ? BoolTest::eq : BoolTest::ne));
3760 }
3761 
3762 Node* GraphKit::is_val_mirror(Node* mirror) {
3763   Node* p = basic_plus_adr(mirror, java_lang_Class::secondary_mirror_offset());
3764   Node* secondary_mirror = access_load_at(mirror, p, _gvn.type(p)->is_ptr(), TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR), T_OBJECT, IN_HEAP);
3765   Node* cmp = _gvn.transform(new CmpPNode(mirror, secondary_mirror));
3766   return _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3767 }
3768 
3769 Node* GraphKit::array_lh_test(Node* klass, jint mask, jint val, bool eq) {
3770   Node* lh_adr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
3771   // Make sure to use immutable memory here to enable hoisting the check out of loops
3772   Node* lh_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lh_adr, lh_adr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
3773   Node* masked = _gvn.transform(new AndINode(lh_val, intcon(mask)));
3774   Node* cmp = _gvn.transform(new CmpINode(masked, intcon(val)));
3775   return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
3776 }
3777 
3778 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
3779   // We can't use immutable memory here because the mark word is mutable.
3780   // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
3781   // check is moved out of loops (mainly to enable loop unswitching).
3782   Node* mem = UseArrayMarkWordCheck ? memory(Compile::AliasIdxRaw) : immutable_memory();
3783   Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, mem, array_or_klass));
3784   record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
3785   return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
3786 }
3787 
3788 Node* GraphKit::null_free_array_test(Node* klass, bool null_free) {
3789   return array_lh_test(klass, Klass::_lh_null_free_array_bit_inplace, 0, !null_free);
3790 }
3791 
3792 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3793 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3794   RegionNode* region = new RegionNode(3);
3795   Node* null_ctl = top();
3796   null_check_oop(val, &null_ctl);
3797   if (null_ctl != top()) {
3798     PreserveJVMState pjvms(this);
3799     set_control(null_ctl);
3800     {
3801       // Deoptimize if null-free array
3802       BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
3803       inc_sp(nargs);
3804       uncommon_trap(Deoptimization::Reason_null_check,
3805                     Deoptimization::Action_none);
3806     }
3807     region->init_req(1, control());
3808   }
3809   region->init_req(2, control());
3810   set_control(_gvn.transform(region));
3811   record_for_igvn(region);
3812   if (_gvn.type(val) == TypePtr::NULL_PTR) {
3813     // Since we were just successfully storing null, the array can't be null free.
3814     const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3815     ary_t = ary_t->cast_to_not_null_free();
3816     Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3817     if (safe_for_replace) {
3818       replace_in_map(ary, cast);
3819     }
3820     ary = cast;
3821   }
3822   return ary;
3823 }
3824 
3825 //------------------------------next_monitor-----------------------------------
3826 // What number should be given to the next monitor?
3827 int GraphKit::next_monitor() {
3828   int current = jvms()->monitor_depth()* C->sync_stack_slots();
3829   int next = current + C->sync_stack_slots();
3830   // Keep the toplevel high water mark current:
3831   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
3832   return current;
3833 }
3834 
3835 //------------------------------insert_mem_bar---------------------------------
3836 // Memory barrier to avoid floating things around
3837 // The membar serves as a pinch point between both control and all memory slices.
3838 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3839   MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3840   mb->init_req(TypeFunc::Control, control());
3841   mb->init_req(TypeFunc::Memory,  reset_memory());
3842   Node* membar = _gvn.transform(mb);
3843   set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3844   set_all_memory_call(membar);
3845   return membar;
3846 }
3847 
3848 //-------------------------insert_mem_bar_volatile----------------------------
3849 // Memory barrier to avoid floating things around
3850 // The membar serves as a pinch point between both control and memory(alias_idx).
3851 // If you want to make a pinch point on all memory slices, do not use this
3852 // function (even with AliasIdxBot); use insert_mem_bar() instead.
3853 Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) {
3854   // When Parse::do_put_xxx updates a volatile field, it appends a series
3855   // of MemBarVolatile nodes, one for *each* volatile field alias category.
3856   // The first membar is on the same memory slice as the field store opcode.
3857   // This forces the membar to follow the store.  (Bug 6500685 broke this.)
3858   // All the other membars (for other volatile slices, including AliasIdxBot,
3859   // which stands for all unknown volatile slices) are control-dependent
3860   // on the first membar.  This prevents later volatile loads or stores
3861   // from sliding up past the just-emitted store.
3862 
3863   MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
3864   mb->set_req(TypeFunc::Control,control());
3865   if (alias_idx == Compile::AliasIdxBot) {
3866     mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
3867   } else {
3868     assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
3869     mb->set_req(TypeFunc::Memory, memory(alias_idx));
3870   }
3871   Node* membar = _gvn.transform(mb);
3872   set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3873   if (alias_idx == Compile::AliasIdxBot) {
3874     merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3875   } else {
3876     set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3877   }
3878   return membar;
3879 }
3880 
3881 //------------------------------shared_lock------------------------------------
3882 // Emit locking code.
3883 FastLockNode* GraphKit::shared_lock(Node* obj) {
3884   // bci is either a monitorenter bc or InvocationEntryBci
3885   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3886   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3887 
3888   if( !GenerateSynchronizationCode )
3889     return NULL;                // Not locking things?
3890 
3891   if (stopped())                // Dead monitor?
3892     return NULL;
3893 
3894   assert(dead_locals_are_killed(), "should kill locals before sync. point");
3895 
3896   // Box the stack location
3897   Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3898   Node* mem = reset_memory();
3899 
3900   FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3901 
3902   // Create the rtm counters for this fast lock if needed.
3903   flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3904 
3905   // Add monitor to debug info for the slow path.  If we block inside the
3906   // slow path and de-opt, we need the monitor hanging around
3907   map()->push_monitor( flock );
3908 
3909   const TypeFunc *tf = LockNode::lock_type();
3910   LockNode *lock = new LockNode(C, tf);
3911 
3912   lock->init_req( TypeFunc::Control, control() );
3913   lock->init_req( TypeFunc::Memory , mem );
3914   lock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
3915   lock->init_req( TypeFunc::FramePtr, frameptr() );
3916   lock->init_req( TypeFunc::ReturnAdr, top() );
3917 
3918   lock->init_req(TypeFunc::Parms + 0, obj);
3919   lock->init_req(TypeFunc::Parms + 1, box);
3920   lock->init_req(TypeFunc::Parms + 2, flock);
3921   add_safepoint_edges(lock);
3922 
3923   lock = _gvn.transform( lock )->as_Lock();
3924 
3925   // lock has no side-effects, sets few values
3926   set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
3927 
3928   insert_mem_bar(Op_MemBarAcquireLock);
3929 
3930   // Add this to the worklist so that the lock can be eliminated
3931   record_for_igvn(lock);
3932 
3933 #ifndef PRODUCT
3934   if (PrintLockStatistics) {
3935     // Update the counter for this lock.  Don't bother using an atomic
3936     // operation since we don't require absolute accuracy.
3937     lock->create_lock_counter(map()->jvms());
3938     increment_counter(lock->counter()->addr());
3939   }
3940 #endif
3941 
3942   return flock;
3943 }
3944 
3945 
3946 //------------------------------shared_unlock----------------------------------
3947 // Emit unlocking code.
3948 void GraphKit::shared_unlock(Node* box, Node* obj) {
3949   // bci is either a monitorenter bc or InvocationEntryBci
3950   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3951   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3952 
3953   if( !GenerateSynchronizationCode )
3954     return;
3955   if (stopped()) {               // Dead monitor?
3956     map()->pop_monitor();        // Kill monitor from debug info
3957     return;
3958   }
3959   assert(!obj->is_InlineTypeBase(), "should not unlock on inline type");
3960 
3961   // Memory barrier to avoid floating things down past the locked region
3962   insert_mem_bar(Op_MemBarReleaseLock);
3963 
3964   const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3965   UnlockNode *unlock = new UnlockNode(C, tf);
3966 #ifdef ASSERT
3967   unlock->set_dbg_jvms(sync_jvms());
3968 #endif
3969   uint raw_idx = Compile::AliasIdxRaw;
3970   unlock->init_req( TypeFunc::Control, control() );
3971   unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3972   unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
3973   unlock->init_req( TypeFunc::FramePtr, frameptr() );
3974   unlock->init_req( TypeFunc::ReturnAdr, top() );
3975 
3976   unlock->init_req(TypeFunc::Parms + 0, obj);
3977   unlock->init_req(TypeFunc::Parms + 1, box);
3978   unlock = _gvn.transform(unlock)->as_Unlock();
3979 
3980   Node* mem = reset_memory();
3981 
3982   // unlock has no side-effects, sets few values
3983   set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3984 
3985   // Kill monitor from debug info
3986   map()->pop_monitor( );
3987 }
3988 
3989 //-------------------------------get_layout_helper-----------------------------
3990 // If the given klass is a constant or known to be an array,
3991 // fetch the constant layout helper value into constant_value
3992 // and return (Node*)NULL.  Otherwise, load the non-constant
3993 // layout helper value, and return the node which represents it.
3994 // This two-faced routine is useful because allocation sites
3995 // almost always feature constant types.
3996 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3997   const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
3998   if (!StressReflectiveCode && inst_klass != NULL) {
3999     ciKlass* klass = inst_klass->klass();
4000     assert(klass != NULL, "klass should not be NULL");
4001     bool xklass = inst_klass->klass_is_exact();
4002     bool can_be_flattened = false;
4003     if (UseFlatArray && klass->is_obj_array_klass() && !klass->as_obj_array_klass()->is_elem_null_free()) {
4004       // The runtime type of [LMyValue might be [QMyValue due to [QMyValue <: [LMyValue.
4005       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
4006       can_be_flattened = elem->can_be_inline_klass() && (!elem->is_inlinetype() || elem->flatten_array());
4007     }
4008     if (!can_be_flattened && (xklass || klass->is_array_klass())) {
4009       jint lhelper = klass->layout_helper();
4010       if (lhelper != Klass::_lh_neutral_value) {
4011         constant_value = lhelper;
4012         return (Node*) NULL;
4013       }
4014     }
4015   }
4016   constant_value = Klass::_lh_neutral_value;  // put in a known value
4017   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
4018   return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
4019 }
4020 
4021 // We just put in an allocate/initialize with a big raw-memory effect.
4022 // Hook selected additional alias categories on the initialization.
4023 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
4024                                 MergeMemNode* init_in_merge,
4025                                 Node* init_out_raw) {
4026   DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
4027   assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
4028 
4029   Node* prevmem = kit.memory(alias_idx);
4030   init_in_merge->set_memory_at(alias_idx, prevmem);
4031   if (init_out_raw != NULL) {
4032     kit.set_memory(init_out_raw, alias_idx);
4033   }
4034 }
4035 
4036 //---------------------------set_output_for_allocation-------------------------
4037 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
4038                                           const TypeOopPtr* oop_type,
4039                                           bool deoptimize_on_exception) {
4040   int rawidx = Compile::AliasIdxRaw;
4041   alloc->set_req( TypeFunc::FramePtr, frameptr() );
4042   add_safepoint_edges(alloc);
4043   Node* allocx = _gvn.transform(alloc);
4044   set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
4045   // create memory projection for i_o
4046   set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
4047   make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
4048 
4049   // create a memory projection as for the normal control path
4050   Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
4051   set_memory(malloc, rawidx);
4052 
4053   // a normal slow-call doesn't change i_o, but an allocation does
4054   // we create a separate i_o projection for the normal control path
4055   set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
4056   Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
4057 
4058   // put in an initialization barrier
4059   InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
4060                                                  rawoop)->as_Initialize();
4061   assert(alloc->initialization() == init,  "2-way macro link must work");
4062   assert(init ->allocation()     == alloc, "2-way macro link must work");
4063   {
4064     // Extract memory strands which may participate in the new object's
4065     // initialization, and source them from the new InitializeNode.
4066     // This will allow us to observe initializations when they occur,
4067     // and link them properly (as a group) to the InitializeNode.
4068     assert(init->in(InitializeNode::Memory) == malloc, "");
4069     MergeMemNode* minit_in = MergeMemNode::make(malloc);
4070     init->set_req(InitializeNode::Memory, minit_in);
4071     record_for_igvn(minit_in); // fold it up later, if possible
4072     _gvn.set_type(minit_in, Type::MEMORY);
4073     Node* minit_out = memory(rawidx);
4074     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
4075     // Add an edge in the MergeMem for the header fields so an access
4076     // to one of those has correct memory state
4077     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
4078     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
4079     if (oop_type->isa_aryptr()) {
4080       const TypeAryPtr* arytype = oop_type->is_aryptr();
4081       if (arytype->klass()->is_flat_array_klass()) {
4082         // Initially all flattened array accesses share a single slice
4083         // but that changes after parsing. Prepare the memory graph so
4084         // it can optimize flattened array accesses properly once they
4085         // don't share a single slice.
4086         assert(C->flattened_accesses_share_alias(), "should be set at parse time");
4087         C->set_flattened_accesses_share_alias(false);
4088         ciFlatArrayKlass* vak = arytype->klass()->as_flat_array_klass();
4089         ciInlineKlass* vk = vak->element_klass()->as_inline_klass();
4090         for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
4091           ciField* field = vk->nonstatic_field_at(i);
4092           if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
4093             continue;  // do not bother to track really large numbers of fields
4094           int off_in_vt = field->offset() - vk->first_field_offset();
4095           const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
4096           int fieldidx = C->get_alias_index(adr_type, true);
4097           // Pass NULL for init_out. Having per flat array element field memory edges as uses of the Initialize node
4098           // can result in per flat array field Phis to be created which confuses the logic of
4099           // Compile::adjust_flattened_array_access_aliases().
4100           hook_memory_on_init(*this, fieldidx, minit_in, NULL);
4101         }
4102         C->set_flattened_accesses_share_alias(true);
4103         hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
4104       } else {
4105         const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
4106         int            elemidx  = C->get_alias_index(telemref);
4107         hook_memory_on_init(*this, elemidx, minit_in, minit_out);
4108       }
4109     } else if (oop_type->isa_instptr()) {
4110       set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
4111       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
4112       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
4113         ciField* field = ik->nonstatic_field_at(i);
4114         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
4115           continue;  // do not bother to track really large numbers of fields
4116         // Find (or create) the alias category for this field:
4117         int fieldidx = C->alias_type(field)->index();
4118         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
4119       }
4120     }
4121   }
4122 
4123   // Cast raw oop to the real thing...
4124   Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
4125   javaoop = _gvn.transform(javaoop);
4126   C->set_recent_alloc(control(), javaoop);
4127   assert(just_allocated_object(control()) == javaoop, "just allocated");
4128 
4129 #ifdef ASSERT
4130   { // Verify that the AllocateNode::Ideal_allocation recognizers work:
4131     assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
4132            "Ideal_allocation works");
4133     assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
4134            "Ideal_allocation works");
4135     if (alloc->is_AllocateArray()) {
4136       assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
4137              "Ideal_allocation works");
4138       assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
4139              "Ideal_allocation works");
4140     } else {
4141       assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
4142     }
4143   }
4144 #endif //ASSERT
4145 
4146   return javaoop;
4147 }
4148 
4149 //---------------------------new_instance--------------------------------------
4150 // This routine takes a klass_node which may be constant (for a static type)
4151 // or may be non-constant (for reflective code).  It will work equally well
4152 // for either, and the graph will fold nicely if the optimizer later reduces
4153 // the type to a constant.
4154 // The optional arguments are for specialized use by intrinsics:
4155 //  - If 'extra_slow_test' if not null is an extra condition for the slow-path.
4156 //  - If 'return_size_val', report the the total object size to the caller.
4157 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
4158 Node* GraphKit::new_instance(Node* klass_node,
4159                              Node* extra_slow_test,
4160                              Node* *return_size_val,
4161                              bool deoptimize_on_exception,
4162                              InlineTypeBaseNode* inline_type_node) {
4163   // Compute size in doublewords
4164   // The size is always an integral number of doublewords, represented
4165   // as a positive bytewise size stored in the klass's layout_helper.
4166   // The layout_helper also encodes (in a low bit) the need for a slow path.
4167   jint  layout_con = Klass::_lh_neutral_value;
4168   Node* layout_val = get_layout_helper(klass_node, layout_con);
4169   bool  layout_is_con = (layout_val == NULL);
4170 
4171   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
4172   // Generate the initial go-slow test.  It's either ALWAYS (return a
4173   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
4174   // case) a computed value derived from the layout_helper.
4175   Node* initial_slow_test = NULL;
4176   if (layout_is_con) {
4177     assert(!StressReflectiveCode, "stress mode does not use these paths");
4178     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
4179     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
4180   } else {   // reflective case
4181     // This reflective path is used by Unsafe.allocateInstance.
4182     // (It may be stress-tested by specifying StressReflectiveCode.)
4183     // Basically, we want to get into the VM is there's an illegal argument.
4184     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
4185     initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
4186     if (extra_slow_test != intcon(0)) {
4187       initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
4188     }
4189     // (Macro-expander will further convert this to a Bool, if necessary.)
4190   }
4191 
4192   // Find the size in bytes.  This is easy; it's the layout_helper.
4193   // The size value must be valid even if the slow path is taken.
4194   Node* size = NULL;
4195   if (layout_is_con) {
4196     size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
4197   } else {   // reflective case
4198     // This reflective path is used by clone and Unsafe.allocateInstance.
4199     size = ConvI2X(layout_val);
4200 
4201     // Clear the low bits to extract layout_helper_size_in_bytes:
4202     assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
4203     Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
4204     size = _gvn.transform( new AndXNode(size, mask) );
4205   }
4206   if (return_size_val != NULL) {
4207     (*return_size_val) = size;
4208   }
4209 
4210   // This is a precise notnull oop of the klass.
4211   // (Actually, it need not be precise if this is a reflective allocation.)
4212   // It's what we cast the result to.
4213   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
4214   if (!tklass)  tklass = TypeInstKlassPtr::OBJECT;
4215   const TypeOopPtr* oop_type = tklass->as_instance_type();
4216 
4217   // Now generate allocation code
4218 
4219   // The entire memory state is needed for slow path of the allocation
4220   // since GC and deoptimization can happen.
4221   Node *mem = reset_memory();
4222   set_all_memory(mem); // Create new memory state
4223 
4224   AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
4225                                          control(), mem, i_o(),
4226                                          size, klass_node,
4227                                          initial_slow_test, inline_type_node);
4228 
4229   return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
4230 }
4231 
4232 //-------------------------------new_array-------------------------------------
4233 // helper for newarray and anewarray
4234 // The 'length' parameter is (obviously) the length of the array.
4235 // See comments on new_instance for the meaning of the other arguments.
4236 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
4237                           Node* length,         // number of array elements
4238                           int   nargs,          // number of arguments to push back for uncommon trap
4239                           Node* *return_size_val,
4240                           bool deoptimize_on_exception) {
4241   jint  layout_con = Klass::_lh_neutral_value;
4242   Node* layout_val = get_layout_helper(klass_node, layout_con);
4243   bool  layout_is_con = (layout_val == NULL);
4244 
4245   if (!layout_is_con && !StressReflectiveCode &&
4246       !too_many_traps(Deoptimization::Reason_class_check)) {
4247     // This is a reflective array creation site.
4248     // Optimistically assume that it is a subtype of Object[],
4249     // so that we can fold up all the address arithmetic.
4250     layout_con = Klass::array_layout_helper(T_OBJECT);
4251     Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
4252     Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
4253     { BuildCutout unless(this, bol_lh, PROB_MAX);
4254       inc_sp(nargs);
4255       uncommon_trap(Deoptimization::Reason_class_check,
4256                     Deoptimization::Action_maybe_recompile);
4257     }
4258     layout_val = NULL;
4259     layout_is_con = true;
4260   }
4261 
4262   // Generate the initial go-slow test.  Make sure we do not overflow
4263   // if length is huge (near 2Gig) or negative!  We do not need
4264   // exact double-words here, just a close approximation of needed
4265   // double-words.  We can't add any offset or rounding bits, lest we
4266   // take a size -1 of bytes and make it positive.  Use an unsigned
4267   // compare, so negative sizes look hugely positive.
4268   int fast_size_limit = FastAllocateSizeLimit;
4269   if (layout_is_con) {
4270     assert(!StressReflectiveCode, "stress mode does not use these paths");
4271     // Increase the size limit if we have exact knowledge of array type.
4272     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
4273     fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
4274   }
4275 
4276   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
4277   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
4278 
4279   // --- Size Computation ---
4280   // array_size = round_to_heap(array_header + (length << elem_shift));
4281   // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
4282   // and align_to(x, y) == ((x + y-1) & ~(y-1))
4283   // The rounding mask is strength-reduced, if possible.
4284   int round_mask = MinObjAlignmentInBytes - 1;
4285   Node* header_size = NULL;
4286   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
4287   // (T_BYTE has the weakest alignment and size restrictions...)
4288   if (layout_is_con) {
4289     int       hsize  = Klass::layout_helper_header_size(layout_con);
4290     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
4291     bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
4292     if ((round_mask & ~right_n_bits(eshift)) == 0)
4293       round_mask = 0;  // strength-reduce it if it goes away completely
4294     assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
4295     assert(header_size_min <= hsize, "generic minimum is smallest");
4296     header_size_min = hsize;
4297     header_size = intcon(hsize + round_mask);
4298   } else {
4299     Node* hss   = intcon(Klass::_lh_header_size_shift);
4300     Node* hsm   = intcon(Klass::_lh_header_size_mask);
4301     Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
4302     hsize       = _gvn.transform( new AndINode(hsize, hsm) );
4303     Node* mask  = intcon(round_mask);
4304     header_size = _gvn.transform( new AddINode(hsize, mask) );
4305   }
4306 
4307   Node* elem_shift = NULL;
4308   if (layout_is_con) {
4309     int eshift = Klass::layout_helper_log2_element_size(layout_con);
4310     if (eshift != 0)
4311       elem_shift = intcon(eshift);
4312   } else {
4313     // There is no need to mask or shift this value.
4314     // The semantics of LShiftINode include an implicit mask to 0x1F.
4315     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
4316     elem_shift = layout_val;
4317   }
4318 
4319   // Transition to native address size for all offset calculations:
4320   Node* lengthx = ConvI2X(length);
4321   Node* headerx = ConvI2X(header_size);
4322 #ifdef _LP64
4323   { const TypeInt* tilen = _gvn.find_int_type(length);
4324     if (tilen != NULL && tilen->_lo < 0) {
4325       // Add a manual constraint to a positive range.  Cf. array_element_address.
4326       jint size_max = fast_size_limit;
4327       if (size_max > tilen->_hi)  size_max = tilen->_hi;
4328       const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);
4329 
4330       // Only do a narrow I2L conversion if the range check passed.
4331       IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
4332       _gvn.transform(iff);
4333       RegionNode* region = new RegionNode(3);
4334       _gvn.set_type(region, Type::CONTROL);
4335       lengthx = new PhiNode(region, TypeLong::LONG);
4336       _gvn.set_type(lengthx, TypeLong::LONG);
4337 
4338       // Range check passed. Use ConvI2L node with narrow type.
4339       Node* passed = IfFalse(iff);
4340       region->init_req(1, passed);
4341       // Make I2L conversion control dependent to prevent it from
4342       // floating above the range check during loop optimizations.
4343       lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));
4344 
4345       // Range check failed. Use ConvI2L with wide type because length may be invalid.
4346       region->init_req(2, IfTrue(iff));
4347       lengthx->init_req(2, ConvI2X(length));
4348 
4349       set_control(region);
4350       record_for_igvn(region);
4351       record_for_igvn(lengthx);
4352     }
4353   }
4354 #endif
4355 
4356   // Combine header size (plus rounding) and body size.  Then round down.
4357   // This computation cannot overflow, because it is used only in two
4358   // places, one where the length is sharply limited, and the other
4359   // after a successful allocation.
4360   Node* abody = lengthx;
4361   if (elem_shift != NULL)
4362     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
4363   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
4364   if (round_mask != 0) {
4365     Node* mask = MakeConX(~round_mask);
4366     size       = _gvn.transform( new AndXNode(size, mask) );
4367   }
4368   // else if round_mask == 0, the size computation is self-rounding
4369 
4370   if (return_size_val != NULL) {
4371     // This is the size
4372     (*return_size_val) = size;
4373   }
4374 
4375   // Now generate allocation code
4376 
4377   // The entire memory state is needed for slow path of the allocation
4378   // since GC and deoptimization can happen.
4379   Node *mem = reset_memory();
4380   set_all_memory(mem); // Create new memory state
4381 
4382   if (initial_slow_test->is_Bool()) {
4383     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
4384     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
4385   }
4386 
4387   const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
4388   const TypeOopPtr* ary_type = ary_klass->as_instance_type();
4389   Node* valid_length_test = _gvn.intcon(1);
4390   if (ary_type->klass()->is_array_klass()) {
4391     BasicType bt = ary_type->klass()->as_array_klass()->element_type()->basic_type();
4392     jint max = TypeAryPtr::max_array_length(bt);
4393     Node* valid_length_cmp  = _gvn.transform(new CmpUNode(length, intcon(max)));
4394     valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
4395   }
4396 
4397   const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
4398 
4399   // Inline type array variants:
4400   // - null-ok:              MyValue.ref[] (ciObjArrayKlass "[LMyValue")
4401   // - null-free:            MyValue.val[] (ciObjArrayKlass "[QMyValue")
4402   // - null-free, flattened: MyValue.val[] (ciFlatArrayKlass "[QMyValue")
4403   // Check if array is a null-free, non-flattened inline type array
4404   // that needs to be initialized with the default inline type.
4405   Node* default_value = NULL;
4406   Node* raw_default_value = NULL;
4407   if (ary_ptr != NULL && ary_ptr->klass_is_exact()) {
4408     // Array type is known
4409     if (ary_ptr->klass()->as_array_klass()->is_elem_null_free()) {
4410       ciInlineKlass* vk = ary_ptr->klass()->as_array_klass()->element_klass()->as_inline_klass();
4411       if (!vk->flatten_array()) {
4412         default_value = InlineTypeNode::default_oop(gvn(), vk);
4413       }
4414     }
4415   } else if (ary_klass->klass()->can_be_inline_array_klass()) {
4416     // Array type is not known, add runtime checks
4417     assert(!ary_klass->klass_is_exact(), "unexpected exact type");
4418     Node* r = new RegionNode(3);
4419     default_value = new PhiNode(r, TypeInstPtr::BOTTOM);
4420 
4421     Node* bol = array_lh_test(klass_node, Klass::_lh_array_tag_flat_value_bit_inplace | Klass::_lh_null_free_array_bit_inplace, Klass::_lh_null_free_array_bit_inplace);
4422     IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
4423 
4424     // Null-free, non-flattened inline type array, initialize with the default value
4425     set_control(_gvn.transform(new IfTrueNode(iff)));
4426     Node* p = basic_plus_adr(klass_node, in_bytes(ArrayKlass::element_klass_offset()));
4427     Node* eklass = _gvn.transform(LoadKlassNode::make(_gvn, control(), immutable_memory(), p, TypeInstPtr::KLASS));
4428     Node* adr_fixed_block_addr = basic_plus_adr(eklass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()));
4429     Node* adr_fixed_block = make_load(control(), adr_fixed_block_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4430     Node* default_value_offset_addr = basic_plus_adr(adr_fixed_block, in_bytes(InlineKlass::default_value_offset_offset()));
4431     Node* default_value_offset = make_load(control(), default_value_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
4432     Node* elem_mirror = load_mirror_from_klass(eklass);
4433     Node* default_value_addr = basic_plus_adr(elem_mirror, ConvI2X(default_value_offset));
4434     Node* val = access_load_at(elem_mirror, default_value_addr, _gvn.type(default_value_addr)->is_ptr(), TypeInstPtr::BOTTOM, T_OBJECT, IN_HEAP);
4435     r->init_req(1, control());
4436     default_value->init_req(1, val);
4437 
4438     // Otherwise initialize with all zero
4439     r->init_req(2, _gvn.transform(new IfFalseNode(iff)));
4440     default_value->init_req(2, null());
4441 
4442     set_control(_gvn.transform(r));
4443     default_value = _gvn.transform(default_value);
4444   }
4445   if (default_value != NULL) {
4446     if (UseCompressedOops) {
4447       // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
4448       default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
4449       Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
4450       Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
4451       raw_default_value = _gvn.transform(new OrLNode(lower, upper));
4452     } else {
4453       raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
4454     }
4455   }
4456 
4457   // Create the AllocateArrayNode and its result projections
4458   AllocateArrayNode* alloc = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
4459                                                    control(), mem, i_o(),
4460                                                    size, klass_node,
4461                                                    initial_slow_test,
4462                                                    length, valid_length_test,
4463                                                    default_value, raw_default_value);
4464 
4465   // Cast to correct type.  Note that the klass_node may be constant or not,
4466   // and in the latter case the actual array type will be inexact also.
4467   // (This happens via a non-constant argument to inline_native_newArray.)
4468   // In any case, the value of klass_node provides the desired array type.
4469   const TypeInt* length_type = _gvn.find_int_type(length);
4470   if (ary_type->isa_aryptr() && length_type != NULL) {
4471     // Try to get a better type than POS for the size
4472     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4473   }
4474 
4475   Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
4476 
4477   array_ideal_length(alloc, ary_type, true);
4478   return javaoop;
4479 }
4480 
4481 // The following "Ideal_foo" functions are placed here because they recognize
4482 // the graph shapes created by the functions immediately above.
4483 
4484 //---------------------------Ideal_allocation----------------------------------
4485 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
4486 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
4487   if (ptr == NULL) {     // reduce dumb test in callers
4488     return NULL;
4489   }
4490 
4491   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4492   ptr = bs->step_over_gc_barrier(ptr);
4493 
4494   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
4495     ptr = ptr->in(1);
4496     if (ptr == NULL) return NULL;
4497   }
4498   // Return NULL for allocations with several casts:
4499   //   j.l.reflect.Array.newInstance(jobject, jint)
4500   //   Object.clone()
4501   // to keep more precise type from last cast.
4502   if (ptr->is_Proj()) {
4503     Node* allo = ptr->in(0);
4504     if (allo != NULL && allo->is_Allocate()) {
4505       return allo->as_Allocate();
4506     }
4507   }
4508   // Report failure to match.
4509   return NULL;
4510 }
4511 
4512 // Fancy version which also strips off an offset (and reports it to caller).
4513 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
4514                                              intptr_t& offset) {
4515   Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
4516   if (base == NULL)  return NULL;
4517   return Ideal_allocation(base, phase);
4518 }
4519 
4520 // Trace Initialize <- Proj[Parm] <- Allocate
4521 AllocateNode* InitializeNode::allocation() {
4522   Node* rawoop = in(InitializeNode::RawAddress);
4523   if (rawoop->is_Proj()) {
4524     Node* alloc = rawoop->in(0);
4525     if (alloc->is_Allocate()) {
4526       return alloc->as_Allocate();
4527     }
4528   }
4529   return NULL;
4530 }
4531 
4532 // Trace Allocate -> Proj[Parm] -> Initialize
4533 InitializeNode* AllocateNode::initialization() {
4534   ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress);
4535   if (rawoop == NULL)  return NULL;
4536   for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
4537     Node* init = rawoop->fast_out(i);
4538     if (init->is_Initialize()) {
4539       assert(init->as_Initialize()->allocation() == this, "2-way link");
4540       return init->as_Initialize();
4541     }
4542   }
4543   return NULL;
4544 }
4545 
4546 //----------------------------- loop predicates ---------------------------
4547 
4548 //------------------------------add_predicate_impl----------------------------
4549 void GraphKit::add_empty_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
4550   // Too many traps seen?
4551   if (too_many_traps(reason)) {
4552 #ifdef ASSERT
4553     if (TraceLoopPredicate) {
4554       int tc = C->trap_count(reason);
4555       tty->print("too many traps=%s tcount=%d in ",
4556                     Deoptimization::trap_reason_name(reason), tc);
4557       method()->print(); // which method has too many predicate traps
4558       tty->cr();
4559     }
4560 #endif
4561     // We cannot afford to take more traps here,
4562     // do not generate predicate.
4563     return;
4564   }
4565 
4566   Node *cont    = _gvn.intcon(1);
4567   Node* opq     = _gvn.transform(new Opaque1Node(C, cont));
4568   Node *bol     = _gvn.transform(new Conv2BNode(opq));
4569   IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
4570   Node* iffalse = _gvn.transform(new IfFalseNode(iff));
4571   C->add_predicate_opaq(opq);
4572   {
4573     PreserveJVMState pjvms(this);
4574     set_control(iffalse);
4575     inc_sp(nargs);
4576     uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
4577   }
4578   Node* iftrue = _gvn.transform(new IfTrueNode(iff));
4579   set_control(iftrue);
4580 }
4581 
4582 //------------------------------add_predicate---------------------------------
4583 void GraphKit::add_empty_predicates(int nargs) {
4584   // These loop predicates remain empty. All concrete loop predicates are inserted above the corresponding
4585   // empty loop predicate later by 'PhaseIdealLoop::create_new_if_for_predicate'. All concrete loop predicates of
4586   // a specific kind (normal, profile or limit check) share the same uncommon trap as the empty loop predicate.
4587   if (UseLoopPredicate) {
4588     add_empty_predicate_impl(Deoptimization::Reason_predicate, nargs);
4589   }
4590   if (UseProfiledLoopPredicate) {
4591     add_empty_predicate_impl(Deoptimization::Reason_profile_predicate, nargs);
4592   }
4593   // loop's limit check predicate should be near the loop.
4594   add_empty_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
4595 }
4596 
4597 void GraphKit::sync_kit(IdealKit& ideal) {
4598   set_all_memory(ideal.merged_memory());
4599   set_i_o(ideal.i_o());
4600   set_control(ideal.ctrl());
4601 }
4602 
4603 void GraphKit::final_sync(IdealKit& ideal) {
4604   // Final sync IdealKit and graphKit.
4605   sync_kit(ideal);
4606 }
4607 
4608 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4609   Node* len = load_array_length(load_String_value(str, set_ctrl));
4610   Node* coder = load_String_coder(str, set_ctrl);
4611   // Divide length by 2 if coder is UTF16
4612   return _gvn.transform(new RShiftINode(len, coder));
4613 }
4614 
4615 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4616   int value_offset = java_lang_String::value_offset();
4617   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4618                                                      false, NULL, Type::Offset(0));
4619   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4620   const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4621                                                   TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, true, true),
4622                                                   ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
4623   Node* p = basic_plus_adr(str, str, value_offset);
4624   Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4625                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4626   return load;
4627 }
4628 
4629 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4630   if (!CompactStrings) {
4631     return intcon(java_lang_String::CODER_UTF16);
4632   }
4633   int coder_offset = java_lang_String::coder_offset();
4634   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4635                                                      false, NULL, Type::Offset(0));
4636   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4637 
4638   Node* p = basic_plus_adr(str, str, coder_offset);
4639   Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4640                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4641   return load;
4642 }
4643 
4644 void GraphKit::store_String_value(Node* str, Node* value) {
4645   int value_offset = java_lang_String::value_offset();
4646   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4647                                                      false, NULL, Type::Offset(0));
4648   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4649 
4650   access_store_at(str,  basic_plus_adr(str, value_offset), value_field_type,
4651                   value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4652 }
4653 
4654 void GraphKit::store_String_coder(Node* str, Node* value) {
4655   int coder_offset = java_lang_String::coder_offset();
4656   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4657                                                      false, NULL, Type::Offset(0));
4658   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4659 
4660   access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4661                   value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4662 }
4663 
4664 // Capture src and dst memory state with a MergeMemNode
4665 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4666   if (src_type == dst_type) {
4667     // Types are equal, we don't need a MergeMemNode
4668     return memory(src_type);
4669   }
4670   MergeMemNode* merge = MergeMemNode::make(map()->memory());
4671   record_for_igvn(merge); // fold it up later, if possible
4672   int src_idx = C->get_alias_index(src_type);
4673   int dst_idx = C->get_alias_index(dst_type);
4674   merge->set_memory_at(src_idx, memory(src_idx));
4675   merge->set_memory_at(dst_idx, memory(dst_idx));
4676   return merge;
4677 }
4678 
4679 Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) {
4680   assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported");
4681   assert(src_type == TypeAryPtr::BYTES || src_type == TypeAryPtr::CHARS, "invalid source type");
4682   // If input and output memory types differ, capture both states to preserve
4683   // the dependency between preceding and subsequent loads/stores.
4684   // For example, the following program:
4685   //  StoreB
4686   //  compress_string
4687   //  LoadB
4688   // has this memory graph (use->def):
4689   //  LoadB -> compress_string -> CharMem
4690   //             ... -> StoreB -> ByteMem
4691   // The intrinsic hides the dependency between LoadB and StoreB, causing
4692   // the load to read from memory not containing the result of the StoreB.
4693   // The correct memory graph should look like this:
4694   //  LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem))
4695   Node* mem = capture_memory(src_type, TypeAryPtr::BYTES);
4696   StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count);
4697   Node* res_mem = _gvn.transform(new SCMemProjNode(_gvn.transform(str)));
4698   set_memory(res_mem, TypeAryPtr::BYTES);
4699   return str;
4700 }
4701 
4702 void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) {
4703   assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported");
4704   assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type");
4705   // Capture src and dst memory (see comment in 'compress_string').
4706   Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
4707   StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count);
4708   set_memory(_gvn.transform(str), dst_type);
4709 }
4710 
4711 void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) {
4712   /**
4713    * int i_char = start;
4714    * for (int i_byte = 0; i_byte < count; i_byte++) {
4715    *   dst[i_char++] = (char)(src[i_byte] & 0xff);
4716    * }
4717    */
4718   add_empty_predicates();
4719   C->set_has_loops(true);
4720 
4721   RegionNode* head = new RegionNode(3);
4722   head->init_req(1, control());
4723   gvn().set_type(head, Type::CONTROL);
4724   record_for_igvn(head);
4725 
4726   Node* i_byte = new PhiNode(head, TypeInt::INT);
4727   i_byte->init_req(1, intcon(0));
4728   gvn().set_type(i_byte, TypeInt::INT);
4729   record_for_igvn(i_byte);
4730 
4731   Node* i_char = new PhiNode(head, TypeInt::INT);
4732   i_char->init_req(1, start);
4733   gvn().set_type(i_char, TypeInt::INT);
4734   record_for_igvn(i_char);
4735 
4736   Node* mem = PhiNode::make(head, memory(TypeAryPtr::BYTES), Type::MEMORY, TypeAryPtr::BYTES);
4737   gvn().set_type(mem, Type::MEMORY);
4738   record_for_igvn(mem);
4739   set_control(head);
4740   set_memory(mem, TypeAryPtr::BYTES);
4741   Node* ch = load_array_element(src, i_byte, TypeAryPtr::BYTES, /* set_ctrl */ true);
4742   Node* st = store_to_memory(control(), array_element_address(dst, i_char, T_BYTE),
4743                              AndI(ch, intcon(0xff)), T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
4744                              false, false, true /* mismatched */);
4745 
4746   IfNode* iff = create_and_map_if(head, Bool(CmpI(i_byte, count), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN);
4747   head->init_req(2, IfTrue(iff));
4748   mem->init_req(2, st);
4749   i_byte->init_req(2, AddI(i_byte, intcon(1)));
4750   i_char->init_req(2, AddI(i_char, intcon(2)));
4751 
4752   set_control(IfFalse(iff));
4753   set_memory(st, TypeAryPtr::BYTES);
4754 }
4755 
4756 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4757   if (!field->is_constant()) {
4758     return NULL; // Field not marked as constant.
4759   }
4760   ciInstance* holder = NULL;
4761   if (!field->is_static()) {
4762     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4763     if (const_oop != NULL && const_oop->is_instance()) {
4764       holder = const_oop->as_instance();
4765     }
4766   }
4767   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4768                                                         /*is_unsigned_load=*/false);
4769   if (con_type != NULL) {
4770     Node* con = makecon(con_type);
4771     if (field->type()->is_inlinetype()) {
4772       con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
4773     } else if (con_type->is_inlinetypeptr()) {
4774       con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
4775     }
4776     return con;
4777   }
4778   return NULL;
4779 }
4780 
4781 //---------------------------load_mirror_from_klass----------------------------
4782 // Given a klass oop, load its java mirror (a java.lang.Class oop).
4783 Node* GraphKit::load_mirror_from_klass(Node* klass) {
4784   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
4785   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4786   // mirror = ((OopHandle)mirror)->resolve();
4787   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
4788 }