1 /*
   2  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "ci/ciFlatArray.hpp"
  28 #include "ci/ciInlineKlass.hpp"
  29 #include "ci/ciReplay.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "code/aotCodeCache.hpp"
  32 #include "code/exceptionHandlerTable.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "compiler/compilationFailureInfo.hpp"
  35 #include "compiler/compilationMemoryStatistic.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compiler_globals.hpp"
  39 #include "compiler/compilerDefinitions.hpp"
  40 #include "compiler/compilerOracle.hpp"
  41 #include "compiler/disassembler.hpp"
  42 #include "compiler/oopMap.hpp"
  43 #include "gc/shared/barrierSet.hpp"
  44 #include "gc/shared/c2/barrierSetC2.hpp"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "jvm_io.h"
  47 #include "memory/allocation.hpp"
  48 #include "memory/arena.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "opto/addnode.hpp"
  51 #include "opto/block.hpp"
  52 #include "opto/c2compiler.hpp"
  53 #include "opto/callGenerator.hpp"
  54 #include "opto/callnode.hpp"
  55 #include "opto/castnode.hpp"
  56 #include "opto/cfgnode.hpp"
  57 #include "opto/chaitin.hpp"
  58 #include "opto/compile.hpp"
  59 #include "opto/connode.hpp"
  60 #include "opto/convertnode.hpp"
  61 #include "opto/divnode.hpp"
  62 #include "opto/escape.hpp"
  63 #include "opto/idealGraphPrinter.hpp"
  64 #include "opto/inlinetypenode.hpp"
  65 #include "opto/locknode.hpp"
  66 #include "opto/loopnode.hpp"
  67 #include "opto/machnode.hpp"
  68 #include "opto/macro.hpp"
  69 #include "opto/matcher.hpp"
  70 #include "opto/mathexactnode.hpp"
  71 #include "opto/memnode.hpp"
  72 #include "opto/movenode.hpp"
  73 #include "opto/mulnode.hpp"
  74 #include "opto/multnode.hpp"
  75 #include "opto/narrowptrnode.hpp"
  76 #include "opto/node.hpp"
  77 #include "opto/opaquenode.hpp"
  78 #include "opto/opcodes.hpp"
  79 #include "opto/output.hpp"
  80 #include "opto/parse.hpp"
  81 #include "opto/phaseX.hpp"
  82 #include "opto/reachability.hpp"
  83 #include "opto/rootnode.hpp"
  84 #include "opto/runtime.hpp"
  85 #include "opto/stringopts.hpp"
  86 #include "opto/type.hpp"
  87 #include "opto/vector.hpp"
  88 #include "opto/vectornode.hpp"
  89 #include "runtime/arguments.hpp"
  90 #include "runtime/globals_extension.hpp"
  91 #include "runtime/sharedRuntime.hpp"
  92 #include "runtime/signature.hpp"
  93 #include "runtime/stubRoutines.hpp"
  94 #include "runtime/timer.hpp"
  95 #include "utilities/align.hpp"
  96 #include "utilities/copy.hpp"
  97 #include "utilities/hashTable.hpp"
  98 #include "utilities/macros.hpp"
  99 
 100 // -------------------- Compile::mach_constant_base_node -----------------------
 101 // Constant table base node singleton.
 102 MachConstantBaseNode* Compile::mach_constant_base_node() {
 103   if (_mach_constant_base_node == nullptr) {
 104     _mach_constant_base_node = new MachConstantBaseNode();
 105     _mach_constant_base_node->add_req(C->root());
 106   }
 107   return _mach_constant_base_node;
 108 }
 109 
 110 
 111 /// Support for intrinsics.
 112 
 113 // Return the index at which m must be inserted (or already exists).
 114 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
 115 class IntrinsicDescPair {
 116  private:
 117   ciMethod* _m;
 118   bool _is_virtual;
 119  public:
 120   IntrinsicDescPair(ciMethod* m, bool is_virtual) : _m(m), _is_virtual(is_virtual) {}
 121   static int compare(IntrinsicDescPair* const& key, CallGenerator* const& elt) {
 122     ciMethod* m= elt->method();
 123     ciMethod* key_m = key->_m;
 124     if (key_m < m)      return -1;
 125     else if (key_m > m) return 1;
 126     else {
 127       bool is_virtual = elt->is_virtual();
 128       bool key_virtual = key->_is_virtual;
 129       if (key_virtual < is_virtual)      return -1;
 130       else if (key_virtual > is_virtual) return 1;
 131       else                               return 0;
 132     }
 133   }
 134 };
 135 int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found) {
 136 #ifdef ASSERT
 137   for (int i = 1; i < _intrinsics.length(); i++) {
 138     CallGenerator* cg1 = _intrinsics.at(i-1);
 139     CallGenerator* cg2 = _intrinsics.at(i);
 140     assert(cg1->method() != cg2->method()
 141            ? cg1->method()     < cg2->method()
 142            : cg1->is_virtual() < cg2->is_virtual(),
 143            "compiler intrinsics list must stay sorted");
 144   }
 145 #endif
 146   IntrinsicDescPair pair(m, is_virtual);
 147   return _intrinsics.find_sorted<IntrinsicDescPair*, IntrinsicDescPair::compare>(&pair, found);
 148 }
 149 
 150 void Compile::register_intrinsic(CallGenerator* cg) {
 151   bool found = false;
 152   int index = intrinsic_insertion_index(cg->method(), cg->is_virtual(), found);
 153   assert(!found, "registering twice");
 154   _intrinsics.insert_before(index, cg);
 155   assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
 156 }
 157 
 158 CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
 159   assert(m->is_loaded(), "don't try this on unloaded methods");
 160   if (_intrinsics.length() > 0) {
 161     bool found = false;
 162     int index = intrinsic_insertion_index(m, is_virtual, found);
 163      if (found) {
 164       return _intrinsics.at(index);
 165     }
 166   }
 167   // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
 168   if (m->intrinsic_id() != vmIntrinsics::_none &&
 169       m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
 170     CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
 171     if (cg != nullptr) {
 172       // Save it for next time:
 173       register_intrinsic(cg);
 174       return cg;
 175     } else {
 176       gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
 177     }
 178   }
 179   return nullptr;
 180 }
 181 
 182 // Compile::make_vm_intrinsic is defined in library_call.cpp.
 183 
 184 #ifndef PRODUCT
 185 // statistics gathering...
 186 
 187 juint  Compile::_intrinsic_hist_count[vmIntrinsics::number_of_intrinsics()] = {0};
 188 jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::number_of_intrinsics()] = {0};
 189 
 190 inline int as_int(vmIntrinsics::ID id) {
 191   return vmIntrinsics::as_int(id);
 192 }
 193 
 194 bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
 195   assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
 196   int oflags = _intrinsic_hist_flags[as_int(id)];
 197   assert(flags != 0, "what happened?");
 198   if (is_virtual) {
 199     flags |= _intrinsic_virtual;
 200   }
 201   bool changed = (flags != oflags);
 202   if ((flags & _intrinsic_worked) != 0) {
 203     juint count = (_intrinsic_hist_count[as_int(id)] += 1);
 204     if (count == 1) {
 205       changed = true;           // first time
 206     }
 207     // increment the overall count also:
 208     _intrinsic_hist_count[as_int(vmIntrinsics::_none)] += 1;
 209   }
 210   if (changed) {
 211     if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
 212       // Something changed about the intrinsic's virtuality.
 213       if ((flags & _intrinsic_virtual) != 0) {
 214         // This is the first use of this intrinsic as a virtual call.
 215         if (oflags != 0) {
 216           // We already saw it as a non-virtual, so note both cases.
 217           flags |= _intrinsic_both;
 218         }
 219       } else if ((oflags & _intrinsic_both) == 0) {
 220         // This is the first use of this intrinsic as a non-virtual
 221         flags |= _intrinsic_both;
 222       }
 223     }
 224     _intrinsic_hist_flags[as_int(id)] = (jubyte) (oflags | flags);
 225   }
 226   // update the overall flags also:
 227   _intrinsic_hist_flags[as_int(vmIntrinsics::_none)] |= (jubyte) flags;
 228   return changed;
 229 }
 230 
 231 static char* format_flags(int flags, char* buf) {
 232   buf[0] = 0;
 233   if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
 234   if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
 235   if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
 236   if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
 237   if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
 238   if (buf[0] == 0)  strcat(buf, ",");
 239   assert(buf[0] == ',', "must be");
 240   return &buf[1];
 241 }
 242 
 243 void Compile::print_intrinsic_statistics() {
 244   char flagsbuf[100];
 245   ttyLocker ttyl;
 246   if (xtty != nullptr)  xtty->head("statistics type='intrinsic'");
 247   tty->print_cr("Compiler intrinsic usage:");
 248   juint total = _intrinsic_hist_count[as_int(vmIntrinsics::_none)];
 249   if (total == 0)  total = 1;  // avoid div0 in case of no successes
 250   #define PRINT_STAT_LINE(name, c, f) \
 251     tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
 252   for (auto id : EnumRange<vmIntrinsicID>{}) {
 253     int   flags = _intrinsic_hist_flags[as_int(id)];
 254     juint count = _intrinsic_hist_count[as_int(id)];
 255     if ((flags | count) != 0) {
 256       PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
 257     }
 258   }
 259   PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[as_int(vmIntrinsics::_none)], flagsbuf));
 260   if (xtty != nullptr)  xtty->tail("statistics");
 261 }
 262 
 263 void Compile::print_statistics() {
 264   { ttyLocker ttyl;
 265     if (xtty != nullptr)  xtty->head("statistics type='opto'");
 266     Parse::print_statistics();
 267     PhaseStringOpts::print_statistics();
 268     PhaseCCP::print_statistics();
 269     PhaseRegAlloc::print_statistics();
 270     PhaseOutput::print_statistics();
 271     PhasePeephole::print_statistics();
 272     PhaseIdealLoop::print_statistics();
 273     ConnectionGraph::print_statistics();
 274     PhaseMacroExpand::print_statistics();
 275     if (xtty != nullptr)  xtty->tail("statistics");
 276   }
 277   if (_intrinsic_hist_flags[as_int(vmIntrinsics::_none)] != 0) {
 278     // put this under its own <statistics> element.
 279     print_intrinsic_statistics();
 280   }
 281 }
 282 #endif //PRODUCT
 283 
 284 void Compile::gvn_replace_by(Node* n, Node* nn) {
 285   for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
 286     Node* use = n->last_out(i);
 287     bool is_in_table = initial_gvn()->hash_delete(use);
 288     uint uses_found = 0;
 289     for (uint j = 0; j < use->len(); j++) {
 290       if (use->in(j) == n) {
 291         if (j < use->req())
 292           use->set_req(j, nn);
 293         else
 294           use->set_prec(j, nn);
 295         uses_found++;
 296       }
 297     }
 298     if (is_in_table) {
 299       // reinsert into table
 300       initial_gvn()->hash_find_insert(use);
 301     }
 302     record_for_igvn(use);
 303     PhaseIterGVN::add_users_of_use_to_worklist(nn, use, *_igvn_worklist);
 304     i -= uses_found;    // we deleted 1 or more copies of this edge
 305   }
 306 }
 307 
 308 
 309 // Identify all nodes that are reachable from below, useful.
 310 // Use breadth-first pass that records state in a Unique_Node_List,
 311 // recursive traversal is slower.
 312 void Compile::identify_useful_nodes(Unique_Node_List &useful) {
 313   int estimated_worklist_size = live_nodes();
 314   useful.map( estimated_worklist_size, nullptr );  // preallocate space
 315 
 316   // Initialize worklist
 317   if (root() != nullptr)  { useful.push(root()); }
 318   // If 'top' is cached, declare it useful to preserve cached node
 319   if (cached_top_node())  { useful.push(cached_top_node()); }
 320 
 321   // Push all useful nodes onto the list, breadthfirst
 322   for( uint next = 0; next < useful.size(); ++next ) {
 323     assert( next < unique(), "Unique useful nodes < total nodes");
 324     Node *n  = useful.at(next);
 325     uint max = n->len();
 326     for( uint i = 0; i < max; ++i ) {
 327       Node *m = n->in(i);
 328       if (not_a_node(m))  continue;
 329       useful.push(m);
 330     }
 331   }
 332 }
 333 
 334 // Update dead_node_list with any missing dead nodes using useful
 335 // list. Consider all non-useful nodes to be useless i.e., dead nodes.
 336 void Compile::update_dead_node_list(Unique_Node_List &useful) {
 337   uint max_idx = unique();
 338   VectorSet& useful_node_set = useful.member_set();
 339 
 340   for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
 341     // If node with index node_idx is not in useful set,
 342     // mark it as dead in dead node list.
 343     if (!useful_node_set.test(node_idx)) {
 344       record_dead_node(node_idx);
 345     }
 346   }
 347 }
 348 
 349 void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
 350   int shift = 0;
 351   for (int i = 0; i < inlines->length(); i++) {
 352     CallGenerator* cg = inlines->at(i);
 353     if (useful.member(cg->call_node())) {
 354       if (shift > 0) {
 355         inlines->at_put(i - shift, cg);
 356       }
 357     } else {
 358       shift++; // skip over the dead element
 359     }
 360   }
 361   if (shift > 0) {
 362     inlines->trunc_to(inlines->length() - shift); // remove last elements from compacted array
 363   }
 364 }
 365 
 366 void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead) {
 367   assert(dead != nullptr && dead->is_Call(), "sanity");
 368   int found = 0;
 369   for (int i = 0; i < inlines->length(); i++) {
 370     if (inlines->at(i)->call_node() == dead) {
 371       inlines->remove_at(i);
 372       found++;
 373       NOT_DEBUG( break; ) // elements are unique, so exit early
 374     }
 375   }
 376   assert(found <= 1, "not unique");
 377 }
 378 
 379 template<typename N, ENABLE_IF_SDEFN(std::is_base_of<Node, N>::value)>
 380 void Compile::remove_useless_nodes(GrowableArray<N*>& node_list, Unique_Node_List& useful) {
 381   for (int i = node_list.length() - 1; i >= 0; i--) {
 382     N* node = node_list.at(i);
 383     if (!useful.member(node)) {
 384       node_list.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
 385     }
 386   }
 387 }
 388 
 389 void Compile::remove_useless_node(Node* dead) {
 390   remove_modified_node(dead);
 391 
 392   // Constant node that has no out-edges and has only one in-edge from
 393   // root is usually dead. However, sometimes reshaping walk makes
 394   // it reachable by adding use edges. So, we will NOT count Con nodes
 395   // as dead to be conservative about the dead node count at any
 396   // given time.
 397   if (!dead->is_Con()) {
 398     record_dead_node(dead->_idx);
 399   }
 400   if (dead->is_macro()) {
 401     remove_macro_node(dead);
 402   }
 403   if (dead->is_expensive()) {
 404     remove_expensive_node(dead);
 405   }
 406   if (dead->is_ReachabilityFence()) {
 407     remove_reachability_fence(dead->as_ReachabilityFence());
 408   }
 409   if (dead->is_OpaqueTemplateAssertionPredicate()) {
 410     remove_template_assertion_predicate_opaque(dead->as_OpaqueTemplateAssertionPredicate());
 411   }
 412   if (dead->is_ParsePredicate()) {
 413     remove_parse_predicate(dead->as_ParsePredicate());
 414   }
 415   if (dead->for_post_loop_opts_igvn()) {
 416     remove_from_post_loop_opts_igvn(dead);
 417   }
 418   if (dead->is_InlineType()) {
 419     remove_inline_type(dead);
 420   }
 421   if (dead->is_LoadFlat() || dead->is_StoreFlat()) {
 422     remove_flat_access(dead);
 423   }
 424   if (dead->for_merge_stores_igvn()) {
 425     remove_from_merge_stores_igvn(dead);
 426   }
 427   if (dead->is_Call()) {
 428     remove_useless_late_inlines(                &_late_inlines, dead);
 429     remove_useless_late_inlines(         &_string_late_inlines, dead);
 430     remove_useless_late_inlines(         &_boxing_late_inlines, dead);
 431     remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
 432 
 433     if (dead->is_CallStaticJava()) {
 434       remove_unstable_if_trap(dead->as_CallStaticJava(), false);
 435     }
 436   }
 437   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 438   bs->unregister_potential_barrier_node(dead);
 439 }
 440 
 441 // Disconnect all useless nodes by disconnecting those at the boundary.
 442 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints) {
 443   uint next = 0;
 444   while (next < useful.size()) {
 445     Node *n = useful.at(next++);
 446     if (n->is_SafePoint()) {
 447       // We're done with a parsing phase. Replaced nodes are not valid
 448       // beyond that point.
 449       n->as_SafePoint()->delete_replaced_nodes();
 450     }
 451     // Use raw traversal of out edges since this code removes out edges
 452     int max = n->outcnt();
 453     for (int j = 0; j < max; ++j) {
 454       Node* child = n->raw_out(j);
 455       if (!useful.member(child)) {
 456         assert(!child->is_top() || child != top(),
 457                "If top is cached in Compile object it is in useful list");
 458         // Only need to remove this out-edge to the useless node
 459         n->raw_del_out(j);
 460         --j;
 461         --max;
 462         if (child->is_data_proj_of_pure_function(n)) {
 463           worklist.push(n);
 464         }
 465       }
 466     }
 467     if (n->outcnt() == 1 && n->has_special_unique_user()) {
 468       assert(useful.member(n->unique_out()), "do not push a useless node");
 469       worklist.push(n->unique_out());
 470     }
 471     if (n->outcnt() == 0) {
 472       worklist.push(n);
 473     }
 474   }
 475 
 476   remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
 477   remove_useless_nodes(_parse_predicates,   useful); // remove useless Parse Predicate nodes
 478   // Remove useless Template Assertion Predicate opaque nodes
 479   remove_useless_nodes(_template_assertion_predicate_opaques, useful);
 480   remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
 481   remove_useless_nodes(_reachability_fences, useful); // remove useless node recorded for post loop opts IGVN pass
 482   remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
 483   remove_useless_nodes(_inline_type_nodes,  useful); // remove useless inline type nodes
 484   remove_useless_nodes(_flat_access_nodes, useful);  // remove useless flat access nodes
 485 #ifdef ASSERT
 486   if (_modified_nodes != nullptr) {
 487     _modified_nodes->remove_useless_nodes(useful.member_set());
 488   }
 489 #endif
 490   remove_useless_nodes(_for_merge_stores_igvn, useful); // remove useless node recorded for merge stores IGVN pass
 491   remove_useless_unstable_if_traps(useful);          // remove useless unstable_if traps
 492   remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
 493 #ifdef ASSERT
 494   if (_modified_nodes != nullptr) {
 495     _modified_nodes->remove_useless_nodes(useful.member_set());
 496   }
 497 #endif
 498 
 499   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 500   bs->eliminate_useless_gc_barriers(useful, this);
 501   // clean up the late inline lists
 502   remove_useless_late_inlines(                &_late_inlines, useful);
 503   remove_useless_late_inlines(         &_string_late_inlines, useful);
 504   remove_useless_late_inlines(         &_boxing_late_inlines, useful);
 505   remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
 506   DEBUG_ONLY(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);)
 507 }
 508 
 509 // ============================================================================
 510 //------------------------------CompileWrapper---------------------------------
 511 class CompileWrapper : public StackObj {
 512   Compile *const _compile;
 513  public:
 514   CompileWrapper(Compile* compile);
 515 
 516   ~CompileWrapper();
 517 };
 518 
 519 CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
 520   // the Compile* pointer is stored in the current ciEnv:
 521   ciEnv* env = compile->env();
 522   assert(env == ciEnv::current(), "must already be a ciEnv active");
 523   assert(env->compiler_data() == nullptr, "compile already active?");
 524   env->set_compiler_data(compile);
 525   assert(compile == Compile::current(), "sanity");
 526 
 527   compile->set_type_dict(nullptr);
 528   compile->set_clone_map(new Dict(cmpkey, hashkey, _compile->comp_arena()));
 529   compile->clone_map().set_clone_idx(0);
 530   compile->set_type_last_size(0);
 531   compile->set_last_tf(nullptr, nullptr);
 532   compile->set_indexSet_arena(nullptr);
 533   compile->set_indexSet_free_block_list(nullptr);
 534   compile->init_type_arena();
 535   Type::Initialize(compile);
 536   _compile->begin_method();
 537   _compile->clone_map().set_debug(_compile->has_method() && _compile->directive()->CloneMapDebugOption);
 538 }
 539 CompileWrapper::~CompileWrapper() {
 540   // simulate crash during compilation
 541   assert(CICrashAt < 0 || _compile->compile_id() != CICrashAt, "just as planned");
 542 
 543   _compile->end_method();
 544   _compile->env()->set_compiler_data(nullptr);
 545 }
 546 
 547 
 548 //----------------------------print_compile_messages---------------------------
 549 void Compile::print_compile_messages() {
 550 #ifndef PRODUCT
 551   // Check if recompiling
 552   if (!subsume_loads() && PrintOpto) {
 553     // Recompiling without allowing machine instructions to subsume loads
 554     tty->print_cr("*********************************************************");
 555     tty->print_cr("** Bailout: Recompile without subsuming loads          **");
 556     tty->print_cr("*********************************************************");
 557   }
 558   if ((do_escape_analysis() != DoEscapeAnalysis) && PrintOpto) {
 559     // Recompiling without escape analysis
 560     tty->print_cr("*********************************************************");
 561     tty->print_cr("** Bailout: Recompile without escape analysis          **");
 562     tty->print_cr("*********************************************************");
 563   }
 564   if (do_iterative_escape_analysis() != DoEscapeAnalysis && PrintOpto) {
 565     // Recompiling without iterative escape analysis
 566     tty->print_cr("*********************************************************");
 567     tty->print_cr("** Bailout: Recompile without iterative escape analysis**");
 568     tty->print_cr("*********************************************************");
 569   }
 570   if (do_reduce_allocation_merges() != ReduceAllocationMerges && PrintOpto) {
 571     // Recompiling without reducing allocation merges
 572     tty->print_cr("*********************************************************");
 573     tty->print_cr("** Bailout: Recompile without reduce allocation merges **");
 574     tty->print_cr("*********************************************************");
 575   }
 576   if ((eliminate_boxing() != EliminateAutoBox) && PrintOpto) {
 577     // Recompiling without boxing elimination
 578     tty->print_cr("*********************************************************");
 579     tty->print_cr("** Bailout: Recompile without boxing elimination       **");
 580     tty->print_cr("*********************************************************");
 581   }
 582   if ((do_locks_coarsening() != EliminateLocks) && PrintOpto) {
 583     // Recompiling without locks coarsening
 584     tty->print_cr("*********************************************************");
 585     tty->print_cr("** Bailout: Recompile without locks coarsening         **");
 586     tty->print_cr("*********************************************************");
 587   }
 588   if (env()->break_at_compile()) {
 589     // Open the debugger when compiling this method.
 590     tty->print("### Breaking when compiling: ");
 591     method()->print_short_name();
 592     tty->cr();
 593     BREAKPOINT;
 594   }
 595 
 596   if( PrintOpto ) {
 597     if (is_osr_compilation()) {
 598       tty->print("[OSR]%3d", _compile_id);
 599     } else {
 600       tty->print("%3d", _compile_id);
 601     }
 602   }
 603 #endif
 604 }
 605 
 606 #ifndef PRODUCT
 607 void Compile::print_phase(const char* phase_name) {
 608   tty->print_cr("%u.\t%s", ++_phase_counter, phase_name);
 609 }
 610 
 611 void Compile::print_ideal_ir(const char* compile_phase_name) const {
 612   // keep the following output all in one block
 613   // This output goes directly to the tty, not the compiler log.
 614   // To enable tools to match it up with the compilation activity,
 615   // be sure to tag this tty output with the compile ID.
 616 
 617   // Node dumping can cause a safepoint, which can break the tty lock.
 618   // Buffer all node dumps, so that all safepoints happen before we lock.
 619   ResourceMark rm;
 620   stringStream ss;
 621 
 622   if (_output == nullptr) {
 623     ss.print_cr("AFTER: %s", compile_phase_name);
 624     // Print out all nodes in ascending order of index.
 625     // It is important that we traverse both inputs and outputs of nodes,
 626     // so that we reach all nodes that are connected to Root.
 627     root()->dump_bfs(MaxNodeLimit, nullptr, "-+S$", &ss);
 628   } else {
 629     // Dump the node blockwise if we have a scheduling
 630     _output->print_scheduling(&ss);
 631   }
 632 
 633   // Check that the lock is not broken by a safepoint.
 634   NoSafepointVerifier nsv;
 635   ttyLocker ttyl;
 636   if (xtty != nullptr) {
 637     xtty->head("ideal compile_id='%d'%s compile_phase='%s'",
 638                compile_id(),
 639                is_osr_compilation() ? " compile_kind='osr'" : "",
 640                compile_phase_name);
 641   }
 642 
 643   tty->print("%s", ss.as_string());
 644 
 645   if (xtty != nullptr) {
 646     xtty->tail("ideal");
 647   }
 648 }
 649 #endif
 650 
 651 // ============================================================================
 652 //------------------------------Compile standard-------------------------------
 653 
 654 // Compile a method.  entry_bci is -1 for normal compilations and indicates
 655 // the continuation bci for on stack replacement.
 656 
 657 
 658 Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci,
 659                  Options options, DirectiveSet* directive)
 660     : Phase(Compiler),
 661       _compile_id(ci_env->compile_id()),
 662       _options(options),
 663       _method(target),
 664       _entry_bci(osr_bci),
 665       _ilt(nullptr),
 666       _stub_function(nullptr),
 667       _stub_name(nullptr),
 668       _stub_id(StubId::NO_STUBID),
 669       _stub_entry_point(nullptr),
 670       _max_node_limit(MaxNodeLimit),
 671       _post_loop_opts_phase(false),
 672       _merge_stores_phase(false),
 673       _allow_macro_nodes(true),
 674       _inlining_progress(false),
 675       _inlining_incrementally(false),
 676       _strength_reduction(false),
 677       _do_cleanup(false),
 678       _has_reserved_stack_access(target->has_reserved_stack_access()),
 679       _has_circular_inline_type(false),
 680 #ifndef PRODUCT
 681       _igv_idx(0),
 682       _trace_opto_output(directive->TraceOptoOutputOption),
 683 #endif
 684       _clinit_barrier_on_entry(false),
 685       _stress_seed(0),
 686       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 687       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 688       _env(ci_env),
 689       _directive(directive),
 690       _log(ci_env->log()),
 691       _first_failure_details(nullptr),
 692       _intrinsics(comp_arena(), 0, 0, nullptr),
 693       _macro_nodes(comp_arena(), 8, 0, nullptr),
 694       _parse_predicates(comp_arena(), 8, 0, nullptr),
 695       _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
 696       _expensive_nodes(comp_arena(), 8, 0, nullptr),
 697       _reachability_fences(comp_arena(), 8, 0, nullptr),
 698       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 699       _inline_type_nodes (comp_arena(), 8, 0, nullptr),
 700       _flat_access_nodes(comp_arena(), 8, 0, nullptr),
 701       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 702       _unstable_if_traps(comp_arena(), 8, 0, nullptr),
 703       _coarsened_locks(comp_arena(), 8, 0, nullptr),
 704       _congraph(nullptr),
 705       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 706       _unique(0),
 707       _dead_node_count(0),
 708       _dead_node_list(comp_arena()),
 709       _node_arena_one(mtCompiler, Arena::Tag::tag_node),
 710       _node_arena_two(mtCompiler, Arena::Tag::tag_node),
 711       _node_arena(&_node_arena_one),
 712       _mach_constant_base_node(nullptr),
 713       _Compile_types(mtCompiler, Arena::Tag::tag_type),
 714       _initial_gvn(nullptr),
 715       _igvn_worklist(nullptr),
 716       _types(nullptr),
 717       _node_hash(nullptr),
 718       _late_inlines(comp_arena(), 2, 0, nullptr),
 719       _string_late_inlines(comp_arena(), 2, 0, nullptr),
 720       _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
 721       _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr),
 722       _late_inlines_pos(0),
 723       _has_mh_late_inlines(false),
 724       _oom(false),
 725       _replay_inline_data(nullptr),
 726       _inline_printer(this),
 727       _java_calls(0),
 728       _inner_loops(0),
 729       _FIRST_STACK_mask(comp_arena()),
 730       _interpreter_frame_size(0),
 731       _regmask_arena(mtCompiler, Arena::Tag::tag_regmask),
 732       _output(nullptr)
 733 #ifndef PRODUCT
 734       ,
 735       _in_dump_cnt(0)
 736 #endif
 737 {
 738   C = this;
 739   CompileWrapper cw(this);
 740 
 741   TraceTime t1("Total compilation time", &_t_totalCompilation, CITime, CITimeVerbose);
 742   TraceTime t2(nullptr, &_t_methodCompilation, CITime, false);
 743 
 744 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
 745   bool print_opto_assembly = directive->PrintOptoAssemblyOption;
 746   // We can always print a disassembly, either abstract (hex dump) or
 747   // with the help of a suitable hsdis library. Thus, we should not
 748   // couple print_assembly and print_opto_assembly controls.
 749   // But: always print opto and regular assembly on compile command 'print'.
 750   bool print_assembly = directive->PrintAssemblyOption;
 751   set_print_assembly(print_opto_assembly || print_assembly);
 752 #else
 753   set_print_assembly(false); // must initialize.
 754 #endif
 755 
 756 #ifndef PRODUCT
 757   set_parsed_irreducible_loop(false);
 758 #endif
 759 
 760   if (directive->ReplayInlineOption) {
 761     _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
 762   }
 763   set_print_inlining(directive->PrintInliningOption || PrintOptoInlining);
 764   set_print_intrinsics(directive->PrintIntrinsicsOption);
 765   set_has_irreducible_loop(true); // conservative until build_loop_tree() reset it
 766 
 767   if (ProfileTraps) {
 768     // Make sure the method being compiled gets its own MDO,
 769     // so we can at least track the decompile_count().
 770     method()->ensure_method_data();
 771   }
 772 
 773   if (StressLCM || StressGCM || StressIGVN || StressCCP ||
 774       StressIncrementalInlining || StressMacroExpansion ||
 775       StressMacroElimination || StressUnstableIfTraps ||
 776       StressBailout || StressLoopPeeling || StressCountedLoop) {
 777     initialize_stress_seed(directive);
 778   }
 779 
 780   Init(/*do_aliasing=*/ true);
 781 
 782   print_compile_messages();
 783 
 784   _ilt = InlineTree::build_inline_tree_root();
 785 
 786   // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
 787   assert(num_alias_types() >= AliasIdxRaw, "");
 788 
 789 #define MINIMUM_NODE_HASH  1023
 790 
 791   // GVN that will be run immediately on new nodes
 792   uint estimated_size = method()->code_size()*4+64;
 793   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 794   _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
 795   _types = new (comp_arena()) Type_Array(comp_arena());
 796   _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
 797   PhaseGVN gvn;
 798   set_initial_gvn(&gvn);
 799 
 800   { // Scope for timing the parser
 801     TracePhase tp(_t_parser);
 802 
 803     // Put top into the hash table ASAP.
 804     initial_gvn()->transform(top());
 805 
 806     // Set up tf(), start(), and find a CallGenerator.
 807     CallGenerator* cg = nullptr;
 808     if (is_osr_compilation()) {
 809       init_tf(TypeFunc::make(method(), false, /* is_osr_compilation = */ true));
 810       StartNode* s = new StartOSRNode(root(), tf()->domain_sig());
 811       initial_gvn()->set_type_bottom(s);
 812       verify_start(s);
 813       cg = CallGenerator::for_osr(method(), entry_bci());
 814     } else {
 815       // Normal case.
 816       init_tf(TypeFunc::make(method(), false));
 817       StartNode* s = new StartNode(root(), tf()->domain_cc());
 818       initial_gvn()->set_type_bottom(s);
 819       verify_start(s);
 820       float past_uses = method()->interpreter_invocation_count();
 821       float expected_uses = past_uses;
 822       cg = CallGenerator::for_inline(method(), expected_uses);
 823     }
 824     if (failing())  return;
 825     if (cg == nullptr) {
 826       const char* reason = InlineTree::check_can_parse(method());
 827       assert(reason != nullptr, "expect reason for parse failure");
 828       stringStream ss;
 829       ss.print("cannot parse method: %s", reason);
 830       record_method_not_compilable(ss.as_string());
 831       return;
 832     }
 833 
 834     gvn.set_type(root(), root()->bottom_type());
 835 
 836     JVMState* jvms = build_start_state(start(), tf());
 837     if ((jvms = cg->generate(jvms)) == nullptr) {
 838       assert(failure_reason() != nullptr, "expect reason for parse failure");
 839       stringStream ss;
 840       ss.print("method parse failed: %s", failure_reason());
 841       record_method_not_compilable(ss.as_string() DEBUG_ONLY(COMMA true));
 842       return;
 843     }
 844     GraphKit kit(jvms);
 845 
 846     if (!kit.stopped()) {
 847       // Accept return values, and transfer control we know not where.
 848       // This is done by a special, unique ReturnNode bound to root.
 849       return_values(kit.jvms());
 850     }
 851 
 852     if (kit.has_exceptions()) {
 853       // Any exceptions that escape from this call must be rethrown
 854       // to whatever caller is dynamically above us on the stack.
 855       // This is done by a special, unique RethrowNode bound to root.
 856       rethrow_exceptions(kit.transfer_exceptions_into_jvms());
 857     }
 858 
 859     assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");
 860 
 861     if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
 862       inline_string_calls(true);
 863     }
 864 
 865     if (failing())  return;
 866 
 867     // Remove clutter produced by parsing.
 868     if (!failing()) {
 869       ResourceMark rm;
 870       PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
 871     }
 872   }
 873 
 874   // Note:  Large methods are capped off in do_one_bytecode().
 875   if (failing())  return;
 876 
 877   // After parsing, node notes are no longer automagic.
 878   // They must be propagated by register_new_node_with_optimizer(),
 879   // clone(), or the like.
 880   set_default_node_notes(nullptr);
 881 
 882 #ifndef PRODUCT
 883   if (should_print_igv(1)) {
 884     _igv_printer->print_inlining();
 885   }
 886 #endif
 887 
 888   if (failing())  return;
 889   NOT_PRODUCT( verify_graph_edges(); )
 890 
 891   // Now optimize
 892   Optimize();
 893   if (failing())  return;
 894   NOT_PRODUCT( verify_graph_edges(); )
 895 
 896 #ifndef PRODUCT
 897   if (should_print_ideal()) {
 898     print_ideal_ir("PrintIdeal");
 899   }
 900 #endif
 901 
 902 #ifdef ASSERT
 903   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 904   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 905 #endif
 906 
 907   // Dump compilation data to replay it.
 908   if (directive->DumpReplayOption) {
 909     env()->dump_replay_data(_compile_id);
 910   }
 911   if (directive->DumpInlineOption && (ilt() != nullptr)) {
 912     env()->dump_inline_data(_compile_id);
 913   }
 914 
 915   // Now that we know the size of all the monitors we can add fixed slots:
 916   // [...]
 917   // rsp+80: saved fp register
 918   // rsp+76: Fixed slot 7
 919   // rsp+72: Fixed slot 6 (stack increment)
 920   // rsp+68: Fixed slot 5
 921   // rsp+64: Fixed slot 4 (null marker)
 922   // rsp+60: Fixed slot 3
 923   // rsp+56: Fixed slot 2 (original deopt pc)
 924   // rsp+52: Fixed slot 1
 925   // rsp+48: Fixed slot 0 (monitors)
 926   // rsp+44: spill
 927   // [...]
 928 
 929   // One extra slot for the original deopt pc.
 930   int next_slot = fixed_slots();
 931   next_slot += VMRegImpl::slots_per_word;
 932 
 933   // One extra slot for the special stack increment value.
 934   if (needs_stack_repair()) {
 935     next_slot += VMRegImpl::slots_per_word;
 936   }
 937 
 938   // One extra slot to hold the null marker at scalarized returns.
 939   if (needs_nm_slot()) {
 940     next_slot += VMRegImpl::slots_per_word;
 941   }
 942   set_fixed_slots(next_slot);
 943 
 944   // Compute when to use implicit null checks. Used by matching trap based
 945   // nodes and NullCheck optimization.
 946   set_allowed_deopt_reasons();
 947 
 948   // Now generate code
 949   Code_Gen();
 950 }
 951 
 952 //------------------------------Compile----------------------------------------
 953 // Compile a runtime stub
 954 Compile::Compile(ciEnv* ci_env,
 955                  TypeFunc_generator generator,
 956                  address stub_function,
 957                  const char* stub_name,
 958                  StubId stub_id,
 959                  int is_fancy_jump,
 960                  bool pass_tls,
 961                  bool return_pc,
 962                  DirectiveSet* directive)
 963     : Phase(Compiler),
 964       _compile_id(0),
 965       _options(Options::for_runtime_stub()),
 966       _method(nullptr),
 967       _entry_bci(InvocationEntryBci),
 968       _stub_function(stub_function),
 969       _stub_name(stub_name),
 970       _stub_id(stub_id),
 971       _stub_entry_point(nullptr),
 972       _max_node_limit(MaxNodeLimit),
 973       _post_loop_opts_phase(false),
 974       _merge_stores_phase(false),
 975       _allow_macro_nodes(true),
 976       _inlining_progress(false),
 977       _inlining_incrementally(false),
 978       _has_reserved_stack_access(false),
 979       _has_circular_inline_type(false),
 980 #ifndef PRODUCT
 981       _igv_idx(0),
 982       _trace_opto_output(directive->TraceOptoOutputOption),
 983 #endif
 984       _clinit_barrier_on_entry(false),
 985       _stress_seed(0),
 986       _comp_arena(mtCompiler, Arena::Tag::tag_comp),
 987       _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 988       _env(ci_env),
 989       _directive(directive),
 990       _log(ci_env->log()),
 991       _first_failure_details(nullptr),
 992       _reachability_fences(comp_arena(), 8, 0, nullptr),
 993       _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
 994       _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
 995       _congraph(nullptr),
 996       NOT_PRODUCT(_igv_printer(nullptr) COMMA)
 997       _unique(0),
 998       _dead_node_count(0),
 999       _dead_node_list(comp_arena()),
1000       _node_arena_one(mtCompiler, Arena::Tag::tag_node),
1001       _node_arena_two(mtCompiler, Arena::Tag::tag_node),
1002       _node_arena(&_node_arena_one),
1003       _mach_constant_base_node(nullptr),
1004       _Compile_types(mtCompiler, Arena::Tag::tag_type),
1005       _initial_gvn(nullptr),
1006       _igvn_worklist(nullptr),
1007       _types(nullptr),
1008       _node_hash(nullptr),
1009       _has_mh_late_inlines(false),
1010       _oom(false),
1011       _replay_inline_data(nullptr),
1012       _inline_printer(this),
1013       _java_calls(0),
1014       _inner_loops(0),
1015       _FIRST_STACK_mask(comp_arena()),
1016       _interpreter_frame_size(0),
1017       _regmask_arena(mtCompiler, Arena::Tag::tag_regmask),
1018       _output(nullptr),
1019 #ifndef PRODUCT
1020       _in_dump_cnt(0),
1021 #endif
1022       _allowed_reasons(0) {
1023   C = this;
1024 
1025   // try to reuse an existing stub
1026   {
1027     BlobId blob_id = StubInfo::blob(_stub_id);
1028     CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, blob_id);
1029     if (blob != nullptr) {
1030       RuntimeStub* rs = blob->as_runtime_stub();
1031       _stub_entry_point = rs->entry_point();
1032       return;
1033     }
1034   }
1035 
1036   TraceTime t1(nullptr, &_t_totalCompilation, CITime, false);
1037   TraceTime t2(nullptr, &_t_stubCompilation, CITime, false);
1038 
1039 #ifndef PRODUCT
1040   set_print_assembly(PrintFrameConverterAssembly);
1041   set_parsed_irreducible_loop(false);
1042 #else
1043   set_print_assembly(false); // Must initialize.
1044 #endif
1045   set_has_irreducible_loop(false); // no loops
1046 
1047   CompileWrapper cw(this);
1048   Init(/*do_aliasing=*/ false);
1049   init_tf((*generator)());
1050 
1051   _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
1052   _types = new (comp_arena()) Type_Array(comp_arena());
1053   _node_hash = new (comp_arena()) NodeHash(comp_arena(), 255);
1054 
1055   if (StressLCM || StressGCM || StressBailout) {
1056     initialize_stress_seed(directive);
1057   }
1058 
1059   {
1060     PhaseGVN gvn;
1061     set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
1062     gvn.transform(top());
1063 
1064     GraphKit kit;
1065     kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
1066   }
1067 
1068   NOT_PRODUCT( verify_graph_edges(); )
1069 
1070   Code_Gen();
1071 }
1072 
1073 Compile::~Compile() {
1074   delete _first_failure_details;
1075 };
1076 
1077 //------------------------------Init-------------------------------------------
1078 // Prepare for a single compilation
1079 void Compile::Init(bool aliasing) {
1080   _do_aliasing = aliasing;
1081   _unique  = 0;
1082   _regalloc = nullptr;
1083 
1084   _tf      = nullptr;  // filled in later
1085   _top     = nullptr;  // cached later
1086   _matcher = nullptr;  // filled in later
1087   _cfg     = nullptr;  // filled in later
1088 
1089   _node_note_array = nullptr;
1090   _default_node_notes = nullptr;
1091   DEBUG_ONLY( _modified_nodes = nullptr; ) // Used in Optimize()
1092 
1093   _immutable_memory = nullptr; // filled in at first inquiry
1094 
1095 #ifdef ASSERT
1096   _phase_optimize_finished = false;
1097   _phase_verify_ideal_loop = false;
1098   _exception_backedge = false;
1099   _type_verify = nullptr;
1100 #endif
1101 
1102   // Globally visible Nodes
1103   // First set TOP to null to give safe behavior during creation of RootNode
1104   set_cached_top_node(nullptr);
1105   set_root(new RootNode());
1106   // Now that you have a Root to point to, create the real TOP
1107   set_cached_top_node( new ConNode(Type::TOP) );
1108   set_recent_alloc(nullptr, nullptr);
1109 
1110   // Create Debug Information Recorder to record scopes, oopmaps, etc.
1111   env()->set_oop_recorder(new OopRecorder(env()->arena()));
1112   env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
1113   env()->set_dependencies(new Dependencies(env()));
1114 
1115   _fixed_slots = 0;
1116   set_has_split_ifs(false);
1117   set_has_loops(false); // first approximation
1118   set_has_stringbuilder(false);
1119   set_has_boxed_value(false);
1120   _trap_can_recompile = false;  // no traps emitted yet
1121   _major_progress = true; // start out assuming good things will happen
1122   set_has_unsafe_access(false);
1123   set_max_vector_size(0);
1124   set_clear_upper_avx(false);  //false as default for clear upper bits of ymm registers
1125   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1126   set_decompile_count(0);
1127 
1128 #ifndef PRODUCT
1129   _phase_counter = 0;
1130   Copy::zero_to_bytes(_igv_phase_iter, sizeof(_igv_phase_iter));
1131 #endif
1132 
1133   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1134   _loop_opts_cnt = LoopOptsCount;
1135   _has_flat_accesses = false;
1136   _flat_accesses_share_alias = true;
1137   _scalarize_in_safepoints = false;
1138   _needs_nm_slot = false;
1139 
1140   set_do_inlining(Inline);
1141   set_max_inline_size(MaxInlineSize);
1142   set_freq_inline_size(FreqInlineSize);
1143   set_do_scheduling(OptoScheduling);
1144 
1145   set_do_vector_loop(false);
1146   set_has_monitors(false);
1147   set_has_scoped_access(false);
1148 
1149   if (AllowVectorizeOnDemand) {
1150     if (has_method() && _directive->VectorizeOption) {
1151       set_do_vector_loop(true);
1152       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
1153     } else if (has_method() && method()->name() != nullptr &&
1154                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1155       set_do_vector_loop(true);
1156     }
1157   }
1158   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
1159   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
1160 
1161   _max_node_limit = _directive->MaxNodeLimitOption;
1162 
1163   if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {
1164     set_clinit_barrier_on_entry(true);
1165   }
1166   if (debug_info()->recording_non_safepoints()) {
1167     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1168                         (comp_arena(), 8, 0, nullptr));
1169     set_default_node_notes(Node_Notes::make(this));
1170   }
1171 
1172   const int grow_ats = 16;
1173   _max_alias_types = grow_ats;
1174   _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1175   AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
1176   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1177   {
1178     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1179   }
1180   // Initialize the first few types.
1181   _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1182   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1183   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1184   _num_alias_types = AliasIdxRaw+1;
1185   // Zero out the alias type cache.
1186   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1187   // A null adr_type hits in the cache right away.  Preload the right answer.
1188   probe_alias_cache(nullptr)->_index = AliasIdxTop;
1189 }
1190 
1191 #ifdef ASSERT
1192 // Verify that the current StartNode is valid.
1193 void Compile::verify_start(StartNode* s) const {
1194   assert(failing_internal() || s == start(), "should be StartNode");
1195 }
1196 #endif
1197 
1198 /**
1199  * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1200  * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1201  * the ideal graph.
1202  */
1203 StartNode* Compile::start() const {
1204   assert (!failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", failure_reason());
1205   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1206     Node* start = root()->fast_out(i);
1207     if (start->is_Start()) {
1208       return start->as_Start();
1209     }
1210   }
1211   fatal("Did not find Start node!");
1212   return nullptr;
1213 }
1214 
1215 //-------------------------------immutable_memory-------------------------------------
1216 // Access immutable memory
1217 Node* Compile::immutable_memory() {
1218   if (_immutable_memory != nullptr) {
1219     return _immutable_memory;
1220   }
1221   StartNode* s = start();
1222   for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
1223     Node *p = s->fast_out(i);
1224     if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
1225       _immutable_memory = p;
1226       return _immutable_memory;
1227     }
1228   }
1229   ShouldNotReachHere();
1230   return nullptr;
1231 }
1232 
1233 //----------------------set_cached_top_node------------------------------------
1234 // Install the cached top node, and make sure Node::is_top works correctly.
1235 void Compile::set_cached_top_node(Node* tn) {
1236   if (tn != nullptr)  verify_top(tn);
1237   Node* old_top = _top;
1238   _top = tn;
1239   // Calling Node::setup_is_top allows the nodes the chance to adjust
1240   // their _out arrays.
1241   if (_top != nullptr)     _top->setup_is_top();
1242   if (old_top != nullptr)  old_top->setup_is_top();
1243   assert(_top == nullptr || top()->is_top(), "");
1244 }
1245 
1246 #ifdef ASSERT
1247 uint Compile::count_live_nodes_by_graph_walk() {
1248   Unique_Node_List useful(comp_arena());
1249   // Get useful node list by walking the graph.
1250   identify_useful_nodes(useful);
1251   return useful.size();
1252 }
1253 
1254 void Compile::print_missing_nodes() {
1255 
1256   // Return if CompileLog is null and PrintIdealNodeCount is false.
1257   if ((_log == nullptr) && (! PrintIdealNodeCount)) {
1258     return;
1259   }
1260 
1261   // This is an expensive function. It is executed only when the user
1262   // specifies VerifyIdealNodeCount option or otherwise knows the
1263   // additional work that needs to be done to identify reachable nodes
1264   // by walking the flow graph and find the missing ones using
1265   // _dead_node_list.
1266 
1267   Unique_Node_List useful(comp_arena());
1268   // Get useful node list by walking the graph.
1269   identify_useful_nodes(useful);
1270 
1271   uint l_nodes = C->live_nodes();
1272   uint l_nodes_by_walk = useful.size();
1273 
1274   if (l_nodes != l_nodes_by_walk) {
1275     if (_log != nullptr) {
1276       _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
1277       _log->stamp();
1278       _log->end_head();
1279     }
1280     VectorSet& useful_member_set = useful.member_set();
1281     int last_idx = l_nodes_by_walk;
1282     for (int i = 0; i < last_idx; i++) {
1283       if (useful_member_set.test(i)) {
1284         if (_dead_node_list.test(i)) {
1285           if (_log != nullptr) {
1286             _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
1287           }
1288           if (PrintIdealNodeCount) {
1289             // Print the log message to tty
1290               tty->print_cr("mismatched_node idx='%d' both live and dead'", i);
1291               useful.at(i)->dump();
1292           }
1293         }
1294       }
1295       else if (! _dead_node_list.test(i)) {
1296         if (_log != nullptr) {
1297           _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
1298         }
1299         if (PrintIdealNodeCount) {
1300           // Print the log message to tty
1301           tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
1302         }
1303       }
1304     }
1305     if (_log != nullptr) {
1306       _log->tail("mismatched_nodes");
1307     }
1308   }
1309 }
1310 void Compile::record_modified_node(Node* n) {
1311   if (_modified_nodes != nullptr && !_inlining_incrementally && !n->is_Con()) {
1312     _modified_nodes->push(n);
1313   }
1314 }
1315 
1316 void Compile::remove_modified_node(Node* n) {
1317   if (_modified_nodes != nullptr) {
1318     _modified_nodes->remove(n);
1319   }
1320 }
1321 #endif
1322 
1323 #ifndef PRODUCT
1324 void Compile::verify_top(Node* tn) const {
1325   if (tn != nullptr) {
1326     assert(tn->is_Con(), "top node must be a constant");
1327     assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
1328     assert(tn->in(0) != nullptr, "must have live top node");
1329   }
1330 }
1331 #endif
1332 
1333 
1334 ///-------------------Managing Per-Node Debug & Profile Info-------------------
1335 
1336 void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
1337   guarantee(arr != nullptr, "");
1338   int num_blocks = arr->length();
1339   if (grow_by < num_blocks)  grow_by = num_blocks;
1340   int num_notes = grow_by * _node_notes_block_size;
1341   Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
1342   Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
1343   while (num_notes > 0) {
1344     arr->append(notes);
1345     notes     += _node_notes_block_size;
1346     num_notes -= _node_notes_block_size;
1347   }
1348   assert(num_notes == 0, "exact multiple, please");
1349 }
1350 
1351 bool Compile::copy_node_notes_to(Node* dest, Node* source) {
1352   if (source == nullptr || dest == nullptr)  return false;
1353 
1354   if (dest->is_Con())
1355     return false;               // Do not push debug info onto constants.
1356 
1357 #ifdef ASSERT
1358   // Leave a bread crumb trail pointing to the original node:
1359   if (dest != nullptr && dest != source && dest->debug_orig() == nullptr) {
1360     dest->set_debug_orig(source);
1361   }
1362 #endif
1363 
1364   if (node_note_array() == nullptr)
1365     return false;               // Not collecting any notes now.
1366 
1367   // This is a copy onto a pre-existing node, which may already have notes.
1368   // If both nodes have notes, do not overwrite any pre-existing notes.
1369   Node_Notes* source_notes = node_notes_at(source->_idx);
1370   if (source_notes == nullptr || source_notes->is_clear())  return false;
1371   Node_Notes* dest_notes   = node_notes_at(dest->_idx);
1372   if (dest_notes == nullptr || dest_notes->is_clear()) {
1373     return set_node_notes_at(dest->_idx, source_notes);
1374   }
1375 
1376   Node_Notes merged_notes = (*source_notes);
1377   // The order of operations here ensures that dest notes will win...
1378   merged_notes.update_from(dest_notes);
1379   return set_node_notes_at(dest->_idx, &merged_notes);
1380 }
1381 
1382 
1383 //--------------------------allow_range_check_smearing-------------------------
1384 // Gating condition for coalescing similar range checks.
1385 // Sometimes we try 'speculatively' replacing a series of a range checks by a
1386 // single covering check that is at least as strong as any of them.
1387 // If the optimization succeeds, the simplified (strengthened) range check
1388 // will always succeed.  If it fails, we will deopt, and then give up
1389 // on the optimization.
1390 bool Compile::allow_range_check_smearing() const {
1391   // If this method has already thrown a range-check,
1392   // assume it was because we already tried range smearing
1393   // and it failed.
1394   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1395   return !already_trapped;
1396 }
1397 
1398 
1399 //------------------------------flatten_alias_type-----------------------------
1400 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1401   assert(do_aliasing(), "Aliasing should be enabled");
1402   int offset = tj->offset();
1403   TypePtr::PTR ptr = tj->ptr();
1404 
1405   // Known instance (scalarizable allocation) alias only with itself.
1406   bool is_known_inst = tj->isa_oopptr() != nullptr &&
1407                        tj->is_oopptr()->is_known_instance();
1408 
1409   // Process weird unsafe references.
1410   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1411     assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");
1412     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1413     tj = TypeOopPtr::BOTTOM;
1414     ptr = tj->ptr();
1415     offset = tj->offset();
1416   }
1417 
1418   // Array pointers need some flattening
1419   const TypeAryPtr* ta = tj->isa_aryptr();
1420   if( ta && is_known_inst ) {
1421     if ( offset != Type::OffsetBot &&
1422          offset > arrayOopDesc::length_offset_in_bytes() ) {
1423       offset = Type::OffsetBot; // Flatten constant access into array body only
1424       tj = ta = ta->
1425               remove_speculative()->
1426               cast_to_ptr_type(ptr)->
1427               with_offset(offset);
1428     }
1429   } else if (ta != nullptr) {
1430     // Common slices
1431     if (offset == arrayOopDesc::length_offset_in_bytes()) {
1432       return TypeAryPtr::RANGE;
1433     } else if (offset == oopDesc::klass_offset_in_bytes()) {
1434       return TypeInstPtr::KLASS;
1435     } else if (offset == oopDesc::mark_offset_in_bytes()) {
1436       return TypeInstPtr::MARK;
1437     }
1438 
1439     // Remove size and stability
1440     const TypeAry* normalized_ary = TypeAry::make(ta->elem(), TypeInt::POS, false, ta->is_flat(), ta->is_not_flat(), ta->is_not_null_free(), ta->is_atomic());
1441     // Remove ptr, const_oop, and offset
1442     if (ta->elem() == Type::BOTTOM) {
1443       // Bottom array (meet of int[] and byte[] for example), accesses to it will be done with
1444       // Unsafe. This should alias with all arrays. For now just leave it as it is (this is
1445       // incorrect, see JDK-8331133).
1446       tj = ta = TypeAryPtr::make(TypePtr::BotPTR, nullptr, normalized_ary, nullptr, false, Type::Offset::bottom);
1447     } else if (ta->elem()->make_oopptr() != nullptr) {
1448       // Object arrays, keep field_offset
1449       tj = ta = TypeAryPtr::make(TypePtr::BotPTR, nullptr, normalized_ary, nullptr, ta->klass_is_exact(), Type::Offset::bottom, Type::Offset(ta->field_offset()));
1450     } else {
1451       // Primitive arrays
1452       tj = ta = TypeAryPtr::make(TypePtr::BotPTR, nullptr, normalized_ary, ta->exact_klass(), true, Type::Offset::bottom);
1453     }
1454 
1455     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1456     // cannot be distinguished by bytecode alone.
1457     if (ta->elem() == TypeInt::BOOL) {
1458       tj = ta = TypeAryPtr::BYTES;
1459     }
1460 
1461     // All arrays of references share the same slice
1462     if (!ta->is_flat() && ta->elem()->make_oopptr() != nullptr) {
1463       const TypeAry* tary = TypeAry::make(TypeInstPtr::BOTTOM, TypeInt::POS, false, false, true, true, true);
1464       tj = ta = TypeAryPtr::make(TypePtr::BotPTR, nullptr, tary, nullptr, false, Type::Offset::bottom);
1465     }
1466 
1467     if (ta->is_flat()) {
1468       if (_flat_accesses_share_alias) {
1469         // Initially all flattened array accesses share a single slice
1470         tj = ta = TypeAryPtr::INLINES;
1471       } else {
1472         // Flat accesses are always exact
1473         tj = ta = ta->cast_to_exactness(true);
1474       }
1475     }
1476   }
1477 
1478   // Oop pointers need some flattening
1479   const TypeInstPtr *to = tj->isa_instptr();
1480   if (to && to != TypeOopPtr::BOTTOM) {
1481     ciInstanceKlass* ik = to->instance_klass();
1482     tj = to = to->cast_to_maybe_flat_in_array(); // flatten to maybe flat in array
1483     if( ptr == TypePtr::Constant ) {
1484       if (ik != ciEnv::current()->Class_klass() ||
1485           offset < ik->layout_helper_size_in_bytes()) {
1486         // No constant oop pointers (such as Strings); they alias with
1487         // unknown strings.
1488         assert(!is_known_inst, "not scalarizable allocation");
1489         tj = to = to->
1490                 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1491                 remove_speculative()->
1492                 cast_to_ptr_type(TypePtr::BotPTR)->
1493                 cast_to_exactness(false);
1494       }
1495     } else if( is_known_inst ) {
1496       tj = to; // Keep NotNull and klass_is_exact for instance type
1497     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1498       // During the 2nd round of IterGVN, NotNull castings are removed.
1499       // Make sure the Bottom and NotNull variants alias the same.
1500       // Also, make sure exact and non-exact variants alias the same.
1501       tj = to = to->
1502               remove_speculative()->
1503               cast_to_instance_id(TypeOopPtr::InstanceBot)->
1504               cast_to_ptr_type(TypePtr::BotPTR)->
1505               cast_to_exactness(false);
1506     }
1507     if (to->speculative() != nullptr) {
1508       tj = to = to->remove_speculative();
1509     }
1510     // Canonicalize the holder of this field
1511     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1512       // First handle header references such as a LoadKlassNode, even if the
1513       // object's klass is unloaded at compile time (4965979).
1514       if (!is_known_inst) { // Do it only for non-instance types
1515         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, Type::Offset(offset));
1516       }
1517     } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1518       // Static fields are in the space above the normal instance
1519       // fields in the java.lang.Class instance.
1520       if (ik != ciEnv::current()->Class_klass()) {
1521         to = nullptr;
1522         tj = TypeOopPtr::BOTTOM;
1523         offset = tj->offset();
1524       }
1525     } else {
1526       ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1527       assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1528       assert(tj->offset() == offset, "no change to offset expected");
1529       bool xk = to->klass_is_exact();
1530       int instance_id = to->instance_id();
1531 
1532       // If the input type's class is the holder: if exact, the type only includes interfaces implemented by the holder
1533       // but if not exact, it may include extra interfaces: build new type from the holder class to make sure only
1534       // its interfaces are included.
1535       if (xk && ik->equals(canonical_holder)) {
1536         assert(tj == TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, Type::Offset(offset), instance_id,
1537                                        TypePtr::MaybeFlat), "exact type should be canonical type");
1538       } else {
1539         assert(xk || !is_known_inst, "Known instance should be exact type");
1540         tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, Type::Offset(offset), instance_id,
1541                                     TypePtr::MaybeFlat);
1542       }
1543     }
1544   }
1545 
1546   // Klass pointers to object array klasses need some flattening
1547   const TypeKlassPtr *tk = tj->isa_klassptr();
1548   if( tk ) {
1549     // If we are referencing a field within a Klass, we need
1550     // to assume the worst case of an Object.  Both exact and
1551     // inexact types must flatten to the same alias class so
1552     // use NotNull as the PTR.
1553     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1554       tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1555                                        env()->Object_klass(),
1556                                        Type::Offset(offset),
1557                                        TypePtr::MaybeFlat);
1558     }
1559 
1560     if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1561       ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1562       if (!k || !k->is_loaded()) {                  // Only fails for some -Xcomp runs
1563         tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), Type::Offset(offset), TypePtr::MaybeFlat);
1564       } else {
1565         tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, Type::Offset(offset), tk->is_not_flat(), tk->is_not_null_free(), tk->is_flat(), tk->is_null_free(), tk->is_atomic(), tk->is_aryklassptr()->is_refined_type());
1566       }
1567     }
1568     // Check for precise loads from the primary supertype array and force them
1569     // to the supertype cache alias index.  Check for generic array loads from
1570     // the primary supertype array and also force them to the supertype cache
1571     // alias index.  Since the same load can reach both, we need to merge
1572     // these 2 disparate memories into the same alias class.  Since the
1573     // primary supertype array is read-only, there's no chance of confusion
1574     // where we bypass an array load and an array store.
1575     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1576     if (offset == Type::OffsetBot ||
1577         (offset >= primary_supers_offset &&
1578          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1579         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1580       offset = in_bytes(Klass::secondary_super_cache_offset());
1581       tj = tk = tk->with_offset(offset);
1582     }
1583   }
1584 
1585   // Flatten all Raw pointers together.
1586   if (tj->base() == Type::RawPtr)
1587     tj = TypeRawPtr::BOTTOM;
1588 
1589   if (tj->base() == Type::AnyPtr)
1590     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1591 
1592   offset = tj->offset();
1593   assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
1594 
1595   assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
1596           (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
1597           (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
1598           (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
1599           (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1600           (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1601           (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr),
1602           "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
1603   assert( tj->ptr() != TypePtr::TopPTR &&
1604           tj->ptr() != TypePtr::AnyNull &&
1605           tj->ptr() != TypePtr::Null, "No imprecise addresses" );
1606 //    assert( tj->ptr() != TypePtr::Constant ||
1607 //            tj->base() == Type::RawPtr ||
1608 //            tj->base() == Type::KlassPtr, "No constant oop addresses" );
1609 
1610   return tj;
1611 }
1612 
1613 void Compile::AliasType::Init(int i, const TypePtr* at) {
1614   assert(AliasIdxTop <= i && i < Compile::current()->_max_alias_types, "Invalid alias index");
1615   _index = i;
1616   _adr_type = at;
1617   _field = nullptr;
1618   _element = nullptr;
1619   _is_rewritable = true; // default
1620   const TypeOopPtr *atoop = (at != nullptr) ? at->isa_oopptr() : nullptr;
1621   if (atoop != nullptr && atoop->is_known_instance()) {
1622     const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
1623     _general_index = Compile::current()->get_alias_index(gt);
1624   } else {
1625     _general_index = 0;
1626   }
1627 }
1628 
1629 BasicType Compile::AliasType::basic_type() const {
1630   if (element() != nullptr) {
1631     const Type* element = adr_type()->is_aryptr()->elem();
1632     return element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
1633   } if (field() != nullptr) {
1634     return field()->layout_type();
1635   } else {
1636     return T_ILLEGAL; // unknown
1637   }
1638 }
1639 
1640 //---------------------------------print_on------------------------------------
1641 #ifndef PRODUCT
1642 void Compile::AliasType::print_on(outputStream* st) {
1643   if (index() < 10)
1644         st->print("@ <%d> ", index());
1645   else  st->print("@ <%d>",  index());
1646   st->print(is_rewritable() ? "   " : " RO");
1647   int offset = adr_type()->offset();
1648   if (offset == Type::OffsetBot)
1649         st->print(" +any");
1650   else  st->print(" +%-3d", offset);
1651   st->print(" in ");
1652   adr_type()->dump_on(st);
1653   const TypeOopPtr* tjp = adr_type()->isa_oopptr();
1654   if (field() != nullptr && tjp) {
1655     if (tjp->is_instptr()->instance_klass()  != field()->holder() ||
1656         tjp->offset() != field()->offset_in_bytes()) {
1657       st->print(" != ");
1658       field()->print();
1659       st->print(" ***");
1660     }
1661   }
1662 }
1663 
1664 void print_alias_types() {
1665   Compile* C = Compile::current();
1666   tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
1667   for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
1668     C->alias_type(idx)->print_on(tty);
1669     tty->cr();
1670   }
1671 }
1672 #endif
1673 
1674 
1675 //----------------------------probe_alias_cache--------------------------------
1676 Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
1677   intptr_t key = (intptr_t) adr_type;
1678   key ^= key >> logAliasCacheSize;
1679   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1680 }
1681 
1682 
1683 //-----------------------------grow_alias_types--------------------------------
1684 void Compile::grow_alias_types() {
1685   const int old_ats  = _max_alias_types; // how many before?
1686   const int new_ats  = old_ats;          // how many more?
1687   const int grow_ats = old_ats+new_ats;  // how many now?
1688   _max_alias_types = grow_ats;
1689   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), _alias_types, old_ats, grow_ats);
1690   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1691   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1692   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1693 }
1694 
1695 
1696 //--------------------------------find_alias_type------------------------------
1697 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1698   if (!do_aliasing()) {
1699     return alias_type(AliasIdxBot);
1700   }
1701 
1702   AliasCacheEntry* ace = nullptr;
1703   if (!uncached) {
1704     ace = probe_alias_cache(adr_type);
1705     if (ace->_adr_type == adr_type) {
1706       return alias_type(ace->_index);
1707     }
1708   }
1709 
1710   // Handle special cases.
1711   if (adr_type == nullptr)          return alias_type(AliasIdxTop);
1712   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1713 
1714   // Do it the slow way.
1715   const TypePtr* flat = flatten_alias_type(adr_type);
1716 
1717 #ifdef ASSERT
1718   {
1719     ResourceMark rm;
1720     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1721            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1722     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1723            Type::str(adr_type));
1724     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1725       const TypeOopPtr* foop = flat->is_oopptr();
1726       // Scalarizable allocations have exact klass always.
1727       bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1728       const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
1729       assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type: foop = %s; xoop = %s",
1730              Type::str(foop), Type::str(xoop));
1731     }
1732   }
1733 #endif
1734 
1735   int idx = AliasIdxTop;
1736   for (int i = 0; i < num_alias_types(); i++) {
1737     if (alias_type(i)->adr_type() == flat) {
1738       idx = i;
1739       break;
1740     }
1741   }
1742 
1743   if (idx == AliasIdxTop) {
1744     if (no_create)  return nullptr;
1745     // Grow the array if necessary.
1746     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1747     // Add a new alias type.
1748     idx = _num_alias_types++;
1749     _alias_types[idx]->Init(idx, flat);
1750     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1751     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1752     if (flat->isa_instptr()) {
1753       if (flat->offset() == java_lang_Class::klass_offset()
1754           && flat->is_instptr()->instance_klass() == env()->Class_klass())
1755         alias_type(idx)->set_rewritable(false);
1756     }
1757     ciField* field = nullptr;
1758     if (flat->isa_aryptr()) {
1759 #ifdef ASSERT
1760       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1761       // (T_BYTE has the weakest alignment and size restrictions...)
1762       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1763 #endif
1764       const Type* elemtype = flat->is_aryptr()->elem();
1765       if (flat->offset() == TypePtr::OffsetBot) {
1766         alias_type(idx)->set_element(elemtype);
1767       }
1768       int field_offset = flat->is_aryptr()->field_offset().get();
1769       if (flat->is_flat() &&
1770           field_offset != Type::OffsetBot) {
1771         ciInlineKlass* vk = elemtype->inline_klass();
1772         field_offset += vk->payload_offset();
1773         field = vk->get_field_by_offset(field_offset, false);
1774       }
1775     }
1776     if (flat->isa_klassptr()) {
1777       if (UseCompactObjectHeaders) {
1778         if (flat->offset() == in_bytes(Klass::prototype_header_offset()))
1779           alias_type(idx)->set_rewritable(false);
1780       }
1781       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1782         alias_type(idx)->set_rewritable(false);
1783       if (flat->offset() == in_bytes(Klass::misc_flags_offset()))
1784         alias_type(idx)->set_rewritable(false);
1785       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1786         alias_type(idx)->set_rewritable(false);
1787       if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1788         alias_type(idx)->set_rewritable(false);
1789       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1790         alias_type(idx)->set_rewritable(false);
1791     }
1792 
1793     if (flat->isa_instklassptr()) {
1794       if (flat->offset() == in_bytes(InstanceKlass::access_flags_offset())) {
1795         alias_type(idx)->set_rewritable(false);
1796       }
1797     }
1798     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1799     // but the base pointer type is not distinctive enough to identify
1800     // references into JavaThread.)
1801 
1802     // Check for final fields.
1803     const TypeInstPtr* tinst = flat->isa_instptr();
1804     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1805       if (tinst->const_oop() != nullptr &&
1806           tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1807           tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1808         // static field
1809         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1810         field = k->get_field_by_offset(tinst->offset(), true);
1811       } else if (tinst->is_inlinetypeptr()) {
1812         // Inline type field
1813         ciInlineKlass* vk = tinst->inline_klass();
1814         field = vk->get_field_by_offset(tinst->offset(), false);
1815       } else {
1816         ciInstanceKlass *k = tinst->instance_klass();
1817         field = k->get_field_by_offset(tinst->offset(), false);
1818       }
1819     }
1820     assert(field == nullptr ||
1821            original_field == nullptr ||
1822            (field->holder() == original_field->holder() &&
1823             field->offset_in_bytes() == original_field->offset_in_bytes() &&
1824             field->is_static() == original_field->is_static()), "wrong field?");
1825     // Set field() and is_rewritable() attributes.
1826     if (field != nullptr) {
1827       alias_type(idx)->set_field(field);
1828       if (flat->isa_aryptr()) {
1829         // Fields of flat arrays are rewritable although they are declared final
1830         assert(flat->is_flat(), "must be a flat array");
1831         alias_type(idx)->set_rewritable(true);
1832       }
1833     }
1834   }
1835 
1836   // Fill the cache for next time.
1837   if (!uncached) {
1838     ace->_adr_type = adr_type;
1839     ace->_index    = idx;
1840     assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
1841 
1842     // Might as well try to fill the cache for the flattened version, too.
1843     AliasCacheEntry* face = probe_alias_cache(flat);
1844     if (face->_adr_type == nullptr) {
1845       face->_adr_type = flat;
1846       face->_index    = idx;
1847       assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1848     }
1849   }
1850 
1851   return alias_type(idx);
1852 }
1853 
1854 
1855 Compile::AliasType* Compile::alias_type(ciField* field) {
1856   const TypeOopPtr* t;
1857   if (field->is_static())
1858     t = TypeInstPtr::make(field->holder()->java_mirror());
1859   else
1860     t = TypeOopPtr::make_from_klass_raw(field->holder());
1861   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1862   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1863   return atp;
1864 }
1865 
1866 
1867 //------------------------------have_alias_type--------------------------------
1868 bool Compile::have_alias_type(const TypePtr* adr_type) {
1869   AliasCacheEntry* ace = probe_alias_cache(adr_type);
1870   if (ace->_adr_type == adr_type) {
1871     return true;
1872   }
1873 
1874   // Handle special cases.
1875   if (adr_type == nullptr)             return true;
1876   if (adr_type == TypePtr::BOTTOM)  return true;
1877 
1878   return find_alias_type(adr_type, true, nullptr) != nullptr;
1879 }
1880 
1881 //-----------------------------must_alias--------------------------------------
1882 // True if all values of the given address type are in the given alias category.
1883 bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
1884   if (alias_idx == AliasIdxBot)         return true;  // the universal category
1885   if (adr_type == nullptr)              return true;  // null serves as TypePtr::TOP
1886   if (alias_idx == AliasIdxTop)         return false; // the empty category
1887   if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
1888 
1889   // the only remaining possible overlap is identity
1890   int adr_idx = get_alias_index(adr_type);
1891   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1892   assert(adr_idx == alias_idx ||
1893          (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
1894           && adr_type                       != TypeOopPtr::BOTTOM),
1895          "should not be testing for overlap with an unsafe pointer");
1896   return adr_idx == alias_idx;
1897 }
1898 
1899 //------------------------------can_alias--------------------------------------
1900 // True if any values of the given address type are in the given alias category.
1901 bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
1902   if (alias_idx == AliasIdxTop)         return false; // the empty category
1903   if (adr_type == nullptr)              return false; // null serves as TypePtr::TOP
1904   // Known instance doesn't alias with bottom memory
1905   if (alias_idx == AliasIdxBot)         return !adr_type->is_known_instance();                   // the universal category
1906   if (adr_type->base() == Type::AnyPtr) return !C->get_adr_type(alias_idx)->is_known_instance(); // TypePtr::BOTTOM or its twins
1907 
1908   // the only remaining possible overlap is identity
1909   int adr_idx = get_alias_index(adr_type);
1910   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1911   return adr_idx == alias_idx;
1912 }
1913 
1914 // Mark all ParsePredicateNodes as useless. They will later be removed from the graph in IGVN together with their
1915 // uncommon traps if no Runtime Predicates were created from the Parse Predicates.
1916 void Compile::mark_parse_predicate_nodes_useless(PhaseIterGVN& igvn) {
1917   if (parse_predicate_count() == 0) {
1918     return;
1919   }
1920   for (int i = 0; i < parse_predicate_count(); i++) {
1921     ParsePredicateNode* parse_predicate = _parse_predicates.at(i);
1922     parse_predicate->mark_useless(igvn);
1923   }
1924   _parse_predicates.clear();
1925 }
1926 
1927 void Compile::record_for_post_loop_opts_igvn(Node* n) {
1928   if (!n->for_post_loop_opts_igvn()) {
1929     assert(!_for_post_loop_igvn.contains(n), "duplicate");
1930     n->add_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1931     _for_post_loop_igvn.append(n);
1932   }
1933 }
1934 
1935 void Compile::remove_from_post_loop_opts_igvn(Node* n) {
1936   n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1937   _for_post_loop_igvn.remove(n);
1938 }
1939 
1940 void Compile::process_for_post_loop_opts_igvn(PhaseIterGVN& igvn) {
1941   // Verify that all previous optimizations produced a valid graph
1942   // at least to this point, even if no loop optimizations were done.
1943   PhaseIdealLoop::verify(igvn);
1944 
1945   if (_print_phase_loop_opts) {
1946     print_method(PHASE_AFTER_LOOP_OPTS, 2);
1947   }
1948   C->set_post_loop_opts_phase(); // no more loop opts allowed
1949 
1950   assert(!C->major_progress(), "not cleared");
1951 
1952   if (_for_post_loop_igvn.length() > 0) {
1953     while (_for_post_loop_igvn.length() > 0) {
1954       Node* n = _for_post_loop_igvn.pop();
1955       n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1956       igvn._worklist.push(n);
1957     }
1958     igvn.optimize();
1959     if (failing()) return;
1960     assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1961     assert(C->parse_predicate_count() == 0, "all parse predicates should have been removed now");
1962 
1963     // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1964     if (C->major_progress()) {
1965       C->clear_major_progress(); // ensure that major progress is now clear
1966     }
1967   }
1968 }
1969 
1970 void Compile::add_inline_type(Node* n) {
1971   assert(n->is_InlineType(), "unexpected node");
1972   _inline_type_nodes.push(n);
1973 }
1974 
1975 void Compile::remove_inline_type(Node* n) {
1976   assert(n->is_InlineType(), "unexpected node");
1977   if (_inline_type_nodes.contains(n)) {
1978     _inline_type_nodes.remove(n);
1979   }
1980 }
1981 
1982 // Does the return value keep otherwise useless inline type allocations alive?
1983 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1984   ResourceMark rm;
1985   Unique_Node_List wq;
1986   wq.push(ret_val);
1987   bool some_allocations = false;
1988   for (uint i = 0; i < wq.size(); i++) {
1989     Node* n = wq.at(i);
1990     if (n->outcnt() > 1) {
1991       // Some other use for the allocation
1992       return false;
1993     } else if (n->is_InlineType()) {
1994       wq.push(n->in(1));
1995     } else if (n->is_Phi()) {
1996       for (uint j = 1; j < n->req(); j++) {
1997         wq.push(n->in(j));
1998       }
1999     } else if (n->is_CheckCastPP() &&
2000                n->in(1)->is_Proj() &&
2001                n->in(1)->in(0)->is_Allocate()) {
2002       some_allocations = true;
2003     } else if (n->is_CheckCastPP()) {
2004       wq.push(n->in(1));
2005     }
2006   }
2007   return some_allocations;
2008 }
2009 
2010 bool Compile::clear_argument_if_only_used_as_buffer_at_calls(Node* result_cast, PhaseIterGVN& igvn) {
2011   ResourceMark rm;
2012   Unique_Node_List wq;
2013   wq.push(result_cast);
2014   Node_List calls;
2015   for (uint i = 0; i < wq.size(); ++i) {
2016     Node* n = wq.at(i);
2017     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2018       Node* u = n->fast_out(j);
2019       if (u->is_Phi()) {
2020         wq.push(u);
2021       } else if (u->is_InlineType() && u->as_InlineType()->get_oop() == n) {
2022         wq.push(u);
2023       } else if (u->is_CallJava()) {
2024         CallJavaNode* call = u->as_CallJava();
2025         if (call->method() != nullptr && call->method()->mismatch()) {
2026           return false;
2027         }
2028         uint nargs = call->tf()->domain_cc()->cnt();
2029         for (uint k = TypeFunc::Parms; k < nargs; k++) {
2030           Node* in = call->in(k);
2031           if (in == n && (call->method() == nullptr || !call->method()->is_scalarized_buffer_arg(k - TypeFunc::Parms))) {
2032             return false;
2033           }
2034         }
2035         calls.push(call);
2036       } else if (u->Opcode() == Op_EncodeP) {
2037         wq.push(u);
2038       } else if (u->is_AddP()) {
2039         wq.push(u);
2040       } else if (u->is_Store() && u->in(MemNode::Address) == n) {
2041         // storing to the buffer is fine
2042       } else if (u->is_SafePoint()) {
2043         SafePointNode* sfpt = u->as_SafePoint();
2044         int input = u->find_edge(n);
2045         JVMState* jvms = sfpt->jvms();
2046         if (jvms != nullptr) {
2047           if (input < (int)jvms->debug_start()) {
2048             return false;
2049           }
2050         }
2051       } else {
2052         return false;
2053       }
2054     }
2055   }
2056   for (uint i = 0; i < calls.size(); ++i) {
2057     CallJavaNode* call = calls.at(i)->as_CallJava();
2058     uint nargs = call->tf()->domain_cc()->cnt();
2059     for (uint k = TypeFunc::Parms; k < nargs; k++) {
2060       Node* in = call->in(k);
2061       if (wq.member(in)) {
2062         assert(call->method()->is_scalarized_buffer_arg(k - TypeFunc::Parms), "only buffer argument removed here");
2063         igvn.replace_input_of(call, k, igvn.zerocon(T_OBJECT));
2064       }
2065     }
2066   }
2067   return true;
2068 }
2069 
2070 void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
2071   // Make sure that the return value does not keep an otherwise unused allocation alive
2072   if (tf()->returns_inline_type_as_fields()) {
2073     Node* ret = nullptr;
2074     for (uint i = 1; i < root()->req(); i++) {
2075       Node* in = root()->in(i);
2076       if (in->Opcode() == Op_Return) {
2077         assert(ret == nullptr, "only one return");
2078         ret = in;
2079       }
2080     }
2081     if (ret != nullptr) {
2082       Node* ret_val = ret->in(TypeFunc::Parms);
2083       if (igvn.type(ret_val)->isa_oopptr() &&
2084           return_val_keeps_allocations_alive(ret_val)) {
2085         igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
2086         assert(ret_val->outcnt() == 0, "should be dead now");
2087         igvn.remove_dead_node(ret_val, PhaseIterGVN::NodeOrigin::Graph);
2088       }
2089     }
2090   }
2091   // if a newly allocated object is a value that's only passed as argument to calls as (possibly null) buffers, then
2092   // clear the call argument inputs so the allocation node can be removed
2093   for (int i = 0; i < C->macro_count(); ++i) {
2094     Node* macro_node = C->macro_node(i);
2095     if (macro_node->Opcode() == Op_Allocate) {
2096       AllocateNode* allocate = macro_node->as_Allocate();
2097       Node* result_cast = allocate->result_cast();
2098       if (result_cast != nullptr) {
2099         const Type* result_type = igvn.type(result_cast);
2100         if (result_type->is_inlinetypeptr()) {
2101           clear_argument_if_only_used_as_buffer_at_calls(result_cast, igvn);
2102         }
2103       }
2104     }
2105   }
2106 
2107   if (_inline_type_nodes.length() == 0) {
2108     // keep the graph canonical
2109     igvn.optimize();
2110     return;
2111   }
2112   // Scalarize inline types in safepoint debug info.
2113   // Delay this until all inlining is over to avoid getting inconsistent debug info.
2114   set_scalarize_in_safepoints(true);
2115   for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
2116     InlineTypeNode* vt = _inline_type_nodes.at(i)->as_InlineType();
2117     vt->make_scalar_in_safepoints(&igvn);
2118     igvn.record_for_igvn(vt);
2119   }
2120   if (remove) {
2121     // Remove inline type nodes by replacing them with their oop input
2122     while (_inline_type_nodes.length() > 0) {
2123       InlineTypeNode* vt = _inline_type_nodes.pop()->as_InlineType();
2124       if (vt->outcnt() == 0) {
2125         igvn.remove_dead_node(vt, PhaseIterGVN::NodeOrigin::Graph);
2126         continue;
2127       }
2128       for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
2129         DEBUG_ONLY(bool must_be_buffered = false);
2130         Node* u = vt->out(i);
2131         // Check if any users are blackholes. If so, rewrite them to use either the
2132         // allocated buffer, or individual components, instead of the inline type node
2133         // that goes away.
2134         if (u->is_Blackhole()) {
2135           BlackholeNode* bh = u->as_Blackhole();
2136 
2137           // Unlink the old input
2138           int idx = bh->find_edge(vt);
2139           assert(idx != -1, "The edge should be there");
2140           bh->del_req(idx);
2141           --i;
2142 
2143           if (vt->is_allocated(&igvn)) {
2144             // Already has the allocated instance, blackhole that
2145             bh->add_req(vt->get_oop());
2146           } else {
2147             // Not allocated yet, blackhole the components
2148             for (uint c = 0; c < vt->field_count(); c++) {
2149               bh->add_req(vt->field_value(c));
2150             }
2151           }
2152 
2153           // Node modified, record for IGVN
2154           igvn.record_for_igvn(bh);
2155         }
2156 #ifdef ASSERT
2157         // Verify that inline type is buffered when replacing by oop
2158         else if (u->is_InlineType()) {
2159           // InlineType uses don't need buffering because they are about to be replaced as well
2160         } else if (u->is_Phi()) {
2161           // TODO 8302217 Remove this once InlineTypeNodes are reliably pushed through
2162         } else {
2163           must_be_buffered = true;
2164         }
2165         if (must_be_buffered && !vt->is_allocated(&igvn)) {
2166           vt->dump(0);
2167           u->dump(0);
2168           assert(false, "Should have been buffered");
2169         }
2170 #endif
2171       }
2172       igvn.replace_node(vt, vt->get_oop());
2173     }
2174   }
2175   igvn.optimize();
2176 }
2177 
2178 void Compile::add_flat_access(Node* n) {
2179   assert(n != nullptr && (n->Opcode() == Op_LoadFlat || n->Opcode() == Op_StoreFlat), "unexpected node %s", n == nullptr ? "nullptr" : n->Name());
2180   assert(!_flat_access_nodes.contains(n), "duplicate insertion");
2181   _flat_access_nodes.push(n);
2182 }
2183 
2184 void Compile::remove_flat_access(Node* n) {
2185   assert(n != nullptr && (n->Opcode() == Op_LoadFlat || n->Opcode() == Op_StoreFlat), "unexpected node %s", n == nullptr ? "nullptr" : n->Name());
2186   _flat_access_nodes.remove_if_existing(n);
2187 }
2188 
2189 void Compile::process_flat_accesses(PhaseIterGVN& igvn) {
2190   assert(igvn._worklist.size() == 0, "should be empty");
2191   igvn.set_delay_transform(true);
2192   for (int i = _flat_access_nodes.length() - 1; i >= 0; i--) {
2193     Node* n = _flat_access_nodes.at(i);
2194     assert(n != nullptr, "unexpected nullptr");
2195     if (n->is_LoadFlat()) {
2196       LoadFlatNode* loadn = n->as_LoadFlat();
2197       // Expending a flat load atomically means that we get a chunk of memory spanning multiple fields
2198       // that we chop with bitwise operations. That is too subtle for some optimizations, especially
2199       // constant folding when fields are constant. If we can get a constant object from which we are
2200       // flat-loading, we can simply replace the loads at compilation-time by the field of the constant
2201       // object.
2202       ciInstance* loaded_from = nullptr;
2203       if (FoldStableValues) {
2204         const TypeOopPtr* base_type = igvn.type(loadn->base())->is_oopptr();
2205         ciObject* oop = base_type->const_oop();
2206         int off = igvn.type(loadn->ptr())->isa_ptr()->offset();
2207 
2208         if (oop != nullptr && oop->is_instance()) {
2209           ciInstance* holder = oop->as_instance();
2210           ciKlass* klass = holder->klass();
2211           ciInstanceKlass* iklass = klass->as_instance_klass();
2212           ciField* field = iklass->get_non_flat_field_by_offset(off);
2213 
2214           if (field->is_stable()) {
2215             ciConstant fv = holder->field_value(field);
2216             if (is_reference_type(fv.basic_type()) && fv.as_object()->is_instance()) {
2217               // The field value is an object, not null. We can use stability.
2218               loaded_from = fv.as_object()->as_instance();
2219             }
2220           }
2221         } else if (oop != nullptr && oop->is_array() && off != Type::OffsetBot) {
2222           ciArray* array = oop->as_array();
2223           ciConstant elt = array->element_value_by_offset(off);
2224           const TypeAryPtr* aryptr = base_type->is_aryptr();
2225           if (aryptr->is_stable() && aryptr->is_atomic() && is_reference_type(elt.basic_type()) && elt.as_object()->is_instance()) {
2226             loaded_from = elt.as_object()->as_instance();
2227           }
2228         }
2229       }
2230 
2231       if (loaded_from != nullptr) {
2232         loadn->expand_constant(igvn, loaded_from);
2233       } else {
2234         loadn->expand_atomic(igvn);
2235       }
2236     } else {
2237       n->as_StoreFlat()->expand_atomic(igvn);
2238     }
2239   }
2240   _flat_access_nodes.clear_and_deallocate();
2241   igvn.set_delay_transform(false);
2242   igvn.optimize();
2243 }
2244 
2245 void Compile::adjust_flat_array_access_aliases(PhaseIterGVN& igvn) {
2246   DEBUG_ONLY(igvn.verify_empty_worklist(nullptr));
2247   if (!_has_flat_accesses) {
2248     return;
2249   }
2250   // Initially, all flat array accesses share the same slice to
2251   // keep dependencies with Object[] array accesses (that could be
2252   // to a flat array) correct. We're done with parsing so we
2253   // now know all flat array accesses in this compile
2254   // unit. Let's move flat array accesses to their own slice,
2255   // one per element field. This should help memory access
2256   // optimizations.
2257   ResourceMark rm;
2258   Unique_Node_List wq;
2259   wq.push(root());
2260 
2261   Node_List mergememnodes;
2262   Node_List memnodes;
2263 
2264   // Alias index currently shared by all flat memory accesses
2265   int index = get_alias_index(TypeAryPtr::INLINES);
2266 
2267   // Find MergeMem nodes and flat array accesses
2268   for (uint i = 0; i < wq.size(); i++) {
2269     Node* n = wq.at(i);
2270     if (n->is_Mem()) {
2271       const TypePtr* adr_type = nullptr;
2272       adr_type = get_adr_type(get_alias_index(n->adr_type()));
2273       if (adr_type == TypeAryPtr::INLINES) {
2274         memnodes.push(n);
2275       }
2276     } else if (n->is_MergeMem()) {
2277       MergeMemNode* mm = n->as_MergeMem();
2278       if (mm->memory_at(index) != mm->base_memory()) {
2279         mergememnodes.push(n);
2280       }
2281     }
2282     for (uint j = 0; j < n->req(); j++) {
2283       Node* m = n->in(j);
2284       if (m != nullptr) {
2285         wq.push(m);
2286       }
2287     }
2288   }
2289 
2290   _flat_accesses_share_alias = false;
2291 
2292   // We are going to change the slice for the flat array
2293   // accesses so we need to clear the cache entries that refer to
2294   // them.
2295   for (uint i = 0; i < AliasCacheSize; i++) {
2296     AliasCacheEntry* ace = &_alias_cache[i];
2297     if (ace->_adr_type != nullptr &&
2298         ace->_adr_type->is_flat()) {
2299       ace->_adr_type = nullptr;
2300       ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the nullptr adr_type resolves to AliasIdxTop
2301     }
2302   }
2303 
2304 #ifdef ASSERT
2305   for (uint i = 0; i < memnodes.size(); i++) {
2306     Node* m = memnodes.at(i);
2307     const TypePtr* adr_type = m->adr_type();
2308     m->as_Mem()->set_adr_type(adr_type);
2309   }
2310 #endif // ASSERT
2311 
2312   int start_alias = num_alias_types(); // Start of new aliases
2313   Node_Stack stack(0);
2314 #ifdef ASSERT
2315   VectorSet seen(Thread::current()->resource_area());
2316 #endif
2317   // Now let's fix the memory graph so each flat array access
2318   // is moved to the right slice. Start from the MergeMem nodes.
2319   uint last = unique();
2320   for (uint i = 0; i < mergememnodes.size(); i++) {
2321     MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
2322     if (current->outcnt() == 0) {
2323       // This node is killed by a previous iteration
2324       continue;
2325     }
2326 
2327     Node* n = current->memory_at(index);
2328     MergeMemNode* mm = nullptr;
2329     do {
2330       // Follow memory edges through memory accesses, phis and
2331       // narrow membars and push nodes on the stack. Once we hit
2332       // bottom memory, we pop element off the stack one at a
2333       // time, in reverse order, and move them to the right slice
2334       // by changing their memory edges.
2335       if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() ||
2336           (n->adr_type() == TypeAryPtr::INLINES && !n->is_NarrowMemProj())) {
2337         assert(!seen.test_set(n->_idx), "");
2338         // Uses (a load for instance) will need to be moved to the
2339         // right slice as well and will get a new memory state
2340         // that we don't know yet. The use could also be the
2341         // backedge of a loop. We put a place holder node between
2342         // the memory node and its uses. We replace that place
2343         // holder with the correct memory state once we know it,
2344         // i.e. when nodes are popped off the stack. Using the
2345         // place holder make the logic work in the presence of
2346         // loops.
2347         if (n->outcnt() > 1) {
2348           Node* place_holder = nullptr;
2349           assert(!n->has_out_with(Op_Node), "");
2350           for (DUIterator k = n->outs(); n->has_out(k); k++) {
2351             Node* u = n->out(k);
2352             if (u != current && u->_idx < last) {
2353               bool success = false;
2354               for (uint l = 0; l < u->req(); l++) {
2355                 if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
2356                   continue;
2357                 }
2358                 Node* in = u->in(l);
2359                 if (in == n) {
2360                   if (place_holder == nullptr) {
2361                     place_holder = new Node(1);
2362                     place_holder->init_req(0, n);
2363                   }
2364                   igvn.replace_input_of(u, l, place_holder);
2365                   success = true;
2366                 }
2367               }
2368               if (success) {
2369                 --k;
2370               }
2371             }
2372           }
2373         }
2374         if (n->is_Phi()) {
2375           stack.push(n, 1);
2376           n = n->in(1);
2377         } else if (n->is_Mem()) {
2378           stack.push(n, n->req());
2379           n = n->in(MemNode::Memory);
2380         } else {
2381           assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
2382           stack.push(n, n->req());
2383           n = n->in(0)->in(TypeFunc::Memory);
2384         }
2385       } else {
2386         assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || n->is_NarrowMemProj(), "");
2387         // Build a new MergeMem node to carry the new memory state
2388         // as we build it. IGVN should fold extraneous MergeMem
2389         // nodes.
2390         if (n->is_NarrowMemProj()) {
2391           // We need 1 NarrowMemProj for each slice of this array
2392           InitializeNode* init = n->in(0)->as_Initialize();
2393           AllocateNode* alloc = init->allocation();
2394           Node* klass_node = alloc->in(AllocateNode::KlassNode);
2395           const TypeAryKlassPtr* klass_type = klass_node->bottom_type()->isa_aryklassptr();
2396           assert(klass_type != nullptr, "must be an array");
2397           assert(klass_type->klass_is_exact(), "must be an exact klass");
2398           ciArrayKlass* klass = klass_type->exact_klass()->as_array_klass();
2399           assert(klass->is_flat_array_klass(), "must be a flat array");
2400           ciInlineKlass* elem_klass = klass->element_klass()->as_inline_klass();
2401           const TypeAryPtr* oop_type = klass_type->as_instance_type()->is_aryptr();
2402           assert(oop_type->klass_is_exact(), "must be an exact klass");
2403 
2404           Node* base = alloc->in(TypeFunc::Memory);
2405           assert(base->bottom_type() == Type::MEMORY, "the memory input of AllocateNode must be a memory");
2406           assert(base->adr_type() == TypePtr::BOTTOM, "the memory input of AllocateNode must be a bottom memory");
2407           // Must create a MergeMem with base as the base memory, do not clone if base is a
2408           // MergeMem because it may not be processed yet
2409           mm = MergeMemNode::make(nullptr);
2410           mm->set_base_memory(base);
2411           for (int j = 0; j < elem_klass->nof_nonstatic_fields(); j++) {
2412             int field_offset = elem_klass->nonstatic_field_at(j)->offset_in_bytes() - elem_klass->payload_offset();
2413             const TypeAryPtr* field_ptr = oop_type->with_offset(Type::OffsetBot)->with_field_offset(field_offset);
2414             int field_alias_idx = get_alias_index(field_ptr);
2415             assert(field_ptr == get_adr_type(field_alias_idx), "must match");
2416             Node* new_proj = new NarrowMemProjNode(init, field_ptr);
2417             igvn.register_new_node_with_optimizer(new_proj);
2418             mm->set_memory_at(field_alias_idx, new_proj);
2419           }
2420           if (!klass->is_elem_null_free()) {
2421             int nm_offset = elem_klass->null_marker_offset_in_payload();
2422             const TypeAryPtr* nm_ptr = oop_type->with_offset(Type::OffsetBot)->with_field_offset(nm_offset);
2423             int nm_alias_idx = get_alias_index(nm_ptr);
2424             assert(nm_ptr == get_adr_type(nm_alias_idx), "must match");
2425             Node* new_proj = new NarrowMemProjNode(init, nm_ptr);
2426             igvn.register_new_node_with_optimizer(new_proj);
2427             mm->set_memory_at(nm_alias_idx, new_proj);
2428           }
2429 
2430           // Replace all uses of the old NarrowMemProj with the correct state
2431           MergeMemNode* new_n = MergeMemNode::make(mm);
2432           igvn.register_new_node_with_optimizer(new_n);
2433           igvn.replace_node(n, new_n);
2434         } else {
2435           // Must create a MergeMem with n as the base memory, do not clone if n is a MergeMem
2436           // because it may not be processed yet
2437           mm = MergeMemNode::make(nullptr);
2438           mm->set_base_memory(n);
2439         }
2440 
2441         igvn.register_new_node_with_optimizer(mm);
2442         while (stack.size() > 0) {
2443           Node* m = stack.node();
2444           uint idx = stack.index();
2445           if (m->is_Mem()) {
2446             // Move memory node to its new slice
2447             const TypePtr* adr_type = m->adr_type();
2448             int alias = get_alias_index(adr_type);
2449             Node* prev = mm->memory_at(alias);
2450             igvn.replace_input_of(m, MemNode::Memory, prev);
2451             mm->set_memory_at(alias, m);
2452           } else if (m->is_Phi()) {
2453             // We need as many new phis as there are new aliases
2454             Node* new_phi_in = MergeMemNode::make(mm);
2455             igvn.register_new_node_with_optimizer(new_phi_in);
2456             igvn.replace_input_of(m, idx, new_phi_in);
2457             if (idx == m->req()-1) {
2458               Node* r = m->in(0);
2459               for (int j = start_alias; j < num_alias_types(); j++) {
2460                 const TypePtr* adr_type = get_adr_type(j);
2461                 if (!adr_type->isa_aryptr() || !adr_type->is_flat()) {
2462                   continue;
2463                 }
2464                 Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
2465                 igvn.register_new_node_with_optimizer(phi);
2466                 for (uint k = 1; k < m->req(); k++) {
2467                   phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
2468                 }
2469                 mm->set_memory_at(j, phi);
2470               }
2471               Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2472               igvn.register_new_node_with_optimizer(base_phi);
2473               for (uint k = 1; k < m->req(); k++) {
2474                 base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
2475               }
2476               mm->set_base_memory(base_phi);
2477             }
2478           } else {
2479             // This is a MemBarCPUOrder node from
2480             // Parse::array_load()/Parse::array_store(), in the
2481             // branch that handles flat arrays hidden under
2482             // an Object[] array. We also need one new membar per
2483             // new alias to keep the unknown access that the
2484             // membars protect properly ordered with accesses to
2485             // known flat array.
2486             assert(m->is_Proj(), "projection expected");
2487             Node* ctrl = m->in(0)->in(TypeFunc::Control);
2488             igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
2489             for (int j = start_alias; j < num_alias_types(); j++) {
2490               const TypePtr* adr_type = get_adr_type(j);
2491               if (!adr_type->isa_aryptr() || !adr_type->is_flat()) {
2492                 continue;
2493               }
2494               MemBarNode* mb = new MemBarCPUOrderNode(this, j, nullptr);
2495               igvn.register_new_node_with_optimizer(mb);
2496               Node* mem = mm->memory_at(j);
2497               mb->init_req(TypeFunc::Control, ctrl);
2498               mb->init_req(TypeFunc::Memory, mem);
2499               ctrl = new ProjNode(mb, TypeFunc::Control);
2500               igvn.register_new_node_with_optimizer(ctrl);
2501               mem = new ProjNode(mb, TypeFunc::Memory);
2502               igvn.register_new_node_with_optimizer(mem);
2503               mm->set_memory_at(j, mem);
2504             }
2505             igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
2506           }
2507           if (idx < m->req()-1) {
2508             idx += 1;
2509             stack.set_index(idx);
2510             n = m->in(idx);
2511             break;
2512           }
2513           // Take care of place holder nodes
2514           if (m->has_out_with(Op_Node)) {
2515             Node* place_holder = m->find_out_with(Op_Node);
2516             if (place_holder != nullptr) {
2517               Node* mm_clone = mm->clone();
2518               igvn.register_new_node_with_optimizer(mm_clone);
2519               Node* hook = new Node(1);
2520               hook->init_req(0, mm);
2521               igvn.replace_node(place_holder, mm_clone);
2522               hook->destruct(&igvn);
2523             }
2524             assert(!m->has_out_with(Op_Node), "place holder should be gone now");
2525           }
2526           stack.pop();
2527         }
2528       }
2529     } while(stack.size() > 0);
2530     // Fix the memory state at the MergeMem we started from
2531     igvn.rehash_node_delayed(current);
2532     for (int j = start_alias; j < num_alias_types(); j++) {
2533       const TypePtr* adr_type = get_adr_type(j);
2534       if (!adr_type->isa_aryptr() || !adr_type->is_flat()) {
2535         continue;
2536       }
2537       current->set_memory_at(j, mm);
2538     }
2539     current->set_memory_at(index, current->base_memory());
2540   }
2541   igvn.optimize();
2542 
2543 #ifdef ASSERT
2544   wq.clear();
2545   wq.push(root());
2546   for (uint i = 0; i < wq.size(); i++) {
2547     Node* n = wq.at(i);
2548     assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
2549     for (uint j = 0; j < n->req(); j++) {
2550       Node* m = n->in(j);
2551       if (m != nullptr) {
2552         wq.push(m);
2553       }
2554     }
2555   }
2556 #endif
2557 
2558   print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
2559 }
2560 
2561 void Compile::record_for_merge_stores_igvn(Node* n) {
2562   if (!n->for_merge_stores_igvn()) {
2563     assert(!_for_merge_stores_igvn.contains(n), "duplicate");
2564     n->add_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
2565     _for_merge_stores_igvn.append(n);
2566   }
2567 }
2568 
2569 void Compile::remove_from_merge_stores_igvn(Node* n) {
2570   n->remove_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
2571   _for_merge_stores_igvn.remove(n);
2572 }
2573 
2574 // We need to wait with merging stores until RangeCheck smearing has removed the RangeChecks during
2575 // the post loops IGVN phase. If we do it earlier, then there may still be some RangeChecks between
2576 // the stores, and we merge the wrong sequence of stores.
2577 // Example:
2578 //   StoreI RangeCheck StoreI StoreI RangeCheck StoreI
2579 // Apply MergeStores:
2580 //   StoreI RangeCheck [   StoreL  ] RangeCheck StoreI
2581 // Remove more RangeChecks:
2582 //   StoreI            [   StoreL  ]            StoreI
2583 // But now it would have been better to do this instead:
2584 //   [         StoreL       ] [       StoreL         ]
2585 //
2586 // Note: we allow stores to merge in this dedicated IGVN round, and any later IGVN round,
2587 //       since we never unset _merge_stores_phase.
2588 void Compile::process_for_merge_stores_igvn(PhaseIterGVN& igvn) {
2589   C->set_merge_stores_phase();
2590 
2591   if (_for_merge_stores_igvn.length() > 0) {
2592     while (_for_merge_stores_igvn.length() > 0) {
2593       Node* n = _for_merge_stores_igvn.pop();
2594       n->remove_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
2595       igvn._worklist.push(n);
2596     }
2597     igvn.optimize();
2598     if (failing()) return;
2599     assert(_for_merge_stores_igvn.length() == 0, "no more delayed nodes allowed");
2600     print_method(PHASE_AFTER_MERGE_STORES, 3);
2601   }
2602 }
2603 
2604 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
2605   if (OptimizeUnstableIf) {
2606     _unstable_if_traps.append(trap);
2607   }
2608 }
2609 
2610 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
2611   for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
2612     UnstableIfTrap* trap = _unstable_if_traps.at(i);
2613     Node* n = trap->uncommon_trap();
2614     if (!useful.member(n)) {
2615       _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
2616     }
2617   }
2618 }
2619 
2620 // Remove the unstable if trap associated with 'unc' from candidates. It is either dead
2621 // or fold-compares case. Return true if succeed or not found.
2622 //
2623 // In rare cases, the found trap has been processed. It is too late to delete it. Return
2624 // false and ask fold-compares to yield.
2625 //
2626 // 'fold-compares' may use the uncommon_trap of the dominating IfNode to cover the fused
2627 // IfNode. This breaks the unstable_if trap invariant: control takes the unstable path
2628 // when deoptimization does happen.
2629 bool Compile::remove_unstable_if_trap(CallStaticJavaNode* unc, bool yield) {
2630   for (int i = 0; i < _unstable_if_traps.length(); ++i) {
2631     UnstableIfTrap* trap = _unstable_if_traps.at(i);
2632     if (trap->uncommon_trap() == unc) {
2633       if (yield && trap->modified()) {
2634         return false;
2635       }
2636       _unstable_if_traps.delete_at(i);
2637       break;
2638     }
2639   }
2640   return true;
2641 }
2642 
2643 // Re-calculate unstable_if traps with the liveness of next_bci, which points to the unlikely path.
2644 // It needs to be done after igvn because fold-compares may fuse uncommon_traps and before renumbering.
2645 void Compile::process_for_unstable_if_traps(PhaseIterGVN& igvn) {
2646   for (int i = _unstable_if_traps.length() - 1; i >= 0; --i) {
2647     UnstableIfTrap* trap = _unstable_if_traps.at(i);
2648     CallStaticJavaNode* unc = trap->uncommon_trap();
2649     int next_bci = trap->next_bci();
2650     bool modified = trap->modified();
2651 
2652     if (next_bci != -1 && !modified) {
2653       assert(!_dead_node_list.test(unc->_idx), "changing a dead node!");
2654       JVMState* jvms = unc->jvms();
2655       ciMethod* method = jvms->method();
2656       ciBytecodeStream iter(method);
2657 
2658       iter.force_bci(jvms->bci());
2659       assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
2660       Bytecodes::Code c = iter.cur_bc();
2661       Node* lhs = nullptr;
2662       Node* rhs = nullptr;
2663       if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
2664         lhs = unc->peek_operand(0);
2665         rhs = unc->peek_operand(1);
2666       } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
2667         lhs = unc->peek_operand(0);
2668       }
2669 
2670       ResourceMark rm;
2671       const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
2672       assert(live_locals.is_valid(), "broken liveness info");
2673       int len = (int)live_locals.size();
2674 
2675       for (int i = 0; i < len; i++) {
2676         Node* local = unc->local(jvms, i);
2677         // kill local using the liveness of next_bci.
2678         // give up when the local looks like an operand to secure reexecution.
2679         if (!live_locals.at(i) && !local->is_top() && local != lhs && local != rhs) {
2680           uint idx = jvms->locoff() + i;
2681 #ifdef ASSERT
2682           if (PrintOpto && Verbose) {
2683             tty->print("[unstable_if] kill local#%d: ", idx);
2684             local->dump();
2685             tty->cr();
2686           }
2687 #endif
2688           igvn.replace_input_of(unc, idx, top());
2689           modified = true;
2690         }
2691       }
2692     }
2693 
2694     // keep the modified trap for late query
2695     if (modified) {
2696       trap->set_modified();
2697     } else {
2698       _unstable_if_traps.delete_at(i);
2699     }
2700   }
2701   igvn.optimize();
2702 }
2703 
2704 // StringOpts and late inlining of string methods
2705 void Compile::inline_string_calls(bool parse_time) {
2706   {
2707     // remove useless nodes to make the usage analysis simpler
2708     ResourceMark rm;
2709     PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
2710   }
2711 
2712   {
2713     ResourceMark rm;
2714     print_method(PHASE_BEFORE_STRINGOPTS, 3);
2715     PhaseStringOpts pso(initial_gvn());
2716     print_method(PHASE_AFTER_STRINGOPTS, 3);
2717   }
2718 
2719   // now inline anything that we skipped the first time around
2720   if (!parse_time) {
2721     _late_inlines_pos = _late_inlines.length();
2722   }
2723 
2724   while (_string_late_inlines.length() > 0) {
2725     CallGenerator* cg = _string_late_inlines.pop();
2726     cg->do_late_inline();
2727     if (failing())  return;
2728   }
2729   _string_late_inlines.trunc_to(0);
2730 }
2731 
2732 // Late inlining of boxing methods
2733 void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
2734   if (_boxing_late_inlines.length() > 0) {
2735     assert(has_boxed_value(), "inconsistent");
2736 
2737     set_inlining_incrementally(true);
2738 
2739     igvn_worklist()->ensure_empty(); // should be done with igvn
2740 
2741     _late_inlines_pos = _late_inlines.length();
2742 
2743     while (_boxing_late_inlines.length() > 0) {
2744       CallGenerator* cg = _boxing_late_inlines.pop();
2745       cg->do_late_inline();
2746       if (failing())  return;
2747     }
2748     _boxing_late_inlines.trunc_to(0);
2749 
2750     inline_incrementally_cleanup(igvn);
2751 
2752     set_inlining_incrementally(false);
2753   }
2754 }
2755 
2756 bool Compile::inline_incrementally_one() {
2757   assert(IncrementalInline, "incremental inlining should be on");
2758   assert(_late_inlines.length() > 0, "should have been checked by caller");
2759 
2760   TracePhase tp(_t_incrInline_inline);
2761 
2762   set_inlining_progress(false);
2763   set_do_cleanup(false);
2764 
2765   for (int i = 0; i < _late_inlines.length(); i++) {
2766     _late_inlines_pos = i+1;
2767     CallGenerator* cg = _late_inlines.at(i);
2768     bool is_scheduled_for_igvn_before = C->igvn_worklist()->member(cg->call_node());
2769     bool does_dispatch = cg->is_virtual_late_inline() || cg->is_mh_late_inline();
2770     if (inlining_incrementally() || does_dispatch) { // a call can be either inlined or strength-reduced to a direct call
2771       if (should_stress_inlining()) {
2772         // randomly add repeated inline attempt if stress-inlining
2773         cg->call_node()->set_generator(cg);
2774         C->igvn_worklist()->push(cg->call_node());
2775         continue;
2776       }
2777       cg->do_late_inline();
2778       assert(_late_inlines.at(i) == cg, "no insertions before current position allowed");
2779       if (failing()) {
2780         return false;
2781       } else if (inlining_progress()) {
2782         _late_inlines_pos = i+1; // restore the position in case new elements were inserted
2783         print_method(PHASE_INCREMENTAL_INLINE_STEP, 3, cg->call_node());
2784         break; // process one call site at a time
2785       } else {
2786         bool is_scheduled_for_igvn_after = C->igvn_worklist()->member(cg->call_node());
2787         if (!is_scheduled_for_igvn_before && is_scheduled_for_igvn_after) {
2788           // Avoid potential infinite loop if node already in the IGVN list
2789           assert(false, "scheduled for IGVN during inlining attempt");
2790         } else {
2791           // Ensure call node has not disappeared from IGVN worklist during a failed inlining attempt
2792           assert(!is_scheduled_for_igvn_before || is_scheduled_for_igvn_after, "call node removed from IGVN list during inlining pass");
2793           cg->call_node()->set_generator(cg);
2794         }
2795       }
2796     } else {
2797       // Ignore late inline direct calls when inlining is not allowed.
2798       // They are left in the late inline list when node budget is exhausted until the list is fully drained.
2799     }
2800   }
2801   // Remove processed elements.
2802   _late_inlines.remove_till(_late_inlines_pos);
2803   _late_inlines_pos = 0;
2804 
2805   assert(inlining_progress() || _late_inlines.length() == 0, "no progress");
2806 
2807   bool needs_cleanup = do_cleanup() || over_inlining_cutoff();
2808 
2809   set_inlining_progress(false);
2810   set_do_cleanup(false);
2811 
2812   bool force_cleanup = directive()->IncrementalInlineForceCleanupOption;
2813   return (_late_inlines.length() > 0) && !needs_cleanup && !force_cleanup;
2814 }
2815 
2816 void Compile::inline_incrementally_cleanup(PhaseIterGVN& igvn) {
2817   {
2818     TracePhase tp(_t_incrInline_pru);
2819     ResourceMark rm;
2820     PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
2821   }
2822   {
2823     TracePhase tp(_t_incrInline_igvn);
2824     igvn.reset();
2825     igvn.optimize();
2826     if (failing()) return;
2827   }
2828   print_method(PHASE_INCREMENTAL_INLINE_CLEANUP, 3);
2829 }
2830 
2831 template<typename E>
2832 static void shuffle_array(Compile& C, GrowableArray<E>& array) {
2833   if (array.length() < 2) {
2834     return;
2835   }
2836   for (uint i = array.length() - 1; i >= 1; i--) {
2837     uint j = C.random() % (i + 1);
2838     swap(array.at(i), array.at(j));
2839   }
2840 }
2841 
2842 void Compile::shuffle_late_inlines() {
2843   shuffle_array(*C, _late_inlines);
2844 }
2845 
2846 // Perform incremental inlining until bound on number of live nodes is reached
2847 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
2848   TracePhase tp(_t_incrInline);
2849 
2850   set_inlining_incrementally(true);
2851   uint low_live_nodes = 0;
2852 
2853   if (StressIncrementalInlining) {
2854     shuffle_late_inlines();
2855   }
2856 
2857   while (_late_inlines.length() > 0) {
2858     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2859       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
2860         TracePhase tp(_t_incrInline_ideal);
2861         // PhaseIdealLoop is expensive so we only try it once we are
2862         // out of live nodes and we only try it again if the previous
2863         // helped got the number of nodes down significantly
2864         PhaseIdealLoop::optimize(igvn, LoopOptsNone);
2865         if (failing())  return;
2866         low_live_nodes = live_nodes();
2867         _major_progress = true;
2868       }
2869 
2870       if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2871         bool do_print_inlining = print_inlining() || print_intrinsics();
2872         if (do_print_inlining || log() != nullptr) {
2873           // Print inlining message for candidates that we couldn't inline for lack of space.
2874           for (int i = 0; i < _late_inlines.length(); i++) {
2875             CallGenerator* cg = _late_inlines.at(i);
2876             const char* msg = "live nodes > LiveNodeCountInliningCutoff";
2877             if (do_print_inlining) {
2878               inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE, msg);
2879             }
2880             log_late_inline_failure(cg, msg);
2881           }
2882         }
2883         break; // finish
2884       }
2885     }
2886 
2887     igvn_worklist()->ensure_empty(); // should be done with igvn
2888 
2889     if (_late_inlines.length() == 0) {
2890       break; // no more progress
2891     }
2892 
2893     while (inline_incrementally_one()) {
2894       assert(!failing_internal() || failure_is_artificial(), "inconsistent");
2895     }
2896     if (failing())  return;
2897 
2898     inline_incrementally_cleanup(igvn);
2899 
2900     print_method(PHASE_INCREMENTAL_INLINE_STEP, 3);
2901 
2902     if (failing())  return;
2903   }
2904 
2905   igvn_worklist()->ensure_empty(); // should be done with igvn
2906 
2907   if (_string_late_inlines.length() > 0) {
2908     assert(has_stringbuilder(), "inconsistent");
2909 
2910     inline_string_calls(false);
2911 
2912     if (failing())  return;
2913 
2914     inline_incrementally_cleanup(igvn);
2915   }
2916 
2917   set_inlining_incrementally(false);
2918 }
2919 
2920 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2921   // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2922   // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2923   // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr"
2924   // as if "inlining_incrementally() == true" were set.
2925   assert(inlining_incrementally() == false, "not allowed");
2926   set_strength_reduction(true);
2927 #ifdef ASSERT
2928   Unique_Node_List* modified_nodes = _modified_nodes;
2929   _modified_nodes = nullptr;
2930 #endif
2931   assert(_late_inlines.length() > 0, "sanity");
2932 
2933   if (StressIncrementalInlining) {
2934     shuffle_late_inlines();
2935   }
2936 
2937   while (_late_inlines.length() > 0) {
2938     igvn_worklist()->ensure_empty(); // should be done with igvn
2939 
2940     while (inline_incrementally_one()) {
2941       assert(!failing_internal() || failure_is_artificial(), "inconsistent");
2942     }
2943     if (failing())  return;
2944 
2945     inline_incrementally_cleanup(igvn);
2946   }
2947   DEBUG_ONLY( _modified_nodes = modified_nodes; )
2948   set_strength_reduction(false);
2949 }
2950 
2951 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2952   if (_loop_opts_cnt > 0) {
2953     while (major_progress() && (_loop_opts_cnt > 0)) {
2954       TracePhase tp(_t_idealLoop);
2955       PhaseIdealLoop::optimize(igvn, mode);
2956       _loop_opts_cnt--;
2957       if (failing())  return false;
2958       if (major_progress()) {
2959         print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2960       }
2961     }
2962   }
2963   return true;
2964 }
2965 
2966 // Remove edges from "root" to each SafePoint at a backward branch.
2967 // They were inserted during parsing (see add_safepoint()) to make
2968 // infinite loops without calls or exceptions visible to root, i.e.,
2969 // useful.
2970 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2971   Node *r = root();
2972   if (r != nullptr) {
2973     for (uint i = r->req(); i < r->len(); ++i) {
2974       Node *n = r->in(i);
2975       if (n != nullptr && n->is_SafePoint()) {
2976         r->rm_prec(i);
2977         if (n->outcnt() == 0) {
2978           igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph);
2979         }
2980         --i;
2981       }
2982     }
2983     // Parsing may have added top inputs to the root node (Path
2984     // leading to the Halt node proven dead). Make sure we get a
2985     // chance to clean them up.
2986     igvn._worklist.push(r);
2987     igvn.optimize();
2988   }
2989 }
2990 
2991 //------------------------------Optimize---------------------------------------
2992 // Given a graph, optimize it.
2993 void Compile::Optimize() {
2994   TracePhase tp(_t_optimizer);
2995 
2996 #ifndef PRODUCT
2997   if (env()->break_at_compile()) {
2998     BREAKPOINT;
2999   }
3000 
3001 #endif
3002 
3003   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3004 #ifdef ASSERT
3005   bs->verify_gc_barriers(this, BarrierSetC2::BeforeOptimize);
3006 #endif
3007 
3008   ResourceMark rm;
3009 
3010   NOT_PRODUCT( verify_graph_edges(); )
3011 
3012   print_method(PHASE_AFTER_PARSING, 1);
3013 
3014  {
3015   // Iterative Global Value Numbering, including ideal transforms
3016   PhaseIterGVN igvn;
3017 #ifdef ASSERT
3018   _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
3019 #endif
3020   {
3021     TracePhase tp(_t_iterGVN);
3022     igvn.optimize(true);
3023   }
3024 
3025   if (failing())  return;
3026 
3027   print_method(PHASE_ITER_GVN1, 2);
3028 
3029   process_for_unstable_if_traps(igvn);
3030 
3031   if (failing())  return;
3032 
3033   inline_incrementally(igvn);
3034 
3035   print_method(PHASE_INCREMENTAL_INLINE, 2);
3036 
3037   if (failing())  return;
3038 
3039   if (eliminate_boxing()) {
3040     // Inline valueOf() methods now.
3041     inline_boxing_calls(igvn);
3042 
3043     if (failing())  return;
3044 
3045     if (AlwaysIncrementalInline || StressIncrementalInlining) {
3046       inline_incrementally(igvn);
3047     }
3048 
3049     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
3050 
3051     if (failing())  return;
3052   }
3053 
3054   // Remove the speculative part of types and clean up the graph from
3055   // the extra CastPP nodes whose only purpose is to carry them. Do
3056   // that early so that optimizations are not disrupted by the extra
3057   // CastPP nodes.
3058   remove_speculative_types(igvn);
3059 
3060   if (failing())  return;
3061 
3062   // No more new expensive nodes will be added to the list from here
3063   // so keep only the actual candidates for optimizations.
3064   cleanup_expensive_nodes(igvn);
3065 
3066   if (failing())  return;
3067 
3068   assert(EnableVectorSupport || !has_vbox_nodes(), "sanity");
3069   if (EnableVectorSupport && has_vbox_nodes()) {
3070     TracePhase tp(_t_vector);
3071     PhaseVector pv(igvn);
3072     pv.optimize_vector_boxes();
3073     if (failing())  return;
3074     print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
3075   }
3076   assert(!has_vbox_nodes(), "sanity");
3077 
3078   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
3079     Compile::TracePhase tp(_t_renumberLive);
3080     igvn_worklist()->ensure_empty(); // should be done with igvn
3081     {
3082       ResourceMark rm;
3083       PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
3084     }
3085     igvn.reset();
3086     igvn.optimize(true);
3087     if (failing()) return;
3088   }
3089 
3090   // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
3091   // safepoints
3092   remove_root_to_sfpts_edges(igvn);
3093 
3094   // Process inline type nodes now that all inlining is over
3095   process_inline_types(igvn);
3096 
3097   adjust_flat_array_access_aliases(igvn);
3098 
3099   if (failing())  return;
3100 
3101   if (C->macro_count() > 0) {
3102     // Eliminate some macro nodes before EA to reduce analysis pressure
3103     PhaseMacroExpand mexp(igvn);
3104     mexp.eliminate_macro_nodes(/* eliminate_locks= */ false);
3105     if (failing()) {
3106       return;
3107     }
3108     igvn.set_delay_transform(false);
3109     print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
3110   }
3111 
3112   _print_phase_loop_opts = has_loops();
3113   if (_print_phase_loop_opts) {
3114     print_method(PHASE_BEFORE_LOOP_OPTS, 2);
3115   }
3116 
3117   // Perform escape analysis
3118   if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
3119     if (has_loops()) {
3120       // Cleanup graph (remove dead nodes).
3121       TracePhase tp(_t_idealLoop);
3122       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
3123       if (failing()) {
3124         return;
3125       }
3126       print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
3127       if (C->macro_count() > 0) {
3128         // Eliminate some macro nodes before EA to reduce analysis pressure
3129         PhaseMacroExpand mexp(igvn);
3130         mexp.eliminate_macro_nodes(/* eliminate_locks= */ false);
3131         if (failing()) {
3132           return;
3133         }
3134         igvn.set_delay_transform(false);
3135         print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
3136       }
3137     }
3138 
3139     bool progress;
3140     do {
3141       ConnectionGraph::do_analysis(this, &igvn);
3142 
3143       if (failing())  return;
3144 
3145       int mcount = macro_count(); // Record number of allocations and locks before IGVN
3146 
3147       // Optimize out fields loads from scalar replaceable allocations.
3148       igvn.optimize(true);
3149       print_method(PHASE_ITER_GVN_AFTER_EA, 2);
3150 
3151       if (failing()) return;
3152 
3153       if (congraph() != nullptr && macro_count() > 0) {
3154         TracePhase tp(_t_macroEliminate);
3155         PhaseMacroExpand mexp(igvn);
3156         mexp.eliminate_macro_nodes();
3157         if (failing()) {
3158           return;
3159         }
3160         print_method(PHASE_AFTER_MACRO_ELIMINATION, 2);
3161 
3162         igvn.set_delay_transform(false);
3163         print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
3164       }
3165 
3166       ConnectionGraph::verify_ram_nodes(this, root());
3167       if (failing())  return;
3168 
3169       progress = do_iterative_escape_analysis() &&
3170                  (macro_count() < mcount) &&
3171                  ConnectionGraph::has_candidates(this);
3172       // Try again if candidates exist and made progress
3173       // by removing some allocations and/or locks.
3174     } while (progress);
3175   }
3176 
3177   process_flat_accesses(igvn);
3178   if (failing()) {
3179     return;
3180   }
3181 
3182   // Loop transforms on the ideal graph.  Range Check Elimination,
3183   // peeling, unrolling, etc.
3184 
3185   // Set loop opts counter
3186   if((_loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
3187     {
3188       TracePhase tp(_t_idealLoop);
3189       PhaseIdealLoop::optimize(igvn, LoopOptsDefault);
3190       _loop_opts_cnt--;
3191       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
3192       if (failing())  return;
3193     }
3194     // Loop opts pass if partial peeling occurred in previous pass
3195     if(PartialPeelLoop && major_progress() && (_loop_opts_cnt > 0)) {
3196       TracePhase tp(_t_idealLoop);
3197       PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
3198       _loop_opts_cnt--;
3199       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
3200       if (failing())  return;
3201     }
3202     // Loop opts pass for loop-unrolling before CCP
3203     if(major_progress() && (_loop_opts_cnt > 0)) {
3204       TracePhase tp(_t_idealLoop);
3205       PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
3206       _loop_opts_cnt--;
3207       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
3208     }
3209     if (!failing()) {
3210       // Verify that last round of loop opts produced a valid graph
3211       PhaseIdealLoop::verify(igvn);
3212     }
3213   }
3214   if (failing())  return;
3215 
3216   // Conditional Constant Propagation;
3217   print_method(PHASE_BEFORE_CCP1, 2);
3218   PhaseCCP ccp( &igvn );
3219   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
3220   {
3221     TracePhase tp(_t_ccp);
3222     ccp.do_transform();
3223   }
3224   print_method(PHASE_CCP1, 2);
3225 
3226   assert( true, "Break here to ccp.dump_old2new_map()");
3227 
3228   // Iterative Global Value Numbering, including ideal transforms
3229   {
3230     TracePhase tp(_t_iterGVN2);
3231     igvn.reset_from_igvn(&ccp);
3232     igvn.optimize(true);
3233   }
3234   print_method(PHASE_ITER_GVN2, 2);
3235 
3236   if (failing())  return;
3237 
3238   // Loop transforms on the ideal graph.  Range Check Elimination,
3239   // peeling, unrolling, etc.
3240   if (!optimize_loops(igvn, LoopOptsDefault)) {
3241     return;
3242   }
3243 
3244   C->clear_major_progress(); // ensure that major progress is now clear
3245 
3246   process_for_post_loop_opts_igvn(igvn);
3247 
3248   if (failing())  return;
3249 
3250   // Once loop optimizations are over, it is safe to get rid of all reachability fence nodes and
3251   // migrate reachability edges to safepoints.
3252   if (OptimizeReachabilityFences && _reachability_fences.length() > 0) {
3253     TracePhase tp1(_t_idealLoop);
3254     TracePhase tp2(_t_reachability);
3255     PhaseIdealLoop::optimize(igvn, PostLoopOptsExpandReachabilityFences);
3256     print_method(PHASE_EXPAND_REACHABILITY_FENCES, 2);
3257     if (failing())  return;
3258     assert(_reachability_fences.length() == 0 || PreserveReachabilityFencesOnConstants, "no RF nodes allowed");
3259   }
3260 
3261   process_for_merge_stores_igvn(igvn);
3262 
3263   if (failing())  return;
3264 
3265 #ifdef ASSERT
3266   bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
3267 #endif
3268 
3269   if (_late_inlines.length() > 0) {
3270     // More opportunities to optimize virtual and MH calls.
3271     // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
3272     process_late_inline_calls_no_inline(igvn);
3273     if (failing()) {
3274       return;
3275     }
3276     process_inline_types(igvn);
3277   }
3278   assert(_late_inlines.length() == 0, "late inline queue must be drained");
3279 
3280   {
3281     TracePhase tp(_t_macroExpand);
3282     PhaseMacroExpand mex(igvn);
3283     // Last attempt to eliminate macro nodes.
3284     mex.eliminate_macro_nodes();
3285     if (failing()) {
3286       return;
3287     }
3288 
3289     print_method(PHASE_BEFORE_MACRO_EXPANSION, 3);
3290     // Do not allow new macro nodes once we start to eliminate and expand
3291     C->reset_allow_macro_nodes();
3292     // Last attempt to eliminate macro nodes before expand
3293     mex.eliminate_macro_nodes();
3294     if (failing()) {
3295       return;
3296     }
3297     mex.eliminate_opaque_looplimit_macro_nodes();
3298     if (failing()) {
3299       return;
3300     }
3301     print_method(PHASE_AFTER_MACRO_ELIMINATION, 2);
3302     if (mex.expand_macro_nodes()) {
3303       assert(failing(), "must bail out w/ explicit message");
3304       return;
3305     }
3306     print_method(PHASE_AFTER_MACRO_EXPANSION, 2);
3307   }
3308 
3309   // Process inline type nodes again and remove them. From here
3310   // on we don't need to keep track of field values anymore.
3311   process_inline_types(igvn, /* remove= */ true);
3312 
3313   {
3314     TracePhase tp(_t_barrierExpand);
3315     if (bs->expand_barriers(this, igvn)) {
3316       assert(failing(), "must bail out w/ explicit message");
3317       return;
3318     }
3319     print_method(PHASE_BARRIER_EXPANSION, 2);
3320   }
3321 
3322   if (C->max_vector_size() > 0) {
3323     C->optimize_logic_cones(igvn);
3324     igvn.optimize();
3325     if (failing()) return;
3326   }
3327 
3328   DEBUG_ONLY( _modified_nodes = nullptr; )
3329   DEBUG_ONLY( _late_inlines.clear(); )
3330 
3331   assert(igvn._worklist.size() == 0, "not empty");
3332  } // (End scope of igvn; run destructor if necessary for asserts.)
3333 
3334  check_no_dead_use();
3335 
3336  // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
3337  // to remove hashes to unlock nodes for modifications.
3338  C->node_hash()->clear();
3339 
3340  // A method with only infinite loops has no edges entering loops from root
3341  {
3342    TracePhase tp(_t_graphReshaping);
3343    if (final_graph_reshaping()) {
3344      assert(failing(), "must bail out w/ explicit message");
3345      return;
3346    }
3347  }
3348 
3349  print_method(PHASE_OPTIMIZE_FINISHED, 2);
3350  DEBUG_ONLY(set_phase_optimize_finished();)
3351 }
3352 
3353 #ifdef ASSERT
3354 void Compile::check_no_dead_use() const {
3355   ResourceMark rm;
3356   Unique_Node_List wq;
3357   wq.push(root());
3358   for (uint i = 0; i < wq.size(); ++i) {
3359     Node* n = wq.at(i);
3360     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3361       Node* u = n->fast_out(j);
3362       if (u->outcnt() == 0 && !u->is_Con()) {
3363         u->dump();
3364         fatal("no reachable node should have no use");
3365       }
3366       wq.push(u);
3367     }
3368   }
3369 }
3370 #endif
3371 
3372 void Compile::inline_vector_reboxing_calls() {
3373   if (C->_vector_reboxing_late_inlines.length() > 0) {
3374     _late_inlines_pos = C->_late_inlines.length();
3375     while (_vector_reboxing_late_inlines.length() > 0) {
3376       CallGenerator* cg = _vector_reboxing_late_inlines.pop();
3377       cg->do_late_inline();
3378       if (failing())  return;
3379       print_method(PHASE_INLINE_VECTOR_REBOX, 3, cg->call_node());
3380     }
3381     _vector_reboxing_late_inlines.trunc_to(0);
3382   }
3383 }
3384 
3385 bool Compile::has_vbox_nodes() {
3386   if (C->_vector_reboxing_late_inlines.length() > 0) {
3387     return true;
3388   }
3389   for (int macro_idx = C->macro_count() - 1; macro_idx >= 0; macro_idx--) {
3390     Node * n = C->macro_node(macro_idx);
3391     assert(n->is_macro(), "only macro nodes expected here");
3392     if (n->Opcode() == Op_VectorUnbox || n->Opcode() == Op_VectorBox || n->Opcode() == Op_VectorBoxAllocate) {
3393       return true;
3394     }
3395   }
3396   return false;
3397 }
3398 
3399 //---------------------------- Bitwise operation packing optimization ---------------------------
3400 
3401 static bool is_vector_unary_bitwise_op(Node* n) {
3402   return n->Opcode() == Op_XorV &&
3403          VectorNode::is_vector_bitwise_not_pattern(n);
3404 }
3405 
3406 static bool is_vector_binary_bitwise_op(Node* n) {
3407   switch (n->Opcode()) {
3408     case Op_AndV:
3409     case Op_OrV:
3410       return true;
3411 
3412     case Op_XorV:
3413       return !is_vector_unary_bitwise_op(n);
3414 
3415     default:
3416       return false;
3417   }
3418 }
3419 
3420 static bool is_vector_ternary_bitwise_op(Node* n) {
3421   return n->Opcode() == Op_MacroLogicV;
3422 }
3423 
3424 static bool is_vector_bitwise_op(Node* n) {
3425   return is_vector_unary_bitwise_op(n)  ||
3426          is_vector_binary_bitwise_op(n) ||
3427          is_vector_ternary_bitwise_op(n);
3428 }
3429 
3430 static bool is_vector_bitwise_cone_root(Node* n) {
3431   if (n->bottom_type()->isa_vectmask() || !is_vector_bitwise_op(n)) {
3432     return false;
3433   }
3434   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3435     if (is_vector_bitwise_op(n->fast_out(i))) {
3436       return false;
3437     }
3438   }
3439   return true;
3440 }
3441 
3442 static uint collect_unique_inputs(Node* n, Unique_Node_List& inputs) {
3443   uint cnt = 0;
3444   if (is_vector_bitwise_op(n)) {
3445     uint inp_cnt = n->is_predicated_vector() ? n->req()-1 : n->req();
3446     if (VectorNode::is_vector_bitwise_not_pattern(n)) {
3447       for (uint i = 1; i < inp_cnt; i++) {
3448         Node* in = n->in(i);
3449         bool skip = VectorNode::is_all_ones_vector(in);
3450         if (!skip && !inputs.member(in)) {
3451           inputs.push(in);
3452           cnt++;
3453         }
3454       }
3455       assert(cnt <= 1, "not unary");
3456     } else {
3457       uint last_req = inp_cnt;
3458       if (is_vector_ternary_bitwise_op(n)) {
3459         last_req = inp_cnt - 1; // skip last input
3460       }
3461       for (uint i = 1; i < last_req; i++) {
3462         Node* def = n->in(i);
3463         if (!inputs.member(def)) {
3464           inputs.push(def);
3465           cnt++;
3466         }
3467       }
3468     }
3469   } else { // not a bitwise operations
3470     if (!inputs.member(n)) {
3471       inputs.push(n);
3472       cnt++;
3473     }
3474   }
3475   return cnt;
3476 }
3477 
3478 void Compile::collect_logic_cone_roots(Unique_Node_List& list) {
3479   Unique_Node_List useful_nodes;
3480   C->identify_useful_nodes(useful_nodes);
3481 
3482   for (uint i = 0; i < useful_nodes.size(); i++) {
3483     Node* n = useful_nodes.at(i);
3484     if (is_vector_bitwise_cone_root(n)) {
3485       list.push(n);
3486     }
3487   }
3488 }
3489 
3490 Node* Compile::xform_to_MacroLogicV(PhaseIterGVN& igvn,
3491                                     const TypeVect* vt,
3492                                     Unique_Node_List& partition,
3493                                     Unique_Node_List& inputs) {
3494   assert(partition.size() == 2 || partition.size() == 3, "not supported");
3495   assert(inputs.size()    == 2 || inputs.size()    == 3, "not supported");
3496   assert(Matcher::match_rule_supported_vector(Op_MacroLogicV, vt->length(), vt->element_basic_type()), "not supported");
3497 
3498   Node* in1 = inputs.at(0);
3499   Node* in2 = inputs.at(1);
3500   Node* in3 = (inputs.size() == 3 ? inputs.at(2) : in2);
3501 
3502   uint func = compute_truth_table(partition, inputs);
3503 
3504   Node* pn = partition.at(partition.size() - 1);
3505   Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : nullptr;
3506   return igvn.transform(MacroLogicVNode::make(igvn, in1, in2, in3, mask, func, vt));
3507 }
3508 
3509 static uint extract_bit(uint func, uint pos) {
3510   return (func & (1 << pos)) >> pos;
3511 }
3512 
3513 //
3514 //  A macro logic node represents a truth table. It has 4 inputs,
3515 //  First three inputs corresponds to 3 columns of a truth table
3516 //  and fourth input captures the logic function.
3517 //
3518 //  eg.  fn = (in1 AND in2) OR in3;
3519 //
3520 //      MacroNode(in1,in2,in3,fn)
3521 //
3522 //  -----------------
3523 //  in1 in2 in3  fn
3524 //  -----------------
3525 //  0    0   0    0
3526 //  0    0   1    1
3527 //  0    1   0    0
3528 //  0    1   1    1
3529 //  1    0   0    0
3530 //  1    0   1    1
3531 //  1    1   0    1
3532 //  1    1   1    1
3533 //
3534 
3535 uint Compile::eval_macro_logic_op(uint func, uint in1 , uint in2, uint in3) {
3536   int res = 0;
3537   for (int i = 0; i < 8; i++) {
3538     int bit1 = extract_bit(in1, i);
3539     int bit2 = extract_bit(in2, i);
3540     int bit3 = extract_bit(in3, i);
3541 
3542     int func_bit_pos = (bit1 << 2 | bit2 << 1 | bit3);
3543     int func_bit = extract_bit(func, func_bit_pos);
3544 
3545     res |= func_bit << i;
3546   }
3547   return res;
3548 }
3549 
3550 static uint eval_operand(Node* n, HashTable<Node*,uint>& eval_map) {
3551   assert(n != nullptr, "");
3552   assert(eval_map.contains(n), "absent");
3553   return *(eval_map.get(n));
3554 }
3555 
3556 static void eval_operands(Node* n,
3557                           uint& func1, uint& func2, uint& func3,
3558                           HashTable<Node*,uint>& eval_map) {
3559   assert(is_vector_bitwise_op(n), "");
3560 
3561   if (is_vector_unary_bitwise_op(n)) {
3562     Node* opnd = n->in(1);
3563     if (VectorNode::is_vector_bitwise_not_pattern(n) && VectorNode::is_all_ones_vector(opnd)) {
3564       opnd = n->in(2);
3565     }
3566     func1 = eval_operand(opnd, eval_map);
3567   } else if (is_vector_binary_bitwise_op(n)) {
3568     func1 = eval_operand(n->in(1), eval_map);
3569     func2 = eval_operand(n->in(2), eval_map);
3570   } else {
3571     assert(is_vector_ternary_bitwise_op(n), "unknown operation");
3572     func1 = eval_operand(n->in(1), eval_map);
3573     func2 = eval_operand(n->in(2), eval_map);
3574     func3 = eval_operand(n->in(3), eval_map);
3575   }
3576 }
3577 
3578 uint Compile::compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs) {
3579   assert(inputs.size() <= 3, "sanity");
3580   ResourceMark rm;
3581   uint res = 0;
3582   HashTable<Node*,uint> eval_map;
3583 
3584   // Populate precomputed functions for inputs.
3585   // Each input corresponds to one column of 3 input truth-table.
3586   uint input_funcs[] = { 0xAA,   // (_, _, c) -> c
3587                          0xCC,   // (_, b, _) -> b
3588                          0xF0 }; // (a, _, _) -> a
3589   for (uint i = 0; i < inputs.size(); i++) {
3590     eval_map.put(inputs.at(i), input_funcs[2-i]);
3591   }
3592 
3593   for (uint i = 0; i < partition.size(); i++) {
3594     Node* n = partition.at(i);
3595 
3596     uint func1 = 0, func2 = 0, func3 = 0;
3597     eval_operands(n, func1, func2, func3, eval_map);
3598 
3599     switch (n->Opcode()) {
3600       case Op_OrV:
3601         assert(func3 == 0, "not binary");
3602         res = func1 | func2;
3603         break;
3604       case Op_AndV:
3605         assert(func3 == 0, "not binary");
3606         res = func1 & func2;
3607         break;
3608       case Op_XorV:
3609         if (VectorNode::is_vector_bitwise_not_pattern(n)) {
3610           assert(func2 == 0 && func3 == 0, "not unary");
3611           res = (~func1) & 0xFF;
3612         } else {
3613           assert(func3 == 0, "not binary");
3614           res = func1 ^ func2;
3615         }
3616         break;
3617       case Op_MacroLogicV:
3618         // Ordering of inputs may change during evaluation of sub-tree
3619         // containing MacroLogic node as a child node, thus a re-evaluation
3620         // makes sure that function is evaluated in context of current
3621         // inputs.
3622         res = eval_macro_logic_op(n->in(4)->get_int(), func1, func2, func3);
3623         break;
3624 
3625       default: assert(false, "not supported: %s", n->Name());
3626     }
3627     assert(res <= 0xFF, "invalid");
3628     eval_map.put(n, res);
3629   }
3630   return res;
3631 }
3632 
3633 // Criteria under which nodes gets packed into a macro logic node:-
3634 //  1) Parent and both child nodes are all unmasked or masked with
3635 //     same predicates.
3636 //  2) Masked parent can be packed with left child if it is predicated
3637 //     and both have same predicates.
3638 //  3) Masked parent can be packed with right child if its un-predicated
3639 //     or has matching predication condition.
3640 //  4) An unmasked parent can be packed with an unmasked child.
3641 bool Compile::compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs) {
3642   assert(partition.size() == 0, "not empty");
3643   assert(inputs.size() == 0, "not empty");
3644   if (is_vector_ternary_bitwise_op(n)) {
3645     return false;
3646   }
3647 
3648   bool is_unary_op = is_vector_unary_bitwise_op(n);
3649   if (is_unary_op) {
3650     assert(collect_unique_inputs(n, inputs) == 1, "not unary");
3651     return false; // too few inputs
3652   }
3653 
3654   bool pack_left_child = true;
3655   bool pack_right_child = true;
3656 
3657   bool left_child_LOP = is_vector_bitwise_op(n->in(1));
3658   bool right_child_LOP = is_vector_bitwise_op(n->in(2));
3659 
3660   int left_child_input_cnt = 0;
3661   int right_child_input_cnt = 0;
3662 
3663   bool parent_is_predicated = n->is_predicated_vector();
3664   bool left_child_predicated = n->in(1)->is_predicated_vector();
3665   bool right_child_predicated = n->in(2)->is_predicated_vector();
3666 
3667   Node* parent_pred = parent_is_predicated ? n->in(n->req()-1) : nullptr;
3668   Node* left_child_pred = left_child_predicated ? n->in(1)->in(n->in(1)->req()-1) : nullptr;
3669   Node* right_child_pred = right_child_predicated ? n->in(1)->in(n->in(1)->req()-1) : nullptr;
3670 
3671   do {
3672     if (pack_left_child && left_child_LOP &&
3673         ((!parent_is_predicated && !left_child_predicated) ||
3674         ((parent_is_predicated && left_child_predicated &&
3675           parent_pred == left_child_pred)))) {
3676        partition.push(n->in(1));
3677        left_child_input_cnt = collect_unique_inputs(n->in(1), inputs);
3678     } else {
3679        inputs.push(n->in(1));
3680        left_child_input_cnt = 1;
3681     }
3682 
3683     if (pack_right_child && right_child_LOP &&
3684         (!right_child_predicated ||
3685          (right_child_predicated && parent_is_predicated &&
3686           parent_pred == right_child_pred))) {
3687        partition.push(n->in(2));
3688        right_child_input_cnt = collect_unique_inputs(n->in(2), inputs);
3689     } else {
3690        inputs.push(n->in(2));
3691        right_child_input_cnt = 1;
3692     }
3693 
3694     if (inputs.size() > 3) {
3695       assert(partition.size() > 0, "");
3696       inputs.clear();
3697       partition.clear();
3698       if (left_child_input_cnt > right_child_input_cnt) {
3699         pack_left_child = false;
3700       } else {
3701         pack_right_child = false;
3702       }
3703     } else {
3704       break;
3705     }
3706   } while(true);
3707 
3708   if(partition.size()) {
3709     partition.push(n);
3710   }
3711 
3712   return (partition.size() == 2 || partition.size() == 3) &&
3713          (inputs.size()    == 2 || inputs.size()    == 3);
3714 }
3715 
3716 void Compile::process_logic_cone_root(PhaseIterGVN &igvn, Node *n, VectorSet &visited) {
3717   assert(is_vector_bitwise_op(n), "not a root");
3718 
3719   visited.set(n->_idx);
3720 
3721   // 1) Do a DFS walk over the logic cone.
3722   for (uint i = 1; i < n->req(); i++) {
3723     Node* in = n->in(i);
3724     if (!visited.test(in->_idx) && is_vector_bitwise_op(in)) {
3725       process_logic_cone_root(igvn, in, visited);
3726     }
3727   }
3728 
3729   // 2) Bottom up traversal: Merge node[s] with
3730   // the parent to form macro logic node.
3731   Unique_Node_List partition;
3732   Unique_Node_List inputs;
3733   if (compute_logic_cone(n, partition, inputs)) {
3734     const TypeVect* vt = n->bottom_type()->is_vect();
3735     Node* pn = partition.at(partition.size() - 1);
3736     Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : nullptr;
3737     if (mask == nullptr ||
3738         Matcher::match_rule_supported_vector_masked(Op_MacroLogicV, vt->length(), vt->element_basic_type())) {
3739       Node* macro_logic = xform_to_MacroLogicV(igvn, vt, partition, inputs);
3740       VectorNode::trace_new_vector(macro_logic, "MacroLogic");
3741       igvn.replace_node(n, macro_logic);
3742     }
3743   }
3744 }
3745 
3746 void Compile::optimize_logic_cones(PhaseIterGVN &igvn) {
3747   ResourceMark rm;
3748   if (Matcher::match_rule_supported(Op_MacroLogicV)) {
3749     Unique_Node_List list;
3750     collect_logic_cone_roots(list);
3751 
3752     while (list.size() > 0) {
3753       Node* n = list.pop();
3754       const TypeVect* vt = n->bottom_type()->is_vect();
3755       bool supported = Matcher::match_rule_supported_vector(Op_MacroLogicV, vt->length(), vt->element_basic_type());
3756       if (supported) {
3757         VectorSet visited(comp_arena());
3758         process_logic_cone_root(igvn, n, visited);
3759       }
3760     }
3761   }
3762 }
3763 
3764 //------------------------------Code_Gen---------------------------------------
3765 // Given a graph, generate code for it
3766 void Compile::Code_Gen() {
3767   if (failing()) {
3768     return;
3769   }
3770 
3771   // Perform instruction selection.  You might think we could reclaim Matcher
3772   // memory PDQ, but actually the Matcher is used in generating spill code.
3773   // Internals of the Matcher (including some VectorSets) must remain live
3774   // for awhile - thus I cannot reclaim Matcher memory lest a VectorSet usage
3775   // set a bit in reclaimed memory.
3776 
3777   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
3778   // nodes.  Mapping is only valid at the root of each matched subtree.
3779   NOT_PRODUCT( verify_graph_edges(); )
3780 
3781   Matcher matcher;
3782   _matcher = &matcher;
3783   {
3784     TracePhase tp(_t_matcher);
3785     matcher.match();
3786     if (failing()) {
3787       return;
3788     }
3789   }
3790   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
3791   // nodes.  Mapping is only valid at the root of each matched subtree.
3792   NOT_PRODUCT( verify_graph_edges(); )
3793 
3794   // If you have too many nodes, or if matching has failed, bail out
3795   check_node_count(0, "out of nodes matching instructions");
3796   if (failing()) {
3797     return;
3798   }
3799 
3800   print_method(PHASE_MATCHING, 2);
3801 
3802   // Build a proper-looking CFG
3803   PhaseCFG cfg(node_arena(), root(), matcher);
3804   if (failing()) {
3805     return;
3806   }
3807   _cfg = &cfg;
3808   {
3809     TracePhase tp(_t_scheduler);
3810     bool success = cfg.do_global_code_motion();
3811     if (!success) {
3812       return;
3813     }
3814 
3815     print_method(PHASE_GLOBAL_CODE_MOTION, 2);
3816     NOT_PRODUCT( verify_graph_edges(); )
3817     cfg.verify();
3818     if (failing()) {
3819       return;
3820     }
3821   }
3822 
3823   PhaseChaitin regalloc(unique(), cfg, matcher, false);
3824   _regalloc = &regalloc;
3825   {
3826     TracePhase tp(_t_registerAllocation);
3827     // Perform register allocation.  After Chaitin, use-def chains are
3828     // no longer accurate (at spill code) and so must be ignored.
3829     // Node->LRG->reg mappings are still accurate.
3830     _regalloc->Register_Allocate();
3831 
3832     // Bail out if the allocator builds too many nodes
3833     if (failing()) {
3834       return;
3835     }
3836 
3837     print_method(PHASE_REGISTER_ALLOCATION, 2);
3838   }
3839 
3840   // Prior to register allocation we kept empty basic blocks in case the
3841   // the allocator needed a place to spill.  After register allocation we
3842   // are not adding any new instructions.  If any basic block is empty, we
3843   // can now safely remove it.
3844   {
3845     TracePhase tp(_t_blockOrdering);
3846     cfg.remove_empty_blocks();
3847     if (do_freq_based_layout()) {
3848       PhaseBlockLayout layout(cfg);
3849     } else {
3850       cfg.set_loop_alignment();
3851     }
3852     cfg.fixup_flow();
3853     cfg.remove_unreachable_blocks();
3854     cfg.verify_dominator_tree();
3855     print_method(PHASE_BLOCK_ORDERING, 3);
3856   }
3857 
3858   // Apply peephole optimizations
3859   if( OptoPeephole ) {
3860     TracePhase tp(_t_peephole);
3861     PhasePeephole peep( _regalloc, cfg);
3862     peep.do_transform();
3863     print_method(PHASE_PEEPHOLE, 3);
3864   }
3865 
3866   // Do late expand if CPU requires this.
3867   if (Matcher::require_postalloc_expand) {
3868     TracePhase tp(_t_postalloc_expand);
3869     cfg.postalloc_expand(_regalloc);
3870     print_method(PHASE_POSTALLOC_EXPAND, 3);
3871   }
3872 
3873 #ifdef ASSERT
3874   {
3875     CompilationMemoryStatistic::do_test_allocations();
3876     if (failing()) return;
3877   }
3878 #endif
3879 
3880   // Convert Nodes to instruction bits in a buffer
3881   {
3882     TracePhase tp(_t_output);
3883     PhaseOutput output;
3884     output.Output();
3885     if (failing())  return;
3886     output.install();
3887     print_method(PHASE_FINAL_CODE, 1); // Compile::_output is not null here
3888   }
3889 
3890   // He's dead, Jim.
3891   _cfg     = (PhaseCFG*)((intptr_t)0xdeadbeef);
3892   _regalloc = (PhaseChaitin*)((intptr_t)0xdeadbeef);
3893 }
3894 
3895 //------------------------------Final_Reshape_Counts---------------------------
3896 // This class defines counters to help identify when a method
3897 // may/must be executed using hardware with only 24-bit precision.
3898 struct Final_Reshape_Counts : public StackObj {
3899   int  _call_count;             // count non-inlined 'common' calls
3900   int  _float_count;            // count float ops requiring 24-bit precision
3901   int  _double_count;           // count double ops requiring more precision
3902   int  _java_call_count;        // count non-inlined 'java' calls
3903   int  _inner_loop_count;       // count loops which need alignment
3904   VectorSet _visited;           // Visitation flags
3905   Node_List _tests;             // Set of IfNodes & PCTableNodes
3906 
3907   Final_Reshape_Counts() :
3908     _call_count(0), _float_count(0), _double_count(0),
3909     _java_call_count(0), _inner_loop_count(0) { }
3910 
3911   void inc_call_count  () { _call_count  ++; }
3912   void inc_float_count () { _float_count ++; }
3913   void inc_double_count() { _double_count++; }
3914   void inc_java_call_count() { _java_call_count++; }
3915   void inc_inner_loop_count() { _inner_loop_count++; }
3916 
3917   int  get_call_count  () const { return _call_count  ; }
3918   int  get_float_count () const { return _float_count ; }
3919   int  get_double_count() const { return _double_count; }
3920   int  get_java_call_count() const { return _java_call_count; }
3921   int  get_inner_loop_count() const { return _inner_loop_count; }
3922 };
3923 
3924 //------------------------------final_graph_reshaping_impl----------------------
3925 // Implement items 1-5 from final_graph_reshaping below.
3926 void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3927 
3928   if ( n->outcnt() == 0 ) return; // dead node
3929   uint nop = n->Opcode();
3930 
3931   // Check for 2-input instruction with "last use" on right input.
3932   // Swap to left input.  Implements item (2).
3933   if( n->req() == 3 &&          // two-input instruction
3934       n->in(1)->outcnt() > 1 && // left use is NOT a last use
3935       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
3936       n->in(2)->outcnt() == 1 &&// right use IS a last use
3937       !n->in(2)->is_Con() ) {   // right use is not a constant
3938     // Check for commutative opcode
3939     switch( nop ) {
3940     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddHF:  case Op_AddL:
3941     case Op_MaxI:  case Op_MaxL:  case Op_MaxF:  case Op_MaxD:
3942     case Op_MinI:  case Op_MinL:  case Op_MinF:  case Op_MinD:
3943     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulHF:  case Op_MulL:
3944     case Op_AndL:  case Op_XorL:  case Op_OrL:
3945     case Op_AndI:  case Op_XorI:  case Op_OrI: {
3946       // Move "last use" input to left by swapping inputs
3947       n->swap_edges(1, 2);
3948       break;
3949     }
3950     default:
3951       break;
3952     }
3953   }
3954 
3955 #ifdef ASSERT
3956   if( n->is_Mem() ) {
3957     int alias_idx = get_alias_index(n->as_Mem()->adr_type());
3958     assert( n->in(0) != nullptr || alias_idx != Compile::AliasIdxRaw ||
3959             // oop will be recorded in oop map if load crosses safepoint
3960             (n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
3961                               LoadNode::is_immutable_value(n->in(MemNode::Address)))),
3962             "raw memory operations should have control edge");
3963   }
3964   if (n->is_MemBar()) {
3965     MemBarNode* mb = n->as_MemBar();
3966     if (mb->trailing_store() || mb->trailing_load_store()) {
3967       assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
3968       Node* mem = BarrierSet::barrier_set()->barrier_set_c2()->step_over_gc_barrier(mb->in(MemBarNode::Precedent));
3969       assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
3970              (mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
3971     } else if (mb->leading()) {
3972       assert(mb->trailing_membar()->leading_membar() == mb, "bad membar pair");
3973     }
3974   }
3975 #endif
3976   // Count FPU ops and common calls, implements item (3)
3977   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop, dead_nodes);
3978   if (!gc_handled) {
3979     final_graph_reshaping_main_switch(n, frc, nop, dead_nodes);
3980   }
3981 
3982   // Collect CFG split points
3983   if (n->is_MultiBranch() && !n->is_RangeCheck()) {
3984     frc._tests.push(n);
3985   }
3986 }
3987 
3988 void Compile::handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned) {
3989   if (!UseDivMod) {
3990     return;
3991   }
3992 
3993   // Check if "a % b" and "a / b" both exist
3994   Node* d = n->find_similar(Op_DivIL(bt, is_unsigned));
3995   if (d == nullptr) {
3996     return;
3997   }
3998 
3999   // Replace them with a fused divmod if supported
4000   if (Matcher::has_match_rule(Op_DivModIL(bt, is_unsigned))) {
4001     DivModNode* divmod = DivModNode::make(n, bt, is_unsigned);
4002     // If the divisor input for a Div (or Mod etc.) is not zero, then the control input of the Div is set to zero.
4003     // It could be that the divisor input is found not zero because its type is narrowed down by a CastII in the
4004     // subgraph for that input. Range check CastIIs are removed during final graph reshape. To preserve the dependency
4005     // carried by a CastII, precedence edges are added to the Div node. We need to transfer the precedence edges to the
4006     // DivMod node so the dependency is not lost.
4007     divmod->add_prec_from(n);
4008     divmod->add_prec_from(d);
4009     d->subsume_by(divmod->div_proj(), this);
4010     n->subsume_by(divmod->mod_proj(), this);
4011   } else {
4012     // Replace "a % b" with "a - ((a / b) * b)"
4013     Node* mult = MulNode::make(d, d->in(2), bt);
4014     Node* sub = SubNode::make(d->in(1), mult, bt);
4015     n->subsume_by(sub, this);
4016   }
4017 }
4018 
4019 void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes) {
4020   switch( nop ) {
4021   // Count all float operations that may use FPU
4022   case Op_AddHF:
4023   case Op_MulHF:
4024   case Op_AddF:
4025   case Op_SubF:
4026   case Op_MulF:
4027   case Op_DivF:
4028   case Op_NegF:
4029   case Op_ModF:
4030   case Op_ConvI2F:
4031   case Op_ConF:
4032   case Op_CmpF:
4033   case Op_CmpF3:
4034   case Op_StoreF:
4035   case Op_LoadF:
4036   // case Op_ConvL2F: // longs are split into 32-bit halves
4037     frc.inc_float_count();
4038     break;
4039 
4040   case Op_ConvF2D:
4041   case Op_ConvD2F:
4042     frc.inc_float_count();
4043     frc.inc_double_count();
4044     break;
4045 
4046   // Count all double operations that may use FPU
4047   case Op_AddD:
4048   case Op_SubD:
4049   case Op_MulD:
4050   case Op_DivD:
4051   case Op_NegD:
4052   case Op_ModD:
4053   case Op_ConvI2D:
4054   case Op_ConvD2I:
4055   // case Op_ConvL2D: // handled by leaf call
4056   // case Op_ConvD2L: // handled by leaf call
4057   case Op_ConD:
4058   case Op_CmpD:
4059   case Op_CmpD3:
4060   case Op_StoreD:
4061   case Op_LoadD:
4062   case Op_LoadD_unaligned:
4063     frc.inc_double_count();
4064     break;
4065   case Op_Opaque1:              // Remove Opaque Nodes before matching
4066     n->subsume_by(n->in(1), this);
4067     break;
4068   case Op_CallLeafPure: {
4069     // If the pure call is not supported, then lower to a CallLeaf.
4070     if (!Matcher::match_rule_supported(Op_CallLeafPure)) {
4071       CallNode* call = n->as_Call();
4072       CallNode* new_call = new CallLeafNode(call->tf(), call->entry_point(),
4073                                             call->_name, TypeRawPtr::BOTTOM);
4074       new_call->init_req(TypeFunc::Control, call->in(TypeFunc::Control));
4075       new_call->init_req(TypeFunc::I_O, C->top());
4076       new_call->init_req(TypeFunc::Memory, C->top());
4077       new_call->init_req(TypeFunc::ReturnAdr, C->top());
4078       new_call->init_req(TypeFunc::FramePtr, C->top());
4079       for (unsigned int i = TypeFunc::Parms; i < call->tf()->domain_sig()->cnt(); i++) {
4080         new_call->init_req(i, call->in(i));
4081       }
4082       n->subsume_by(new_call, this);
4083     }
4084     frc.inc_call_count();
4085     break;
4086   }
4087   case Op_CallStaticJava:
4088   case Op_CallJava:
4089   case Op_CallDynamicJava:
4090     frc.inc_java_call_count(); // Count java call site;
4091   case Op_CallRuntime:
4092   case Op_CallLeaf:
4093   case Op_CallLeafVector:
4094   case Op_CallLeafNoFP: {
4095     assert (n->is_Call(), "");
4096     CallNode *call = n->as_Call();
4097     // Count call sites where the FP mode bit would have to be flipped.
4098     // Do not count uncommon runtime calls:
4099     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
4100     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
4101     if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
4102       frc.inc_call_count();   // Count the call site
4103     } else {                  // See if uncommon argument is shared
4104       Node *n = call->in(TypeFunc::Parms);
4105       int nop = n->Opcode();
4106       // Clone shared simple arguments to uncommon calls, item (1).
4107       if (n->outcnt() > 1 &&
4108           !n->is_Proj() &&
4109           nop != Op_CreateEx &&
4110           nop != Op_CheckCastPP &&
4111           nop != Op_DecodeN &&
4112           nop != Op_DecodeNKlass &&
4113           !n->is_Mem() &&
4114           !n->is_Phi()) {
4115         Node *x = n->clone();
4116         call->set_req(TypeFunc::Parms, x);
4117       }
4118     }
4119     break;
4120   }
4121   case Op_StoreB:
4122   case Op_StoreC:
4123   case Op_StoreI:
4124   case Op_StoreL:
4125   case Op_StoreLSpecial:
4126   case Op_CompareAndSwapB:
4127   case Op_CompareAndSwapS:
4128   case Op_CompareAndSwapI:
4129   case Op_CompareAndSwapL:
4130   case Op_CompareAndSwapP:
4131   case Op_CompareAndSwapN:
4132   case Op_WeakCompareAndSwapB:
4133   case Op_WeakCompareAndSwapS:
4134   case Op_WeakCompareAndSwapI:
4135   case Op_WeakCompareAndSwapL:
4136   case Op_WeakCompareAndSwapP:
4137   case Op_WeakCompareAndSwapN:
4138   case Op_CompareAndExchangeB:
4139   case Op_CompareAndExchangeS:
4140   case Op_CompareAndExchangeI:
4141   case Op_CompareAndExchangeL:
4142   case Op_CompareAndExchangeP:
4143   case Op_CompareAndExchangeN:
4144   case Op_GetAndAddS:
4145   case Op_GetAndAddB:
4146   case Op_GetAndAddI:
4147   case Op_GetAndAddL:
4148   case Op_GetAndSetS:
4149   case Op_GetAndSetB:
4150   case Op_GetAndSetI:
4151   case Op_GetAndSetL:
4152   case Op_GetAndSetP:
4153   case Op_GetAndSetN:
4154   case Op_StoreP:
4155   case Op_StoreN:
4156   case Op_StoreNKlass:
4157   case Op_LoadB:
4158   case Op_LoadUB:
4159   case Op_LoadUS:
4160   case Op_LoadI:
4161   case Op_LoadKlass:
4162   case Op_LoadNKlass:
4163   case Op_LoadL:
4164   case Op_LoadL_unaligned:
4165   case Op_LoadP:
4166   case Op_LoadN:
4167   case Op_LoadRange:
4168   case Op_LoadS:
4169     break;
4170 
4171   case Op_AddP: {               // Assert sane base pointers
4172     Node *addp = n->in(AddPNode::Address);
4173     assert(n->as_AddP()->address_input_has_same_base(), "Base pointers must match (addp %u)", addp->_idx );
4174 #ifdef _LP64
4175     if (addp->Opcode() == Op_ConP &&
4176         addp == n->in(AddPNode::Base) &&
4177         n->in(AddPNode::Offset)->is_Con()) {
4178       // If the transformation of ConP to ConN+DecodeN is beneficial depends
4179       // on the platform and on the compressed oops mode.
4180       // Use addressing with narrow klass to load with offset on x86.
4181       // Some platforms can use the constant pool to load ConP.
4182       // Do this transformation here since IGVN will convert ConN back to ConP.
4183       const Type* t = addp->bottom_type();
4184       bool is_oop   = t->isa_oopptr() != nullptr;
4185       bool is_klass = t->isa_klassptr() != nullptr;
4186 
4187       if ((is_oop   && UseCompressedOops          && Matcher::const_oop_prefer_decode()  ) ||
4188           (is_klass && Matcher::const_klass_prefer_decode() &&
4189            t->isa_klassptr()->exact_klass()->is_in_encoding_range())) {
4190         Node* nn = nullptr;
4191 
4192         int op = is_oop ? Op_ConN : Op_ConNKlass;
4193 
4194         // Look for existing ConN node of the same exact type.
4195         Node* r  = root();
4196         uint cnt = r->outcnt();
4197         for (uint i = 0; i < cnt; i++) {
4198           Node* m = r->raw_out(i);
4199           if (m!= nullptr && m->Opcode() == op &&
4200               m->bottom_type()->make_ptr() == t) {
4201             nn = m;
4202             break;
4203           }
4204         }
4205         if (nn != nullptr) {
4206           // Decode a narrow oop to match address
4207           // [R12 + narrow_oop_reg<<3 + offset]
4208           if (is_oop) {
4209             nn = new DecodeNNode(nn, t);
4210           } else {
4211             nn = new DecodeNKlassNode(nn, t);
4212           }
4213           // Check for succeeding AddP which uses the same Base.
4214           // Otherwise we will run into the assertion above when visiting that guy.
4215           for (uint i = 0; i < n->outcnt(); ++i) {
4216             Node *out_i = n->raw_out(i);
4217             if (out_i && out_i->is_AddP() && out_i->in(AddPNode::Base) == addp) {
4218               out_i->set_req(AddPNode::Base, nn);
4219 #ifdef ASSERT
4220               for (uint j = 0; j < out_i->outcnt(); ++j) {
4221                 Node *out_j = out_i->raw_out(j);
4222                 assert(out_j == nullptr || !out_j->is_AddP() || out_j->in(AddPNode::Base) != addp,
4223                        "more than 2 AddP nodes in a chain (out_j %u)", out_j->_idx);
4224               }
4225 #endif
4226             }
4227           }
4228           n->set_req(AddPNode::Base, nn);
4229           n->set_req(AddPNode::Address, nn);
4230           if (addp->outcnt() == 0) {
4231             addp->disconnect_inputs(this);
4232           }
4233         }
4234       }
4235     }
4236 #endif
4237     break;
4238   }
4239 
4240   case Op_CastPP: {
4241     // Remove CastPP nodes to gain more freedom during scheduling but
4242     // keep the dependency they encode as control or precedence edges
4243     // (if control is set already) on memory operations. Some CastPP
4244     // nodes don't have a control (don't carry a dependency): skip
4245     // those.
4246     if (n->in(0) != nullptr) {
4247       ResourceMark rm;
4248       Unique_Node_List wq;
4249       wq.push(n);
4250       for (uint next = 0; next < wq.size(); ++next) {
4251         Node *m = wq.at(next);
4252         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
4253           Node* use = m->fast_out(i);
4254           if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
4255             use->ensure_control_or_add_prec(n->in(0));
4256           } else {
4257             switch(use->Opcode()) {
4258             case Op_AddP:
4259             case Op_DecodeN:
4260             case Op_DecodeNKlass:
4261             case Op_CheckCastPP:
4262             case Op_CastPP:
4263               wq.push(use);
4264               break;
4265             }
4266           }
4267         }
4268       }
4269     }
4270     const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false);
4271     if (is_LP64 && n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
4272       Node* in1 = n->in(1);
4273       const Type* t = n->bottom_type();
4274       Node* new_in1 = in1->clone();
4275       new_in1->as_DecodeN()->set_type(t);
4276 
4277       if (!Matcher::narrow_oop_use_complex_address()) {
4278         //
4279         // x86, ARM and friends can handle 2 adds in addressing mode
4280         // and Matcher can fold a DecodeN node into address by using
4281         // a narrow oop directly and do implicit null check in address:
4282         //
4283         // [R12 + narrow_oop_reg<<3 + offset]
4284         // NullCheck narrow_oop_reg
4285         //
4286         // On other platforms (Sparc) we have to keep new DecodeN node and
4287         // use it to do implicit null check in address:
4288         //
4289         // decode_not_null narrow_oop_reg, base_reg
4290         // [base_reg + offset]
4291         // NullCheck base_reg
4292         //
4293         // Pin the new DecodeN node to non-null path on these platform (Sparc)
4294         // to keep the information to which null check the new DecodeN node
4295         // corresponds to use it as value in implicit_null_check().
4296         //
4297         new_in1->set_req(0, n->in(0));
4298       }
4299 
4300       n->subsume_by(new_in1, this);
4301       if (in1->outcnt() == 0) {
4302         in1->disconnect_inputs(this);
4303       }
4304     } else {
4305       n->subsume_by(n->in(1), this);
4306       if (n->outcnt() == 0) {
4307         n->disconnect_inputs(this);
4308       }
4309     }
4310     break;
4311   }
4312   case Op_CastII: {
4313     n->as_CastII()->remove_range_check_cast(this);
4314     break;
4315   }
4316 #ifdef _LP64
4317   case Op_CmpP:
4318     // Do this transformation here to preserve CmpPNode::sub() and
4319     // other TypePtr related Ideal optimizations (for example, ptr nullness).
4320     if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
4321       Node* in1 = n->in(1);
4322       Node* in2 = n->in(2);
4323       if (!in1->is_DecodeNarrowPtr()) {
4324         in2 = in1;
4325         in1 = n->in(2);
4326       }
4327       assert(in1->is_DecodeNarrowPtr(), "sanity");
4328 
4329       Node* new_in2 = nullptr;
4330       if (in2->is_DecodeNarrowPtr()) {
4331         assert(in2->Opcode() == in1->Opcode(), "must be same node type");
4332         new_in2 = in2->in(1);
4333       } else if (in2->Opcode() == Op_ConP) {
4334         const Type* t = in2->bottom_type();
4335         if (t == TypePtr::NULL_PTR) {
4336           assert(in1->is_DecodeN(), "compare klass to null?");
4337           // Don't convert CmpP null check into CmpN if compressed
4338           // oops implicit null check is not generated.
4339           // This will allow to generate normal oop implicit null check.
4340           if (Matcher::gen_narrow_oop_implicit_null_checks())
4341             new_in2 = ConNode::make(TypeNarrowOop::NULL_PTR);
4342           //
4343           // This transformation together with CastPP transformation above
4344           // will generated code for implicit null checks for compressed oops.
4345           //
4346           // The original code after Optimize()
4347           //
4348           //    LoadN memory, narrow_oop_reg
4349           //    decode narrow_oop_reg, base_reg
4350           //    CmpP base_reg, nullptr
4351           //    CastPP base_reg // NotNull
4352           //    Load [base_reg + offset], val_reg
4353           //
4354           // after these transformations will be
4355           //
4356           //    LoadN memory, narrow_oop_reg
4357           //    CmpN narrow_oop_reg, nullptr
4358           //    decode_not_null narrow_oop_reg, base_reg
4359           //    Load [base_reg + offset], val_reg
4360           //
4361           // and the uncommon path (== nullptr) will use narrow_oop_reg directly
4362           // since narrow oops can be used in debug info now (see the code in
4363           // final_graph_reshaping_walk()).
4364           //
4365           // At the end the code will be matched to
4366           // on x86:
4367           //
4368           //    Load_narrow_oop memory, narrow_oop_reg
4369           //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
4370           //    NullCheck narrow_oop_reg
4371           //
4372           // and on sparc:
4373           //
4374           //    Load_narrow_oop memory, narrow_oop_reg
4375           //    decode_not_null narrow_oop_reg, base_reg
4376           //    Load [base_reg + offset], val_reg
4377           //    NullCheck base_reg
4378           //
4379         } else if (t->isa_oopptr()) {
4380           new_in2 = ConNode::make(t->make_narrowoop());
4381         } else if (t->isa_klassptr()) {
4382           ciKlass* klass = t->is_klassptr()->exact_klass();
4383           if (klass->is_in_encoding_range()) {
4384             new_in2 = ConNode::make(t->make_narrowklass());
4385           }
4386         }
4387       }
4388       if (new_in2 != nullptr) {
4389         Node* cmpN = new CmpNNode(in1->in(1), new_in2);
4390         n->subsume_by(cmpN, this);
4391         if (in1->outcnt() == 0) {
4392           in1->disconnect_inputs(this);
4393         }
4394         if (in2->outcnt() == 0) {
4395           in2->disconnect_inputs(this);
4396         }
4397       }
4398     }
4399     break;
4400 
4401   case Op_DecodeN:
4402   case Op_DecodeNKlass:
4403     assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
4404     // DecodeN could be pinned when it can't be fold into
4405     // an address expression, see the code for Op_CastPP above.
4406     assert(n->in(0) == nullptr || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
4407     break;
4408 
4409   case Op_EncodeP:
4410   case Op_EncodePKlass: {
4411     Node* in1 = n->in(1);
4412     if (in1->is_DecodeNarrowPtr()) {
4413       n->subsume_by(in1->in(1), this);
4414     } else if (in1->Opcode() == Op_ConP) {
4415       const Type* t = in1->bottom_type();
4416       if (t == TypePtr::NULL_PTR) {
4417         assert(t->isa_oopptr(), "null klass?");
4418         n->subsume_by(ConNode::make(TypeNarrowOop::NULL_PTR), this);
4419       } else if (t->isa_oopptr()) {
4420         n->subsume_by(ConNode::make(t->make_narrowoop()), this);
4421       } else if (t->isa_klassptr()) {
4422         ciKlass* klass = t->is_klassptr()->exact_klass();
4423         if (klass->is_in_encoding_range()) {
4424           n->subsume_by(ConNode::make(t->make_narrowklass()), this);
4425         } else {
4426           assert(false, "unencodable klass in ConP -> EncodeP");
4427           C->record_failure("unencodable klass in ConP -> EncodeP");
4428         }
4429       }
4430     }
4431     if (in1->outcnt() == 0) {
4432       in1->disconnect_inputs(this);
4433     }
4434     break;
4435   }
4436 
4437   case Op_Proj: {
4438     if (OptimizeStringConcat || IncrementalInline) {
4439       ProjNode* proj = n->as_Proj();
4440       if (proj->_is_io_use) {
4441         assert(proj->_con == TypeFunc::I_O || proj->_con == TypeFunc::Memory, "");
4442         // Separate projections were used for the exception path which
4443         // are normally removed by a late inline.  If it wasn't inlined
4444         // then they will hang around and should just be replaced with
4445         // the original one. Merge them.
4446         Node* non_io_proj = proj->in(0)->as_Multi()->proj_out_or_null(proj->_con, false /*is_io_use*/);
4447         if (non_io_proj  != nullptr) {
4448           proj->subsume_by(non_io_proj , this);
4449         }
4450       }
4451     }
4452     break;
4453   }
4454 
4455   case Op_Phi:
4456     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
4457       // The EncodeP optimization may create Phi with the same edges
4458       // for all paths. It is not handled well by Register Allocator.
4459       Node* unique_in = n->in(1);
4460       assert(unique_in != nullptr, "");
4461       uint cnt = n->req();
4462       for (uint i = 2; i < cnt; i++) {
4463         Node* m = n->in(i);
4464         assert(m != nullptr, "");
4465         if (unique_in != m)
4466           unique_in = nullptr;
4467       }
4468       if (unique_in != nullptr) {
4469         n->subsume_by(unique_in, this);
4470       }
4471     }
4472     break;
4473 
4474 #endif
4475 
4476   case Op_ModI:
4477     handle_div_mod_op(n, T_INT, false);
4478     break;
4479 
4480   case Op_ModL:
4481     handle_div_mod_op(n, T_LONG, false);
4482     break;
4483 
4484   case Op_UModI:
4485     handle_div_mod_op(n, T_INT, true);
4486     break;
4487 
4488   case Op_UModL:
4489     handle_div_mod_op(n, T_LONG, true);
4490     break;
4491 
4492   case Op_LoadVector:
4493   case Op_StoreVector:
4494 #ifdef ASSERT
4495     // Add VerifyVectorAlignment node between adr and load / store.
4496     if (VerifyAlignVector && Matcher::has_match_rule(Op_VerifyVectorAlignment)) {
4497       bool must_verify_alignment = n->is_LoadVector() ? n->as_LoadVector()->must_verify_alignment() :
4498                                                         n->as_StoreVector()->must_verify_alignment();
4499       if (must_verify_alignment) {
4500         jlong vector_width = n->is_LoadVector() ? n->as_LoadVector()->memory_size() :
4501                                                   n->as_StoreVector()->memory_size();
4502         // The memory access should be aligned to the vector width in bytes.
4503         // However, the underlying array is possibly less well aligned, but at least
4504         // to ObjectAlignmentInBytes. Hence, even if multiple arrays are accessed in
4505         // a loop we can expect at least the following alignment:
4506         jlong guaranteed_alignment = MIN2(vector_width, (jlong)ObjectAlignmentInBytes);
4507         assert(2 <= guaranteed_alignment && guaranteed_alignment <= 64, "alignment must be in range");
4508         assert(is_power_of_2(guaranteed_alignment), "alignment must be power of 2");
4509         // Create mask from alignment. e.g. 0b1000 -> 0b0111
4510         jlong mask = guaranteed_alignment - 1;
4511         Node* mask_con = ConLNode::make(mask);
4512         VerifyVectorAlignmentNode* va = new VerifyVectorAlignmentNode(n->in(MemNode::Address), mask_con);
4513         n->set_req(MemNode::Address, va);
4514       }
4515     }
4516 #endif
4517     break;
4518 
4519   case Op_LoadVectorGather:
4520   case Op_StoreVectorScatter:
4521   case Op_LoadVectorGatherMasked:
4522   case Op_StoreVectorScatterMasked:
4523   case Op_VectorCmpMasked:
4524   case Op_VectorMaskGen:
4525   case Op_LoadVectorMasked:
4526   case Op_StoreVectorMasked:
4527     break;
4528 
4529   case Op_AddReductionVI:
4530   case Op_AddReductionVL:
4531   case Op_AddReductionVHF:
4532   case Op_AddReductionVF:
4533   case Op_AddReductionVD:
4534   case Op_MulReductionVI:
4535   case Op_MulReductionVL:
4536   case Op_MulReductionVHF:
4537   case Op_MulReductionVF:
4538   case Op_MulReductionVD:
4539   case Op_MinReductionV:
4540   case Op_MaxReductionV:
4541   case Op_UMinReductionV:
4542   case Op_UMaxReductionV:
4543   case Op_AndReductionV:
4544   case Op_OrReductionV:
4545   case Op_XorReductionV:
4546     break;
4547 
4548   case Op_PackB:
4549   case Op_PackS:
4550   case Op_PackI:
4551   case Op_PackF:
4552   case Op_PackL:
4553   case Op_PackD:
4554     if (n->req()-1 > 2) {
4555       // Replace many operand PackNodes with a binary tree for matching
4556       PackNode* p = (PackNode*) n;
4557       Node* btp = p->binary_tree_pack(1, n->req());
4558       n->subsume_by(btp, this);
4559     }
4560     break;
4561   case Op_Loop:
4562     // When StressCountedLoop is enabled, this loop may intentionally avoid a counted loop conversion.
4563     // This is expected behavior for the stress mode, which exercises alternative compilation paths.
4564     if (!StressCountedLoop) {
4565       assert(!n->as_Loop()->is_loop_nest_inner_loop() || _loop_opts_cnt == 0, "should have been turned into a counted loop");
4566     }
4567   case Op_CountedLoop:
4568   case Op_LongCountedLoop:
4569   case Op_OuterStripMinedLoop:
4570     if (n->as_Loop()->is_inner_loop()) {
4571       frc.inc_inner_loop_count();
4572     }
4573     n->as_Loop()->verify_strip_mined(0);
4574     break;
4575   case Op_LShiftI:
4576   case Op_RShiftI:
4577   case Op_URShiftI:
4578   case Op_LShiftL:
4579   case Op_RShiftL:
4580   case Op_URShiftL:
4581     if (Matcher::need_masked_shift_count) {
4582       // The cpu's shift instructions don't restrict the count to the
4583       // lower 5/6 bits. We need to do the masking ourselves.
4584       Node* in2 = n->in(2);
4585       juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
4586       const TypeInt* t = in2->find_int_type();
4587       if (t != nullptr && t->is_con()) {
4588         juint shift = t->get_con();
4589         if (shift > mask) { // Unsigned cmp
4590           n->set_req(2, ConNode::make(TypeInt::make(shift & mask)));
4591         }
4592       } else {
4593         if (t == nullptr || t->_lo < 0 || t->_hi > (int)mask) {
4594           Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
4595           n->set_req(2, shift);
4596         }
4597       }
4598       if (in2->outcnt() == 0) { // Remove dead node
4599         in2->disconnect_inputs(this);
4600       }
4601     }
4602     break;
4603   case Op_MemBarStoreStore:
4604   case Op_MemBarRelease:
4605     // Break the link with AllocateNode: it is no longer useful and
4606     // confuses register allocation.
4607     if (n->req() > MemBarNode::Precedent) {
4608       n->set_req(MemBarNode::Precedent, top());
4609     }
4610     break;
4611   case Op_MemBarAcquire: {
4612     if (n->as_MemBar()->trailing_load() && n->req() > MemBarNode::Precedent) {
4613       // At parse time, the trailing MemBarAcquire for a volatile load
4614       // is created with an edge to the load. After optimizations,
4615       // that input may be a chain of Phis. If those phis have no
4616       // other use, then the MemBarAcquire keeps them alive and
4617       // register allocation can be confused.
4618       dead_nodes.push(n->in(MemBarNode::Precedent));
4619       n->set_req(MemBarNode::Precedent, top());
4620     }
4621     break;
4622   }
4623   case Op_Blackhole:
4624     break;
4625   case Op_RangeCheck: {
4626     RangeCheckNode* rc = n->as_RangeCheck();
4627     Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
4628     n->subsume_by(iff, this);
4629     frc._tests.push(iff);
4630     break;
4631   }
4632   case Op_ConvI2L: {
4633     if (!Matcher::convi2l_type_required) {
4634       // Code generation on some platforms doesn't need accurate
4635       // ConvI2L types. Widening the type can help remove redundant
4636       // address computations.
4637       n->as_Type()->set_type(TypeLong::INT);
4638       ResourceMark rm;
4639       Unique_Node_List wq;
4640       wq.push(n);
4641       for (uint next = 0; next < wq.size(); next++) {
4642         Node *m = wq.at(next);
4643 
4644         for(;;) {
4645           // Loop over all nodes with identical inputs edges as m
4646           Node* k = m->find_similar(m->Opcode());
4647           if (k == nullptr) {
4648             break;
4649           }
4650           // Push their uses so we get a chance to remove node made
4651           // redundant
4652           for (DUIterator_Fast imax, i = k->fast_outs(imax); i < imax; i++) {
4653             Node* u = k->fast_out(i);
4654             if (u->Opcode() == Op_LShiftL ||
4655                 u->Opcode() == Op_AddL ||
4656                 u->Opcode() == Op_SubL ||
4657                 u->Opcode() == Op_AddP) {
4658               wq.push(u);
4659             }
4660           }
4661           // Replace all nodes with identical edges as m with m
4662           k->subsume_by(m, this);
4663         }
4664       }
4665     }
4666     break;
4667   }
4668   case Op_CmpUL: {
4669     if (!Matcher::has_match_rule(Op_CmpUL)) {
4670       // No support for unsigned long comparisons
4671       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
4672       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
4673       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
4674       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
4675       Node* andl = new AndLNode(orl, remove_sign_mask);
4676       Node* cmp = new CmpLNode(andl, n->in(2));
4677       n->subsume_by(cmp, this);
4678     }
4679     break;
4680   }
4681 #ifdef ASSERT
4682   case Op_InlineType: {
4683     n->dump(-1);
4684     assert(false, "inline type node was not removed");
4685     break;
4686   }
4687   case Op_ConNKlass: {
4688     const TypePtr* tp = n->as_Type()->type()->make_ptr();
4689     ciKlass* klass = tp->is_klassptr()->exact_klass();
4690     assert(klass->is_in_encoding_range(), "klass cannot be compressed");
4691     break;
4692   }
4693 #endif
4694   default:
4695     assert(!n->is_Call(), "");
4696     assert(!n->is_Mem(), "");
4697     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
4698     break;
4699   }
4700 }
4701 
4702 //------------------------------final_graph_reshaping_walk---------------------
4703 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
4704 // requires that the walk visits a node's inputs before visiting the node.
4705 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
4706   Unique_Node_List sfpt;
4707 
4708   frc._visited.set(root->_idx); // first, mark node as visited
4709   uint cnt = root->req();
4710   Node *n = root;
4711   uint  i = 0;
4712   while (true) {
4713     if (i < cnt) {
4714       // Place all non-visited non-null inputs onto stack
4715       Node* m = n->in(i);
4716       ++i;
4717       if (m != nullptr && !frc._visited.test_set(m->_idx)) {
4718         if (m->is_SafePoint() && m->as_SafePoint()->jvms() != nullptr) {
4719           // compute worst case interpreter size in case of a deoptimization
4720           update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
4721 
4722           sfpt.push(m);
4723         }
4724         cnt = m->req();
4725         nstack.push(n, i); // put on stack parent and next input's index
4726         n = m;
4727         i = 0;
4728       }
4729     } else {
4730       // Now do post-visit work
4731       final_graph_reshaping_impl(n, frc, dead_nodes);
4732       if (nstack.is_empty())
4733         break;             // finished
4734       n = nstack.node();   // Get node from stack
4735       cnt = n->req();
4736       i = nstack.index();
4737       nstack.pop();        // Shift to the next node on stack
4738     }
4739   }
4740 
4741   expand_reachability_edges(sfpt);
4742 
4743   // Skip next transformation if compressed oops are not used.
4744   if (UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks())
4745     return;
4746 
4747   // Go over ReachabilityFence nodes to skip DecodeN nodes for referents.
4748   // The sole purpose of RF node is to keep the referent oop alive and
4749   // decoding the oop for that is not needed.
4750   for (int i = 0; i < C->reachability_fences_count(); i++) {
4751     ReachabilityFenceNode* rf = C->reachability_fence(i);
4752     DecodeNNode* dn = rf->in(1)->isa_DecodeN();
4753     if (dn != nullptr) {
4754       if (!dn->has_non_debug_uses() || Matcher::narrow_oop_use_complex_address()) {
4755         rf->set_req(1, dn->in(1));
4756         if (dn->outcnt() == 0) {
4757           dn->disconnect_inputs(this);
4758         }
4759       }
4760     }
4761   }
4762 
4763   // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
4764   // It could be done for an uncommon traps or any safepoints/calls
4765   // if the DecodeN/DecodeNKlass node is referenced only in a debug info.
4766   while (sfpt.size() > 0) {
4767     n = sfpt.pop();
4768     JVMState *jvms = n->as_SafePoint()->jvms();
4769     assert(jvms != nullptr, "sanity");
4770     int start = jvms->debug_start();
4771     int end   = n->req();
4772     bool is_uncommon = (n->is_CallStaticJava() &&
4773                         n->as_CallStaticJava()->uncommon_trap_request() != 0);
4774     for (int j = start; j < end; j++) {
4775       Node* in = n->in(j);
4776       if (in->is_DecodeNarrowPtr() && (is_uncommon || !in->has_non_debug_uses())) {
4777         n->set_req(j, in->in(1));
4778         if (in->outcnt() == 0) {
4779           in->disconnect_inputs(this);
4780         }
4781       }
4782     }
4783   }
4784 }
4785 
4786 //------------------------------final_graph_reshaping--------------------------
4787 // Final Graph Reshaping.
4788 //
4789 // (1) Clone simple inputs to uncommon calls, so they can be scheduled late
4790 //     and not commoned up and forced early.  Must come after regular
4791 //     optimizations to avoid GVN undoing the cloning.  Clone constant
4792 //     inputs to Loop Phis; these will be split by the allocator anyways.
4793 //     Remove Opaque nodes.
4794 // (2) Move last-uses by commutative operations to the left input to encourage
4795 //     Intel update-in-place two-address operations and better register usage
4796 //     on RISCs.  Must come after regular optimizations to avoid GVN Ideal
4797 //     calls canonicalizing them back.
4798 // (3) Count the number of double-precision FP ops, single-precision FP ops
4799 //     and call sites.  On Intel, we can get correct rounding either by
4800 //     forcing singles to memory (requires extra stores and loads after each
4801 //     FP bytecode) or we can set a rounding mode bit (requires setting and
4802 //     clearing the mode bit around call sites).  The mode bit is only used
4803 //     if the relative frequency of single FP ops to calls is low enough.
4804 //     This is a key transform for SPEC mpeg_audio.
4805 // (4) Detect infinite loops; blobs of code reachable from above but not
4806 //     below.  Several of the Code_Gen algorithms fail on such code shapes,
4807 //     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
4808 //     from time to time in other codes (such as -Xcomp finalizer loops, etc).
4809 //     Detection is by looking for IfNodes where only 1 projection is
4810 //     reachable from below or CatchNodes missing some targets.
4811 // (5) Assert for insane oop offsets in debug mode.
4812 
4813 bool Compile::final_graph_reshaping() {
4814   // an infinite loop may have been eliminated by the optimizer,
4815   // in which case the graph will be empty.
4816   if (root()->req() == 1) {
4817     // Do not compile method that is only a trivial infinite loop,
4818     // since the content of the loop may have been eliminated.
4819     record_method_not_compilable("trivial infinite loop");
4820     return true;
4821   }
4822 
4823   // Expensive nodes have their control input set to prevent the GVN
4824   // from freely commoning them. There's no GVN beyond this point so
4825   // no need to keep the control input. We want the expensive nodes to
4826   // be freely moved to the least frequent code path by gcm.
4827   assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
4828   for (int i = 0; i < expensive_count(); i++) {
4829     _expensive_nodes.at(i)->set_req(0, nullptr);
4830   }
4831 
4832   Final_Reshape_Counts frc;
4833 
4834   // Visit everybody reachable!
4835   // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
4836   Node_Stack nstack(live_nodes() >> 1);
4837   Unique_Node_List dead_nodes;
4838   final_graph_reshaping_walk(nstack, root(), frc, dead_nodes);
4839 
4840   // Check for unreachable (from below) code (i.e., infinite loops).
4841   for( uint i = 0; i < frc._tests.size(); i++ ) {
4842     MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
4843     // Get number of CFG targets.
4844     // Note that PCTables include exception targets after calls.
4845     uint required_outcnt = n->required_outcnt();
4846     if (n->outcnt() != required_outcnt) {
4847       // Check for a few special cases.  Rethrow Nodes never take the
4848       // 'fall-thru' path, so expected kids is 1 less.
4849       if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
4850         if (n->in(0)->in(0)->is_Call()) {
4851           CallNode* call = n->in(0)->in(0)->as_Call();
4852           if (call->entry_point() == OptoRuntime::rethrow_stub()) {
4853             required_outcnt--;      // Rethrow always has 1 less kid
4854           } else if (call->req() > TypeFunc::Parms &&
4855                      call->is_CallDynamicJava()) {
4856             // Check for null receiver. In such case, the optimizer has
4857             // detected that the virtual call will always result in a null
4858             // pointer exception. The fall-through projection of this CatchNode
4859             // will not be populated.
4860             Node* arg0 = call->in(TypeFunc::Parms);
4861             if (arg0->is_Type() &&
4862                 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
4863               required_outcnt--;
4864             }
4865           } else if (call->entry_point() == OptoRuntime::new_array_Java() ||
4866                      call->entry_point() == OptoRuntime::new_array_nozero_Java()) {
4867             // Check for illegal array length. In such case, the optimizer has
4868             // detected that the allocation attempt will always result in an
4869             // exception. There is no fall-through projection of this CatchNode .
4870             assert(call->is_CallStaticJava(), "static call expected");
4871             assert(call->req() == call->jvms()->endoff() + 1, "missing extra input");
4872             uint valid_length_test_input = call->req() - 1;
4873             Node* valid_length_test = call->in(valid_length_test_input);
4874             call->del_req(valid_length_test_input);
4875             if (valid_length_test->find_int_con(1) == 0) {
4876               required_outcnt--;
4877             }
4878             dead_nodes.push(valid_length_test);
4879             assert(n->outcnt() == required_outcnt, "malformed control flow");
4880             continue;
4881           }
4882         }
4883       }
4884 
4885       // Recheck with a better notion of 'required_outcnt'
4886       if (n->outcnt() != required_outcnt) {
4887         record_method_not_compilable("malformed control flow");
4888         return true;            // Not all targets reachable!
4889       }
4890     } else if (n->is_PCTable() && n->in(0) && n->in(0)->in(0) && n->in(0)->in(0)->is_Call()) {
4891       CallNode* call = n->in(0)->in(0)->as_Call();
4892       if (call->entry_point() == OptoRuntime::new_array_Java() ||
4893           call->entry_point() == OptoRuntime::new_array_nozero_Java()) {
4894         assert(call->is_CallStaticJava(), "static call expected");
4895         assert(call->req() == call->jvms()->endoff() + 1, "missing extra input");
4896         uint valid_length_test_input = call->req() - 1;
4897         dead_nodes.push(call->in(valid_length_test_input));
4898         call->del_req(valid_length_test_input); // valid length test useless now
4899       }
4900     }
4901     // Check that I actually visited all kids.  Unreached kids
4902     // must be infinite loops.
4903     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
4904       if (!frc._visited.test(n->fast_out(j)->_idx)) {
4905         record_method_not_compilable("infinite loop");
4906         return true;            // Found unvisited kid; must be unreach
4907       }
4908 
4909     // Here so verification code in final_graph_reshaping_walk()
4910     // always see an OuterStripMinedLoopEnd
4911     if (n->is_OuterStripMinedLoopEnd() || n->is_LongCountedLoopEnd()) {
4912       IfNode* init_iff = n->as_If();
4913       Node* iff = new IfNode(init_iff->in(0), init_iff->in(1), init_iff->_prob, init_iff->_fcnt);
4914       n->subsume_by(iff, this);
4915     }
4916   }
4917 
4918   while (dead_nodes.size() > 0) {
4919     Node* m = dead_nodes.pop();
4920     if (m->outcnt() == 0 && m != top()) {
4921       for (uint j = 0; j < m->req(); j++) {
4922         Node* in = m->in(j);
4923         if (in != nullptr) {
4924           dead_nodes.push(in);
4925         }
4926       }
4927       m->disconnect_inputs(this);
4928     }
4929   }
4930 
4931   set_java_calls(frc.get_java_call_count());
4932   set_inner_loops(frc.get_inner_loop_count());
4933 
4934   // No infinite loops, no reason to bail out.
4935   return false;
4936 }
4937 
4938 //-----------------------------too_many_traps----------------------------------
4939 // Report if there are too many traps at the current method and bci.
4940 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4941 bool Compile::too_many_traps(ciMethod* method,
4942                              int bci,
4943                              Deoptimization::DeoptReason reason) {
4944   ciMethodData* md = method->method_data();
4945   if (md->is_empty()) {
4946     // Assume the trap has not occurred, or that it occurred only
4947     // because of a transient condition during start-up in the interpreter.
4948     return false;
4949   }
4950   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4951   if (md->has_trap_at(bci, m, reason) != 0) {
4952     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4953     // Also, if there are multiple reasons, or if there is no per-BCI record,
4954     // assume the worst.
4955     if (log())
4956       log()->elem("observe trap='%s' count='%d'",
4957                   Deoptimization::trap_reason_name(reason),
4958                   md->trap_count(reason));
4959     return true;
4960   } else {
4961     // Ignore method/bci and see if there have been too many globally.
4962     return too_many_traps(reason, md);
4963   }
4964 }
4965 
4966 // Less-accurate variant which does not require a method and bci.
4967 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4968                              ciMethodData* logmd) {
4969   if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
4970     // Too many traps globally.
4971     // Note that we use cumulative trap_count, not just md->trap_count.
4972     if (log()) {
4973       int mcount = (logmd == nullptr)? -1: (int)logmd->trap_count(reason);
4974       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4975                   Deoptimization::trap_reason_name(reason),
4976                   mcount, trap_count(reason));
4977     }
4978     return true;
4979   } else {
4980     // The coast is clear.
4981     return false;
4982   }
4983 }
4984 
4985 //--------------------------too_many_recompiles--------------------------------
4986 // Report if there are too many recompiles at the current method and bci.
4987 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4988 // Is not eager to return true, since this will cause the compiler to use
4989 // Action_none for a trap point, to avoid too many recompilations.
4990 bool Compile::too_many_recompiles(ciMethod* method,
4991                                   int bci,
4992                                   Deoptimization::DeoptReason reason) {
4993   ciMethodData* md = method->method_data();
4994   if (md->is_empty()) {
4995     // Assume the trap has not occurred, or that it occurred only
4996     // because of a transient condition during start-up in the interpreter.
4997     return false;
4998   }
4999   // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
5000   uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
5001   uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
5002   Deoptimization::DeoptReason per_bc_reason
5003     = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
5004   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
5005   if ((per_bc_reason == Deoptimization::Reason_none
5006        || md->has_trap_at(bci, m, reason) != 0)
5007       // The trap frequency measure we care about is the recompile count:
5008       && md->trap_recompiled_at(bci, m)
5009       && md->overflow_recompile_count() >= bc_cutoff) {
5010     // Do not emit a trap here if it has already caused recompilations.
5011     // Also, if there are multiple reasons, or if there is no per-BCI record,
5012     // assume the worst.
5013     if (log())
5014       log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
5015                   Deoptimization::trap_reason_name(reason),
5016                   md->trap_count(reason),
5017                   md->overflow_recompile_count());
5018     return true;
5019   } else if (trap_count(reason) != 0
5020              && decompile_count() >= m_cutoff) {
5021     // Too many recompiles globally, and we have seen this sort of trap.
5022     // Use cumulative decompile_count, not just md->decompile_count.
5023     if (log())
5024       log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
5025                   Deoptimization::trap_reason_name(reason),
5026                   md->trap_count(reason), trap_count(reason),
5027                   md->decompile_count(), decompile_count());
5028     return true;
5029   } else {
5030     // The coast is clear.
5031     return false;
5032   }
5033 }
5034 
5035 // Compute when not to trap. Used by matching trap based nodes and
5036 // NullCheck optimization.
5037 void Compile::set_allowed_deopt_reasons() {
5038   _allowed_reasons = 0;
5039   if (is_method_compilation()) {
5040     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
5041       assert(rs < BitsPerInt, "recode bit map");
5042       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
5043         _allowed_reasons |= nth_bit(rs);
5044       }
5045     }
5046   }
5047 }
5048 
5049 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
5050   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
5051 }
5052 
5053 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
5054   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
5055 }
5056 
5057 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
5058   if (holder->is_initialized()) {
5059     return false;
5060   }
5061   if (holder->is_being_initialized()) {
5062     if (accessing_method->holder() == holder) {
5063       // Access inside a class. The barrier can be elided when access happens in <clinit>,
5064       // <init>, or a static method. In all those cases, there was an initialization
5065       // barrier on the holder klass passed.
5066       if (accessing_method->is_class_initializer() ||
5067           accessing_method->is_object_constructor() ||
5068           accessing_method->is_static()) {
5069         return false;
5070       }
5071     } else if (accessing_method->holder()->is_subclass_of(holder)) {
5072       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
5073       // In case of <init> or a static method, the barrier is on the subclass is not enough:
5074       // child class can become fully initialized while its parent class is still being initialized.
5075       if (accessing_method->is_class_initializer()) {
5076         return false;
5077       }
5078     }
5079     ciMethod* root = method(); // the root method of compilation
5080     if (root != accessing_method) {
5081       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
5082     }
5083   }
5084   return true;
5085 }
5086 
5087 #ifndef PRODUCT
5088 //------------------------------verify_bidirectional_edges---------------------
5089 // For each input edge to a node (ie - for each Use-Def edge), verify that
5090 // there is a corresponding Def-Use edge.
5091 void Compile::verify_bidirectional_edges(Unique_Node_List& visited, const Unique_Node_List* root_and_safepoints) const {
5092   // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
5093   uint stack_size = live_nodes() >> 4;
5094   Node_List nstack(MAX2(stack_size, (uint) OptoNodeListSize));
5095   if (root_and_safepoints != nullptr) {
5096     assert(root_and_safepoints->member(_root), "root is not in root_and_safepoints");
5097     for (uint i = 0, limit = root_and_safepoints->size(); i < limit; i++) {
5098       Node* root_or_safepoint = root_and_safepoints->at(i);
5099       // If the node is a safepoint, let's check if it still has a control input
5100       // Lack of control input signifies that this node was killed by CCP or
5101       // recursively by remove_globally_dead_node and it shouldn't be a starting
5102       // point.
5103       if (!root_or_safepoint->is_SafePoint() || root_or_safepoint->in(0) != nullptr) {
5104         nstack.push(root_or_safepoint);
5105       }
5106     }
5107   } else {
5108     nstack.push(_root);
5109   }
5110 
5111   while (nstack.size() > 0) {
5112     Node* n = nstack.pop();
5113     if (visited.member(n)) {
5114       continue;
5115     }
5116     visited.push(n);
5117 
5118     // Walk over all input edges, checking for correspondence
5119     uint length = n->len();
5120     for (uint i = 0; i < length; i++) {
5121       Node* in = n->in(i);
5122       if (in != nullptr && !visited.member(in)) {
5123         nstack.push(in); // Put it on stack
5124       }
5125       if (in != nullptr && !in->is_top()) {
5126         // Count instances of `next`
5127         int cnt = 0;
5128         for (uint idx = 0; idx < in->_outcnt; idx++) {
5129           if (in->_out[idx] == n) {
5130             cnt++;
5131           }
5132         }
5133         assert(cnt > 0, "Failed to find Def-Use edge.");
5134         // Check for duplicate edges
5135         // walk the input array downcounting the input edges to n
5136         for (uint j = 0; j < length; j++) {
5137           if (n->in(j) == in) {
5138             cnt--;
5139           }
5140         }
5141         assert(cnt == 0, "Mismatched edge count.");
5142       } else if (in == nullptr) {
5143         assert(i == 0 || i >= n->req() ||
5144                n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
5145                (n->is_Allocate() && i >= AllocateNode::InlineType) ||
5146                (n->is_Unlock() && i == (n->req() - 1)) ||
5147                (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
5148               "only region, phi, arraycopy, allocate, unlock or membar nodes have null data edges");
5149       } else {
5150         assert(in->is_top(), "sanity");
5151         // Nothing to check.
5152       }
5153     }
5154   }
5155 }
5156 
5157 //------------------------------verify_graph_edges---------------------------
5158 // Walk the Graph and verify that there is a one-to-one correspondence
5159 // between Use-Def edges and Def-Use edges in the graph.
5160 void Compile::verify_graph_edges(bool no_dead_code, const Unique_Node_List* root_and_safepoints) const {
5161   if (VerifyGraphEdges) {
5162     Unique_Node_List visited;
5163 
5164     // Call graph walk to check edges
5165     verify_bidirectional_edges(visited, root_and_safepoints);
5166     if (no_dead_code) {
5167       // Now make sure that no visited node is used by an unvisited node.
5168       bool dead_nodes = false;
5169       Unique_Node_List checked;
5170       while (visited.size() > 0) {
5171         Node* n = visited.pop();
5172         checked.push(n);
5173         for (uint i = 0; i < n->outcnt(); i++) {
5174           Node* use = n->raw_out(i);
5175           if (checked.member(use))  continue;  // already checked
5176           if (visited.member(use))  continue;  // already in the graph
5177           if (use->is_Con())        continue;  // a dead ConNode is OK
5178           // At this point, we have found a dead node which is DU-reachable.
5179           if (!dead_nodes) {
5180             tty->print_cr("*** Dead nodes reachable via DU edges:");
5181             dead_nodes = true;
5182           }
5183           use->dump(2);
5184           tty->print_cr("---");
5185           checked.push(use);  // No repeats; pretend it is now checked.
5186         }
5187       }
5188       assert(!dead_nodes, "using nodes must be reachable from root");
5189     }
5190   }
5191 }
5192 #endif
5193 
5194 // The Compile object keeps track of failure reasons separately from the ciEnv.
5195 // This is required because there is not quite a 1-1 relation between the
5196 // ciEnv and its compilation task and the Compile object.  Note that one
5197 // ciEnv might use two Compile objects, if C2Compiler::compile_method decides
5198 // to backtrack and retry without subsuming loads.  Other than this backtracking
5199 // behavior, the Compile's failure reason is quietly copied up to the ciEnv
5200 // by the logic in C2Compiler.
5201 void Compile::record_failure(const char* reason DEBUG_ONLY(COMMA bool allow_multiple_failures)) {
5202   if (log() != nullptr) {
5203     log()->elem("failure reason='%s' phase='compile'", reason);
5204   }
5205   if (_failure_reason.get() == nullptr) {
5206     // Record the first failure reason.
5207     _failure_reason.set(reason);
5208     if (CaptureBailoutInformation) {
5209       _first_failure_details = new CompilationFailureInfo(reason);
5210     }
5211   } else {
5212     assert(!StressBailout || allow_multiple_failures, "should have handled previous failure.");
5213   }
5214 
5215   if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
5216     C->print_method(PHASE_FAILURE, 1);
5217   }
5218   _root = nullptr;  // flush the graph, too
5219 }
5220 
5221 Compile::TracePhase::TracePhase(const char* name, PhaseTraceId id)
5222   : TraceTime(name, &Phase::timers[id], CITime, CITimeVerbose),
5223     _compile(Compile::current()),
5224     _log(nullptr),
5225     _dolog(CITimeVerbose)
5226 {
5227   assert(_compile != nullptr, "sanity check");
5228   assert(id != PhaseTraceId::_t_none, "Don't use none");
5229   if (_dolog) {
5230     _log = _compile->log();
5231   }
5232   if (_log != nullptr) {
5233     _log->begin_head("phase name='%s' nodes='%d' live='%d'", phase_name(), _compile->unique(), _compile->live_nodes());
5234     _log->stamp();
5235     _log->end_head();
5236   }
5237 
5238   // Inform memory statistic, if enabled
5239   if (CompilationMemoryStatistic::enabled()) {
5240     CompilationMemoryStatistic::on_phase_start((int)id, name);
5241   }
5242 }
5243 
5244 Compile::TracePhase::TracePhase(PhaseTraceId id)
5245   : TracePhase(Phase::get_phase_trace_id_text(id), id) {}
5246 
5247 Compile::TracePhase::~TracePhase() {
5248 
5249   // Inform memory statistic, if enabled
5250   if (CompilationMemoryStatistic::enabled()) {
5251     CompilationMemoryStatistic::on_phase_end();
5252   }
5253 
5254   if (_compile->failing_internal()) {
5255     if (_log != nullptr) {
5256       _log->done("phase");
5257     }
5258     return; // timing code, not stressing bailouts.
5259   }
5260 #ifdef ASSERT
5261   if (PrintIdealNodeCount) {
5262     tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
5263                   phase_name(), _compile->unique(), _compile->live_nodes(), _compile->count_live_nodes_by_graph_walk());
5264   }
5265 
5266   if (VerifyIdealNodeCount) {
5267     _compile->print_missing_nodes();
5268   }
5269 #endif
5270 
5271   if (_log != nullptr) {
5272     _log->done("phase name='%s' nodes='%d' live='%d'", phase_name(), _compile->unique(), _compile->live_nodes());
5273   }
5274 }
5275 
5276 //----------------------------static_subtype_check-----------------------------
5277 // Shortcut important common cases when superklass is exact:
5278 // (0) superklass is java.lang.Object (can occur in reflective code)
5279 // (1) subklass is already limited to a subtype of superklass => always ok
5280 // (2) subklass does not overlap with superklass => always fail
5281 // (3) superklass has NO subtypes and we can check with a simple compare.
5282 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
5283   if (skip) {
5284     return SSC_full_test;       // Let caller generate the general case.
5285   }
5286 
5287   if (subk->is_java_subtype_of(superk)) {
5288     return SSC_always_true; // (0) and (1)  this test cannot fail
5289   }
5290 
5291   if (!subk->maybe_java_subtype_of(superk)) {
5292     return SSC_always_false; // (2) true path dead; no dynamic test needed
5293   }
5294 
5295   const Type* superelem = superk;
5296   if (superk->isa_aryklassptr()) {
5297     int ignored;
5298     superelem = superk->is_aryklassptr()->base_element_type(ignored);
5299 
5300     // Do not fold the subtype check to an array klass pointer comparison for null-able inline type arrays
5301     // because null-free [LMyValue <: null-able [LMyValue but the klasses are different. Perform a full test.
5302     if (!superk->is_aryklassptr()->is_null_free() && superk->is_aryklassptr()->elem()->isa_instklassptr() &&
5303         superk->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->is_inlinetype()) {
5304       return SSC_full_test;
5305     }
5306   }
5307 
5308   if (superelem->isa_instklassptr()) {
5309     ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
5310     if (!ik->has_subklass()) {
5311       if (!ik->is_final()) {
5312         // Add a dependency if there is a chance of a later subclass.
5313         dependencies()->assert_leaf_type(ik);
5314       }
5315       if (!superk->maybe_java_subtype_of(subk)) {
5316         return SSC_always_false;
5317       }
5318       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
5319     }
5320   } else {
5321     // A primitive array type has no subtypes.
5322     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
5323   }
5324 
5325   return SSC_full_test;
5326 }
5327 
5328 Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetype, Node* ctrl) {
5329 #ifdef _LP64
5330   // The scaled index operand to AddP must be a clean 64-bit value.
5331   // Java allows a 32-bit int to be incremented to a negative
5332   // value, which appears in a 64-bit register as a large
5333   // positive number.  Using that large positive number as an
5334   // operand in pointer arithmetic has bad consequences.
5335   // On the other hand, 32-bit overflow is rare, and the possibility
5336   // can often be excluded, if we annotate the ConvI2L node with
5337   // a type assertion that its value is known to be a small positive
5338   // number.  (The prior range check has ensured this.)
5339   // This assertion is used by ConvI2LNode::Ideal.
5340   int index_max = max_jint - 1;  // array size is max_jint, index is one less
5341   if (sizetype != nullptr && sizetype->_hi > 0) {
5342     index_max = sizetype->_hi - 1;
5343   }
5344   const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
5345   idx = constrained_convI2L(phase, idx, iidxtype, ctrl);
5346 #endif
5347   return idx;
5348 }
5349 
5350 // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
5351 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
5352   if (ctrl != nullptr) {
5353     // Express control dependency by a CastII node with a narrow type.
5354     // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
5355     // node from floating above the range check during loop optimizations. Otherwise, the
5356     // ConvI2L node may be eliminated independently of the range check, causing the data path
5357     // to become TOP while the control path is still there (although it's unreachable).
5358     value = new CastIINode(ctrl, value, itype, carry_dependency ? ConstraintCastNode::DependencyType::NonFloatingNarrowing : ConstraintCastNode::DependencyType::FloatingNarrowing, true /* range check dependency */);
5359     value = phase->transform(value);
5360   }
5361   const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
5362   return phase->transform(new ConvI2LNode(value, ltype));
5363 }
5364 
5365 void Compile::dump_print_inlining() {
5366   inline_printer()->print_on(tty);
5367 }
5368 
5369 void Compile::log_late_inline(CallGenerator* cg) {
5370   if (log() != nullptr) {
5371     log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
5372                 cg->unique_id());
5373     JVMState* p = cg->call_node()->jvms();
5374     while (p != nullptr) {
5375       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
5376       p = p->caller();
5377     }
5378     log()->tail("late_inline");
5379   }
5380 }
5381 
5382 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
5383   log_late_inline(cg);
5384   if (log() != nullptr) {
5385     log()->inline_fail(msg);
5386   }
5387 }
5388 
5389 void Compile::log_inline_id(CallGenerator* cg) {
5390   if (log() != nullptr) {
5391     // The LogCompilation tool needs a unique way to identify late
5392     // inline call sites. This id must be unique for this call site in
5393     // this compilation. Try to have it unique across compilations as
5394     // well because it can be convenient when grepping through the log
5395     // file.
5396     // Distinguish OSR compilations from others in case CICountOSR is
5397     // on.
5398     jlong id = ((jlong)unique()) + (((jlong)compile_id()) << 33) + (CICountOSR && is_osr_compilation() ? ((jlong)1) << 32 : 0);
5399     cg->set_unique_id(id);
5400     log()->elem("inline_id id='" JLONG_FORMAT "'", id);
5401   }
5402 }
5403 
5404 void Compile::log_inline_failure(const char* msg) {
5405   if (C->log() != nullptr) {
5406     C->log()->inline_fail(msg);
5407   }
5408 }
5409 
5410 
5411 // Dump inlining replay data to the stream.
5412 // Don't change thread state and acquire any locks.
5413 void Compile::dump_inline_data(outputStream* out) {
5414   InlineTree* inl_tree = ilt();
5415   if (inl_tree != nullptr) {
5416     out->print(" inline %d", inl_tree->count());
5417     inl_tree->dump_replay_data(out);
5418   }
5419 }
5420 
5421 void Compile::dump_inline_data_reduced(outputStream* out) {
5422   assert(ReplayReduce, "");
5423 
5424   InlineTree* inl_tree = ilt();
5425   if (inl_tree == nullptr) {
5426     return;
5427   }
5428   // Enable iterative replay file reduction
5429   // Output "compile" lines for depth 1 subtrees,
5430   // simulating that those trees were compiled
5431   // instead of inlined.
5432   for (int i = 0; i < inl_tree->subtrees().length(); ++i) {
5433     InlineTree* sub = inl_tree->subtrees().at(i);
5434     if (sub->inline_level() != 1) {
5435       continue;
5436     }
5437 
5438     ciMethod* method = sub->method();
5439     int entry_bci = -1;
5440     int comp_level = env()->task()->comp_level();
5441     out->print("compile ");
5442     method->dump_name_as_ascii(out);
5443     out->print(" %d %d", entry_bci, comp_level);
5444     out->print(" inline %d", sub->count());
5445     sub->dump_replay_data(out, -1);
5446     out->cr();
5447   }
5448 }
5449 
5450 int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
5451   if (n1->Opcode() < n2->Opcode())      return -1;
5452   else if (n1->Opcode() > n2->Opcode()) return 1;
5453 
5454   assert(n1->req() == n2->req(), "can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req());
5455   for (uint i = 1; i < n1->req(); i++) {
5456     if (n1->in(i) < n2->in(i))      return -1;
5457     else if (n1->in(i) > n2->in(i)) return 1;
5458   }
5459 
5460   return 0;
5461 }
5462 
5463 int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
5464   Node* n1 = *n1p;
5465   Node* n2 = *n2p;
5466 
5467   return cmp_expensive_nodes(n1, n2);
5468 }
5469 
5470 void Compile::sort_expensive_nodes() {
5471   if (!expensive_nodes_sorted()) {
5472     _expensive_nodes.sort(cmp_expensive_nodes);
5473   }
5474 }
5475 
5476 bool Compile::expensive_nodes_sorted() const {
5477   for (int i = 1; i < _expensive_nodes.length(); i++) {
5478     if (cmp_expensive_nodes(_expensive_nodes.adr_at(i), _expensive_nodes.adr_at(i-1)) < 0) {
5479       return false;
5480     }
5481   }
5482   return true;
5483 }
5484 
5485 bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
5486   if (_expensive_nodes.length() == 0) {
5487     return false;
5488   }
5489 
5490   assert(OptimizeExpensiveOps, "optimization off?");
5491 
5492   // Take this opportunity to remove dead nodes from the list
5493   int j = 0;
5494   for (int i = 0; i < _expensive_nodes.length(); i++) {
5495     Node* n = _expensive_nodes.at(i);
5496     if (!n->is_unreachable(igvn)) {
5497       assert(n->is_expensive(), "should be expensive");
5498       _expensive_nodes.at_put(j, n);
5499       j++;
5500     }
5501   }
5502   _expensive_nodes.trunc_to(j);
5503 
5504   // Then sort the list so that similar nodes are next to each other
5505   // and check for at least two nodes of identical kind with same data
5506   // inputs.
5507   sort_expensive_nodes();
5508 
5509   for (int i = 0; i < _expensive_nodes.length()-1; i++) {
5510     if (cmp_expensive_nodes(_expensive_nodes.adr_at(i), _expensive_nodes.adr_at(i+1)) == 0) {
5511       return true;
5512     }
5513   }
5514 
5515   return false;
5516 }
5517 
5518 void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
5519   if (_expensive_nodes.length() == 0) {
5520     return;
5521   }
5522 
5523   assert(OptimizeExpensiveOps, "optimization off?");
5524 
5525   // Sort to bring similar nodes next to each other and clear the
5526   // control input of nodes for which there's only a single copy.
5527   sort_expensive_nodes();
5528 
5529   int j = 0;
5530   int identical = 0;
5531   int i = 0;
5532   bool modified = false;
5533   for (; i < _expensive_nodes.length()-1; i++) {
5534     assert(j <= i, "can't write beyond current index");
5535     if (_expensive_nodes.at(i)->Opcode() == _expensive_nodes.at(i+1)->Opcode()) {
5536       identical++;
5537       _expensive_nodes.at_put(j++, _expensive_nodes.at(i));
5538       continue;
5539     }
5540     if (identical > 0) {
5541       _expensive_nodes.at_put(j++, _expensive_nodes.at(i));
5542       identical = 0;
5543     } else {
5544       Node* n = _expensive_nodes.at(i);
5545       igvn.replace_input_of(n, 0, nullptr);
5546       igvn.hash_insert(n);
5547       modified = true;
5548     }
5549   }
5550   if (identical > 0) {
5551     _expensive_nodes.at_put(j++, _expensive_nodes.at(i));
5552   } else if (_expensive_nodes.length() >= 1) {
5553     Node* n = _expensive_nodes.at(i);
5554     igvn.replace_input_of(n, 0, nullptr);
5555     igvn.hash_insert(n);
5556     modified = true;
5557   }
5558   _expensive_nodes.trunc_to(j);
5559   if (modified) {
5560     igvn.optimize();
5561   }
5562 }
5563 
5564 void Compile::add_expensive_node(Node * n) {
5565   assert(!_expensive_nodes.contains(n), "duplicate entry in expensive list");
5566   assert(n->is_expensive(), "expensive nodes with non-null control here only");
5567   assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
5568   if (OptimizeExpensiveOps) {
5569     _expensive_nodes.append(n);
5570   } else {
5571     // Clear control input and let IGVN optimize expensive nodes if
5572     // OptimizeExpensiveOps is off.
5573     n->set_req(0, nullptr);
5574   }
5575 }
5576 
5577 /**
5578  * Track coarsened Lock and Unlock nodes.
5579  */
5580 
5581 class Lock_List : public Node_List {
5582   uint _origin_cnt;
5583 public:
5584   Lock_List(Arena *a, uint cnt) : Node_List(a), _origin_cnt(cnt) {}
5585   uint origin_cnt() const { return _origin_cnt; }
5586 };
5587 
5588 void Compile::add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks) {
5589   int length = locks.length();
5590   if (length > 0) {
5591     // Have to keep this list until locks elimination during Macro nodes elimination.
5592     Lock_List* locks_list = new (comp_arena()) Lock_List(comp_arena(), length);
5593     AbstractLockNode* alock = locks.at(0);
5594     BoxLockNode* box = alock->box_node()->as_BoxLock();
5595     for (int i = 0; i < length; i++) {
5596       AbstractLockNode* lock = locks.at(i);
5597       assert(lock->is_coarsened(), "expecting only coarsened AbstractLock nodes, but got '%s'[%d] node", lock->Name(), lock->_idx);
5598       locks_list->push(lock);
5599       BoxLockNode* this_box = lock->box_node()->as_BoxLock();
5600       if (this_box != box) {
5601         // Locking regions (BoxLock) could be Unbalanced here:
5602         //  - its coarsened locks were eliminated in earlier
5603         //    macro nodes elimination followed by loop unroll
5604         //  - it is OSR locking region (no Lock node)
5605         // Preserve Unbalanced status in such cases.
5606         if (!this_box->is_unbalanced()) {
5607           this_box->set_coarsened();
5608         }
5609         if (!box->is_unbalanced()) {
5610           box->set_coarsened();
5611         }
5612       }
5613     }
5614     _coarsened_locks.append(locks_list);
5615   }
5616 }
5617 
5618 void Compile::remove_useless_coarsened_locks(Unique_Node_List& useful) {
5619   int count = coarsened_count();
5620   for (int i = 0; i < count; i++) {
5621     Node_List* locks_list = _coarsened_locks.at(i);
5622     for (uint j = 0; j < locks_list->size(); j++) {
5623       Node* lock = locks_list->at(j);
5624       assert(lock->is_AbstractLock(), "sanity");
5625       if (!useful.member(lock)) {
5626         locks_list->yank(lock);
5627       }
5628     }
5629   }
5630 }
5631 
5632 void Compile::remove_coarsened_lock(Node* n) {
5633   if (n->is_AbstractLock()) {
5634     int count = coarsened_count();
5635     for (int i = 0; i < count; i++) {
5636       Node_List* locks_list = _coarsened_locks.at(i);
5637       locks_list->yank(n);
5638     }
5639   }
5640 }
5641 
5642 bool Compile::coarsened_locks_consistent() {
5643   int count = coarsened_count();
5644   for (int i = 0; i < count; i++) {
5645     bool unbalanced = false;
5646     bool modified = false; // track locks kind modifications
5647     Lock_List* locks_list = (Lock_List*)_coarsened_locks.at(i);
5648     uint size = locks_list->size();
5649     if (size == 0) {
5650       unbalanced = false; // All locks were eliminated - good
5651     } else if (size != locks_list->origin_cnt()) {
5652       unbalanced = true; // Some locks were removed from list
5653     } else {
5654       for (uint j = 0; j < size; j++) {
5655         Node* lock = locks_list->at(j);
5656         // All nodes in group should have the same state (modified or not)
5657         if (!lock->as_AbstractLock()->is_coarsened()) {
5658           if (j == 0) {
5659             // first on list was modified, the rest should be too for consistency
5660             modified = true;
5661           } else if (!modified) {
5662             // this lock was modified but previous locks on the list were not
5663             unbalanced = true;
5664             break;
5665           }
5666         } else if (modified) {
5667           // previous locks on list were modified but not this lock
5668           unbalanced = true;
5669           break;
5670         }
5671       }
5672     }
5673     if (unbalanced) {
5674       // unbalanced monitor enter/exit - only some [un]lock nodes were removed or modified
5675 #ifdef ASSERT
5676       if (PrintEliminateLocks) {
5677         tty->print_cr("=== unbalanced coarsened locks ===");
5678         for (uint l = 0; l < size; l++) {
5679           locks_list->at(l)->dump();
5680         }
5681       }
5682 #endif
5683       record_failure(C2Compiler::retry_no_locks_coarsening());
5684       return false;
5685     }
5686   }
5687   return true;
5688 }
5689 
5690 // Mark locking regions (identified by BoxLockNode) as unbalanced if
5691 // locks coarsening optimization removed Lock/Unlock nodes from them.
5692 // Such regions become unbalanced because coarsening only removes part
5693 // of Lock/Unlock nodes in region. As result we can't execute other
5694 // locks elimination optimizations which assume all code paths have
5695 // corresponding pair of Lock/Unlock nodes - they are balanced.
5696 void Compile::mark_unbalanced_boxes() const {
5697   int count = coarsened_count();
5698   for (int i = 0; i < count; i++) {
5699     Node_List* locks_list = _coarsened_locks.at(i);
5700     uint size = locks_list->size();
5701     if (size > 0) {
5702       AbstractLockNode* alock = locks_list->at(0)->as_AbstractLock();
5703       BoxLockNode* box = alock->box_node()->as_BoxLock();
5704       if (alock->is_coarsened()) {
5705         // coarsened_locks_consistent(), which is called before this method, verifies
5706         // that the rest of Lock/Unlock nodes on locks_list are also coarsened.
5707         assert(!box->is_eliminated(), "regions with coarsened locks should not be marked as eliminated");
5708         for (uint j = 1; j < size; j++) {
5709           assert(locks_list->at(j)->as_AbstractLock()->is_coarsened(), "only coarsened locks are expected here");
5710           BoxLockNode* this_box = locks_list->at(j)->as_AbstractLock()->box_node()->as_BoxLock();
5711           if (box != this_box) {
5712             assert(!this_box->is_eliminated(), "regions with coarsened locks should not be marked as eliminated");
5713             box->set_unbalanced();
5714             this_box->set_unbalanced();
5715           }
5716         }
5717       }
5718     }
5719   }
5720 }
5721 
5722 /**
5723  * Remove the speculative part of types and clean up the graph
5724  */
5725 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
5726   if (UseTypeSpeculation) {
5727     Unique_Node_List worklist;
5728     worklist.push(root());
5729     int modified = 0;
5730     // Go over all type nodes that carry a speculative type, drop the
5731     // speculative part of the type and enqueue the node for an igvn
5732     // which may optimize it out.
5733     for (uint next = 0; next < worklist.size(); ++next) {
5734       Node *n  = worklist.at(next);
5735       if (n->is_Type()) {
5736         TypeNode* tn = n->as_Type();
5737         const Type* t = tn->type();
5738         const Type* t_no_spec = t->remove_speculative();
5739         if (t_no_spec != t) {
5740           bool in_hash = igvn.hash_delete(n);
5741           assert(in_hash || n->hash() == Node::NO_HASH, "node should be in igvn hash table");
5742           tn->set_type(t_no_spec);
5743           igvn.hash_insert(n);
5744           igvn._worklist.push(n); // give it a chance to go away
5745           modified++;
5746         }
5747       }
5748       // Iterate over outs - endless loops is unreachable from below
5749       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5750         Node *m = n->fast_out(i);
5751         if (not_a_node(m)) {
5752           continue;
5753         }
5754         worklist.push(m);
5755       }
5756     }
5757     // Drop the speculative part of all types in the igvn's type table
5758     igvn.remove_speculative_types();
5759     if (modified > 0) {
5760       igvn.optimize();
5761       if (failing())  return;
5762     }
5763 #ifdef ASSERT
5764     // Verify that after the IGVN is over no speculative type has resurfaced
5765     worklist.clear();
5766     worklist.push(root());
5767     for (uint next = 0; next < worklist.size(); ++next) {
5768       Node *n  = worklist.at(next);
5769       const Type* t = igvn.type_or_null(n);
5770       assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
5771       if (n->is_Type()) {
5772         t = n->as_Type()->type();
5773         assert(t == t->remove_speculative(), "no more speculative types");
5774       }
5775       // Iterate over outs - endless loops is unreachable from below
5776       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5777         Node *m = n->fast_out(i);
5778         if (not_a_node(m)) {
5779           continue;
5780         }
5781         worklist.push(m);
5782       }
5783     }
5784     igvn.check_no_speculative_types();
5785 #endif
5786   }
5787 }
5788 
5789 // Auxiliary methods to support randomized stressing/fuzzing.
5790 
5791 void Compile::initialize_stress_seed(const DirectiveSet* directive) {
5792   if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
5793     _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
5794     FLAG_SET_ERGO(StressSeed, _stress_seed);
5795   } else {
5796     _stress_seed = StressSeed;
5797   }
5798   if (_log != nullptr) {
5799     _log->elem("stress_test seed='%u'", _stress_seed);
5800   }
5801 }
5802 
5803 int Compile::random() {
5804   _stress_seed = os::next_random(_stress_seed);
5805   return static_cast<int>(_stress_seed);
5806 }
5807 
5808 // This method can be called the arbitrary number of times, with current count
5809 // as the argument. The logic allows selecting a single candidate from the
5810 // running list of candidates as follows:
5811 //    int count = 0;
5812 //    Cand* selected = null;
5813 //    while(cand = cand->next()) {
5814 //      if (randomized_select(++count)) {
5815 //        selected = cand;
5816 //      }
5817 //    }
5818 //
5819 // Including count equalizes the chances any candidate is "selected".
5820 // This is useful when we don't have the complete list of candidates to choose
5821 // from uniformly. In this case, we need to adjust the randomicity of the
5822 // selection, or else we will end up biasing the selection towards the latter
5823 // candidates.
5824 //
5825 // Quick back-envelope calculation shows that for the list of n candidates
5826 // the equal probability for the candidate to persist as "best" can be
5827 // achieved by replacing it with "next" k-th candidate with the probability
5828 // of 1/k. It can be easily shown that by the end of the run, the
5829 // probability for any candidate is converged to 1/n, thus giving the
5830 // uniform distribution among all the candidates.
5831 //
5832 // We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
5833 #define RANDOMIZED_DOMAIN_POW 29
5834 #define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
5835 #define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
5836 bool Compile::randomized_select(int count) {
5837   assert(count > 0, "only positive");
5838   return (random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
5839 }
5840 
5841 #ifdef ASSERT
5842 // Failures are geometrically distributed with probability 1/StressBailoutMean.
5843 bool Compile::fail_randomly() {
5844   if ((random() % StressBailoutMean) != 0) {
5845     return false;
5846   }
5847   record_failure("StressBailout");
5848   return true;
5849 }
5850 
5851 bool Compile::failure_is_artificial() {
5852   return C->failure_reason_is("StressBailout");
5853 }
5854 #endif
5855 
5856 CloneMap&     Compile::clone_map()                 { return _clone_map; }
5857 void          Compile::set_clone_map(Dict* d)      { _clone_map._dict = d; }
5858 
5859 void NodeCloneInfo::dump_on(outputStream* st) const {
5860   st->print(" {%d:%d} ", idx(), gen());
5861 }
5862 
5863 void CloneMap::clone(Node* old, Node* nnn, int gen) {
5864   uint64_t val = value(old->_idx);
5865   NodeCloneInfo cio(val);
5866   assert(val != 0, "old node should be in the map");
5867   NodeCloneInfo cin(cio.idx(), gen + cio.gen());
5868   insert(nnn->_idx, cin.get());
5869 #ifndef PRODUCT
5870   if (is_debug()) {
5871     tty->print_cr("CloneMap::clone inserted node %d info {%d:%d} into CloneMap", nnn->_idx, cin.idx(), cin.gen());
5872   }
5873 #endif
5874 }
5875 
5876 void CloneMap::verify_insert_and_clone(Node* old, Node* nnn, int gen) {
5877   NodeCloneInfo cio(value(old->_idx));
5878   if (cio.get() == 0) {
5879     cio.set(old->_idx, 0);
5880     insert(old->_idx, cio.get());
5881 #ifndef PRODUCT
5882     if (is_debug()) {
5883       tty->print_cr("CloneMap::verify_insert_and_clone inserted node %d info {%d:%d} into CloneMap", old->_idx, cio.idx(), cio.gen());
5884     }
5885 #endif
5886   }
5887   clone(old, nnn, gen);
5888 }
5889 
5890 int CloneMap::max_gen() const {
5891   int g = 0;
5892   DictI di(_dict);
5893   for(; di.test(); ++di) {
5894     int t = gen(di._key);
5895     if (g < t) {
5896       g = t;
5897 #ifndef PRODUCT
5898       if (is_debug()) {
5899         tty->print_cr("CloneMap::max_gen() update max=%d from %d", g, _2_node_idx_t(di._key));
5900       }
5901 #endif
5902     }
5903   }
5904   return g;
5905 }
5906 
5907 void CloneMap::dump(node_idx_t key, outputStream* st) const {
5908   uint64_t val = value(key);
5909   if (val != 0) {
5910     NodeCloneInfo ni(val);
5911     ni.dump_on(st);
5912   }
5913 }
5914 
5915 void Compile::shuffle_macro_nodes() {
5916   shuffle_array(*C, _macro_nodes);
5917 }
5918 
5919 // Move Allocate nodes to the start of the list
5920 void Compile::sort_macro_nodes() {
5921   int count = macro_count();
5922   int allocates = 0;
5923   for (int i = 0; i < count; i++) {
5924     Node* n = macro_node(i);
5925     if (n->is_Allocate()) {
5926       if (i != allocates) {
5927         Node* tmp = macro_node(allocates);
5928         _macro_nodes.at_put(allocates, n);
5929         _macro_nodes.at_put(i, tmp);
5930       }
5931       allocates++;
5932     }
5933   }
5934 }
5935 
5936 void Compile::print_method(CompilerPhaseType compile_phase, int level, Node* n) {
5937   if (failing_internal()) { return; } // failing_internal to not stress bailouts from printing code.
5938   EventCompilerPhase event(UNTIMED);
5939   if (event.should_commit()) {
5940     CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, compile_phase, C->_compile_id, level);
5941   }
5942 #ifndef PRODUCT
5943   ResourceMark rm;
5944   stringStream ss;
5945   ss.print_raw(CompilerPhaseTypeHelper::to_description(compile_phase));
5946   int iter = ++_igv_phase_iter[compile_phase];
5947   if (iter > 1) {
5948     ss.print(" %d", iter);
5949   }
5950   if (n != nullptr) {
5951     ss.print(": %d %s", n->_idx, NodeClassNames[n->Opcode()]);
5952     if (n->is_Call()) {
5953       CallNode* call = n->as_Call();
5954       if (call->_name != nullptr) {
5955         // E.g. uncommon traps etc.
5956         ss.print(" - %s", call->_name);
5957       } else if (call->is_CallJava()) {
5958         CallJavaNode* call_java = call->as_CallJava();
5959         if (call_java->method() != nullptr) {
5960           ss.print(" -");
5961           call_java->method()->print_short_name(&ss);
5962         }
5963       }
5964     }
5965   }
5966 
5967   const char* name = ss.as_string();
5968   if (should_print_igv(level)) {
5969     _igv_printer->print_graph(name);
5970   }
5971   if (should_print_phase(level)) {
5972     print_phase(name);
5973   }
5974   if (should_print_ideal_phase(compile_phase)) {
5975     print_ideal_ir(CompilerPhaseTypeHelper::to_name(compile_phase));
5976   }
5977 #endif
5978   C->_latest_stage_start_counter.stamp();
5979 }
5980 
5981 // Only used from CompileWrapper
5982 void Compile::begin_method() {
5983 #ifndef PRODUCT
5984   if (_method != nullptr && should_print_igv(1)) {
5985     _igv_printer->begin_method();
5986   }
5987 #endif
5988   C->_latest_stage_start_counter.stamp();
5989 }
5990 
5991 // Only used from CompileWrapper
5992 void Compile::end_method() {
5993   EventCompilerPhase event(UNTIMED);
5994   if (event.should_commit()) {
5995     CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, PHASE_END, C->_compile_id, 1);
5996   }
5997 
5998 #ifndef PRODUCT
5999   if (_method != nullptr && should_print_igv(1)) {
6000     _igv_printer->end_method();
6001   }
6002 #endif
6003 }
6004 
6005 #ifndef PRODUCT
6006 bool Compile::should_print_phase(const int level) const {
6007   return PrintPhaseLevel >= 0 && directive()->PhasePrintLevelOption >= level &&
6008          _method != nullptr; // Do not print phases for stubs.
6009 }
6010 
6011 bool Compile::should_print_ideal_phase(CompilerPhaseType cpt) const {
6012   return _directive->should_print_ideal_phase(cpt);
6013 }
6014 
6015 void Compile::init_igv() {
6016   if (_igv_printer == nullptr) {
6017     _igv_printer = IdealGraphPrinter::printer();
6018     _igv_printer->set_compile(this);
6019   }
6020 }
6021 
6022 bool Compile::should_print_igv(const int level) {
6023   PRODUCT_RETURN_(return false;);
6024 
6025   if (PrintIdealGraphLevel < 0) { // disabled by the user
6026     return false;
6027   }
6028 
6029   bool need = directive()->IGVPrintLevelOption >= level;
6030   if (need) {
6031     Compile::init_igv();
6032   }
6033   return need;
6034 }
6035 
6036 IdealGraphPrinter* Compile::_debug_file_printer = nullptr;
6037 IdealGraphPrinter* Compile::_debug_network_printer = nullptr;
6038 
6039 // Called from debugger. Prints method to the default file with the default phase name.
6040 // This works regardless of any Ideal Graph Visualizer flags set or not.
6041 // Use in debugger (gdb/rr): p igv_print($sp, $fp, $pc).
6042 void igv_print(void* sp, void* fp, void* pc) {
6043   frame fr(sp, fp, pc);
6044   Compile::current()->igv_print_method_to_file(nullptr, false, &fr);
6045 }
6046 
6047 // Same as igv_print() above but with a specified phase name.
6048 void igv_print(const char* phase_name, void* sp, void* fp, void* pc) {
6049   frame fr(sp, fp, pc);
6050   Compile::current()->igv_print_method_to_file(phase_name, false, &fr);
6051 }
6052 
6053 // Called from debugger. Prints method with the default phase name to the default network or the one specified with
6054 // the network flags for the Ideal Graph Visualizer, or to the default file depending on the 'network' argument.
6055 // This works regardless of any Ideal Graph Visualizer flags set or not.
6056 // Use in debugger (gdb/rr): p igv_print(true, $sp, $fp, $pc).
6057 void igv_print(bool network, void* sp, void* fp, void* pc) {
6058   frame fr(sp, fp, pc);
6059   if (network) {
6060     Compile::current()->igv_print_method_to_network(nullptr, &fr);
6061   } else {
6062     Compile::current()->igv_print_method_to_file(nullptr, false, &fr);
6063   }
6064 }
6065 
6066 // Same as igv_print(bool network, ...) above but with a specified phase name.
6067 // Use in debugger (gdb/rr): p igv_print(true, "MyPhase", $sp, $fp, $pc).
6068 void igv_print(bool network, const char* phase_name, void* sp, void* fp, void* pc) {
6069   frame fr(sp, fp, pc);
6070   if (network) {
6071     Compile::current()->igv_print_method_to_network(phase_name, &fr);
6072   } else {
6073     Compile::current()->igv_print_method_to_file(phase_name, false, &fr);
6074   }
6075 }
6076 
6077 // Called from debugger. Normal write to the default _printer. Only works if Ideal Graph Visualizer printing flags are set.
6078 void igv_print_default() {
6079   Compile::current()->print_method(PHASE_DEBUG, 0);
6080 }
6081 
6082 // Called from debugger, especially when replaying a trace in which the program state cannot be altered like with rr replay.
6083 // A method is appended to an existing default file with the default phase name. This means that igv_append() must follow
6084 // an earlier igv_print(*) call which sets up the file. This works regardless of any Ideal Graph Visualizer flags set or not.
6085 // Use in debugger (gdb/rr): p igv_append($sp, $fp, $pc).
6086 void igv_append(void* sp, void* fp, void* pc) {
6087   frame fr(sp, fp, pc);
6088   Compile::current()->igv_print_method_to_file(nullptr, true, &fr);
6089 }
6090 
6091 // Same as igv_append(...) above but with a specified phase name.
6092 // Use in debugger (gdb/rr): p igv_append("MyPhase", $sp, $fp, $pc).
6093 void igv_append(const char* phase_name, void* sp, void* fp, void* pc) {
6094   frame fr(sp, fp, pc);
6095   Compile::current()->igv_print_method_to_file(phase_name, true, &fr);
6096 }
6097 
6098 void Compile::igv_print_method_to_file(const char* phase_name, bool append, const frame* fr) {
6099   const char* file_name = "custom_debug.xml";
6100   if (_debug_file_printer == nullptr) {
6101     _debug_file_printer = new IdealGraphPrinter(C, file_name, append);
6102   } else {
6103     _debug_file_printer->update_compiled_method(C->method());
6104   }
6105   tty->print_cr("Method %s to %s", append ? "appended" : "printed", file_name);
6106   _debug_file_printer->print_graph(phase_name, fr);
6107 }
6108 
6109 void Compile::igv_print_method_to_network(const char* phase_name, const frame* fr) {
6110   ResourceMark rm;
6111   GrowableArray<const Node*> empty_list;
6112   igv_print_graph_to_network(phase_name, empty_list, fr);
6113 }
6114 
6115 void Compile::igv_print_graph_to_network(const char* name, GrowableArray<const Node*>& visible_nodes, const frame* fr) {
6116   if (_debug_network_printer == nullptr) {
6117     _debug_network_printer = new IdealGraphPrinter(C);
6118   } else {
6119     _debug_network_printer->update_compiled_method(C->method());
6120   }
6121   tty->print_cr("Method printed over network stream to IGV");
6122   _debug_network_printer->print(name, C->root(), visible_nodes, fr);
6123 }
6124 #endif // !PRODUCT
6125 
6126 Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res) {
6127   if (type != nullptr && phase->type(value)->higher_equal(type)) {
6128     return value;
6129   }
6130   Node* result = nullptr;
6131   if (bt == T_BYTE) {
6132     result = phase->transform(new LShiftINode(value, phase->intcon(24)));
6133     result = new RShiftINode(result, phase->intcon(24));
6134   } else if (bt == T_BOOLEAN) {
6135     result = new AndINode(value, phase->intcon(0xFF));
6136   } else if (bt == T_CHAR) {
6137     result = new AndINode(value,phase->intcon(0xFFFF));
6138   } else if (bt == T_FLOAT) {
6139     result = new MoveI2FNode(value);
6140   } else {
6141     assert(bt == T_SHORT, "unexpected narrow type");
6142     result = phase->transform(new LShiftINode(value, phase->intcon(16)));
6143     result = new RShiftINode(result, phase->intcon(16));
6144   }
6145   if (transform_res) {
6146     result = phase->transform(result);
6147   }
6148   return result;
6149 }
6150 
6151 void Compile::record_method_not_compilable_oom() {
6152   record_method_not_compilable(CompilationMemoryStatistic::failure_reason_memlimit());
6153 }
6154 
6155 #ifndef PRODUCT
6156 // Collects all the control inputs from nodes on the worklist and from their data dependencies
6157 static void find_candidate_control_inputs(Unique_Node_List& worklist, Unique_Node_List& candidates) {
6158   // Follow non-control edges until we reach CFG nodes
6159   for (uint i = 0; i < worklist.size(); i++) {
6160     const Node* n = worklist.at(i);
6161     for (uint j = 0; j < n->req(); j++) {
6162       Node* in = n->in(j);
6163       if (in == nullptr || in->is_Root()) {
6164         continue;
6165       }
6166       if (in->is_CFG()) {
6167         if (in->is_Call()) {
6168           // The return value of a call is only available if the call did not result in an exception
6169           Node* control_proj_use = in->as_Call()->proj_out(TypeFunc::Control)->unique_out();
6170           if (control_proj_use->is_Catch()) {
6171             Node* fall_through = control_proj_use->as_Catch()->proj_out(CatchProjNode::fall_through_index);
6172             candidates.push(fall_through);
6173             continue;
6174           }
6175         }
6176 
6177         if (in->is_Multi()) {
6178           // We got here by following data inputs so we should only have one control use
6179           // (no IfNode, etc)
6180           assert(!n->is_MultiBranch(), "unexpected node type: %s", n->Name());
6181           candidates.push(in->as_Multi()->proj_out(TypeFunc::Control));
6182         } else {
6183           candidates.push(in);
6184         }
6185       } else {
6186         worklist.push(in);
6187       }
6188     }
6189   }
6190 }
6191 
6192 // Returns the candidate node that is a descendant to all the other candidates
6193 static Node* pick_control(Unique_Node_List& candidates) {
6194   Unique_Node_List worklist;
6195   worklist.copy(candidates);
6196 
6197   // Traverse backwards through the CFG
6198   for (uint i = 0; i < worklist.size(); i++) {
6199     const Node* n = worklist.at(i);
6200     if (n->is_Root()) {
6201       continue;
6202     }
6203     for (uint j = 0; j < n->req(); j++) {
6204       // Skip backedge of loops to avoid cycles
6205       if (n->is_Loop() && j == LoopNode::LoopBackControl) {
6206         continue;
6207       }
6208 
6209       Node* pred = n->in(j);
6210       if (pred != nullptr && pred != n && pred->is_CFG()) {
6211         worklist.push(pred);
6212         // if pred is an ancestor of n, then pred is an ancestor to at least one candidate
6213         candidates.remove(pred);
6214       }
6215     }
6216   }
6217 
6218   assert(candidates.size() == 1, "unexpected control flow");
6219   return candidates.at(0);
6220 }
6221 
6222 // Initialize a parameter input for a debug print call, using a placeholder for jlong and jdouble
6223 static void debug_print_init_parm(Node* call, Node* parm, Node* half, int* pos) {
6224   call->init_req((*pos)++, parm);
6225   const BasicType bt = parm->bottom_type()->basic_type();
6226   if (bt == T_LONG || bt == T_DOUBLE) {
6227     call->init_req((*pos)++, half);
6228   }
6229 }
6230 
6231 Node* Compile::make_debug_print_call(const char* str, address call_addr, PhaseGVN* gvn,
6232                               Node* parm0, Node* parm1,
6233                               Node* parm2, Node* parm3,
6234                               Node* parm4, Node* parm5,
6235                               Node* parm6) const {
6236   Node* str_node = gvn->transform(new ConPNode(TypeRawPtr::make(((address) str))));
6237   const TypeFunc* type = OptoRuntime::debug_print_Type(parm0, parm1, parm2, parm3, parm4, parm5, parm6);
6238   Node* call = new CallLeafNode(type, call_addr, "debug_print", TypeRawPtr::BOTTOM);
6239 
6240   // find the most suitable control input
6241   Unique_Node_List worklist, candidates;
6242   if (parm0 != nullptr) { worklist.push(parm0);
6243   if (parm1 != nullptr) { worklist.push(parm1);
6244   if (parm2 != nullptr) { worklist.push(parm2);
6245   if (parm3 != nullptr) { worklist.push(parm3);
6246   if (parm4 != nullptr) { worklist.push(parm4);
6247   if (parm5 != nullptr) { worklist.push(parm5);
6248   if (parm6 != nullptr) { worklist.push(parm6);
6249   /* close each nested if ===> */  } } } } } } }
6250   find_candidate_control_inputs(worklist, candidates);
6251   Node* control = nullptr;
6252   if (candidates.size() == 0) {
6253     control = C->start()->proj_out(TypeFunc::Control);
6254   } else {
6255     control = pick_control(candidates);
6256   }
6257 
6258   // find all the previous users of the control we picked
6259   GrowableArray<Node*> users_of_control;
6260   for (DUIterator_Fast kmax, i = control->fast_outs(kmax); i < kmax; i++) {
6261     Node* use = control->fast_out(i);
6262     if (use->is_CFG() && use != control) {
6263       users_of_control.push(use);
6264     }
6265   }
6266 
6267   // we do not actually care about IO and memory as it uses neither
6268   call->init_req(TypeFunc::Control,   control);
6269   call->init_req(TypeFunc::I_O,       top());
6270   call->init_req(TypeFunc::Memory,    top());
6271   call->init_req(TypeFunc::FramePtr,  C->start()->proj_out(TypeFunc::FramePtr));
6272   call->init_req(TypeFunc::ReturnAdr, top());
6273 
6274   int pos = TypeFunc::Parms;
6275   call->init_req(pos++, str_node);
6276   if (parm0 != nullptr) { debug_print_init_parm(call, parm0, top(), &pos);
6277   if (parm1 != nullptr) { debug_print_init_parm(call, parm1, top(), &pos);
6278   if (parm2 != nullptr) { debug_print_init_parm(call, parm2, top(), &pos);
6279   if (parm3 != nullptr) { debug_print_init_parm(call, parm3, top(), &pos);
6280   if (parm4 != nullptr) { debug_print_init_parm(call, parm4, top(), &pos);
6281   if (parm5 != nullptr) { debug_print_init_parm(call, parm5, top(), &pos);
6282   if (parm6 != nullptr) { debug_print_init_parm(call, parm6, top(), &pos);
6283   /* close each nested if ===> */  } } } } } } }
6284   assert(call->in(call->req()-1) != nullptr, "must initialize all parms");
6285 
6286   call = gvn->transform(call);
6287   Node* call_control_proj = gvn->transform(new ProjNode(call, TypeFunc::Control));
6288 
6289   // rewire previous users to have the new call as control instead
6290   PhaseIterGVN* igvn = gvn->is_IterGVN();
6291   for (int i = 0; i < users_of_control.length(); i++) {
6292     Node* use = users_of_control.at(i);
6293     for (uint j = 0; j < use->req(); j++) {
6294       if (use->in(j) == control) {
6295         if (igvn != nullptr) {
6296           igvn->replace_input_of(use, j, call_control_proj);
6297         } else {
6298           gvn->hash_delete(use);
6299           use->set_req(j, call_control_proj);
6300           gvn->hash_insert(use);
6301         }
6302       }
6303     }
6304   }
6305 
6306   return call;
6307 }
6308 #endif // !PRODUCT