/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/cHeapStringHolder.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/compilerEvent.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phase.hpp"
#include "opto/phasetype.hpp"
#include "opto/printinlining.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ticks.hpp"
#include "utilities/vmEnums.hpp"

class AbstractLockNode;
class AddPNode;
class Block;
class Bundle;
class CallGenerator;
class CallNode;
class CallStaticJavaNode;
class CloneMap;
class CompilationFailureInfo;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_List;
class Node_Notes;
class NodeHash;
class NodeCloneInfo;
class OpaqueTemplateAssertionPredicateNode;
class OptoReg;
class ParsePredicateNode;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseOutput;
class ReachabilityFenceNode;
class RootNode;
class relocInfo;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeInt;
class TypeInteger;
class TypeKlassPtr;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class TypeVect;
class Type_Array;
class Unique_Node_List;
class UnstableIfTrap;
class InlineTypeNode;
class nmethod;
class Node_Stack;
struct Final_Reshape_Counts;
class VerifyMeetResult;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsMaxUnroll,
  LoopOptsShenandoahExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify,
  PostLoopOptsExpandReachabilityFences
};

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;

class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump_on(outputStream* st) const;
};

class CloneMap {
  friend class Compile;
 private:
  bool      _debug;
  Dict*     _dict;
  int       _clone_idx;   // current cloning iteration/generation in loop unroll
 public:
  void*     _2p(node_idx_t key)   const          { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const  { return (node_idx_t)(intptr_t)k; }
  Dict*     dict()                const          { return _dict; }
  void insert(node_idx_t key, uint64_t val)      { assert(_dict->operator[](_2p(key)) == nullptr, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key)                    { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key)  const          { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key)  const          { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key)         const          { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k)          const          { return gen(_2_node_idx_t(k)); }
  int max_gen()                   const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key, outputStream* st) const;

  int  clone_idx() const                         { return _clone_idx; }
  void set_clone_idx(int x)                      { _clone_idx = x; }
  bool is_debug()                 const          { return _debug; }
  void set_debug(bool debug)                     { _debug = debug; }

  bool same_idx(node_idx_t k1, node_idx_t k2)  const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2)  const { return gen(k1) == gen(k2); }
};

class Options {
  friend class Compile;
 private:
  const bool _subsume_loads;         // Load can be matched as part of a larger op.
  const bool _do_escape_analysis;    // Do escape analysis.
  const bool _do_iterative_escape_analysis;  // Do iterative escape analysis.
  const bool _do_reduce_allocation_merges;  // Do try to reduce allocation merges.
  const bool _eliminate_boxing;      // Do boxing elimination.
  const bool _do_locks_coarsening;   // Do locks coarsening
  const bool _do_superword;          // Do SuperWord
  const bool _install_code;          // Install the code that was compiled
 public:
  Options(bool subsume_loads,
          bool do_escape_analysis,
          bool do_iterative_escape_analysis,
          bool do_reduce_allocation_merges,
          bool eliminate_boxing,
          bool do_locks_coarsening,
          bool do_superword,
          bool install_code) :
          _subsume_loads(subsume_loads),
          _do_escape_analysis(do_escape_analysis),
          _do_iterative_escape_analysis(do_iterative_escape_analysis),
          _do_reduce_allocation_merges(do_reduce_allocation_merges),
          _eliminate_boxing(eliminate_boxing),
          _do_locks_coarsening(do_locks_coarsening),
          _do_superword(do_superword),
          _install_code(install_code) {
  }

  static Options for_runtime_stub() {
    return Options(
       /* subsume_loads = */ true,
       /* do_escape_analysis = */ false,
       /* do_iterative_escape_analysis = */ false,
       /* do_reduce_allocation_merges = */ false,
       /* eliminate_boxing = */ false,
       /* do_locks_coarsening = */ false,
       /* do_superword = */ true,
       /* install_code = */ true
    );
  }
};

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };

  // Variant of TraceTime(nullptr, &_t_accumulator, CITime);
  // Integrated with logging.  If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
  class TracePhase : public TraceTime {
   private:
    Compile* const _compile;
    CompileLog* _log;
    const bool _dolog;
   public:
    TracePhase(PhaseTraceId phaseTraceId);
    TracePhase(const char* name, PhaseTraceId phaseTraceId);
    ~TracePhase();
    const char* phase_name() const { return title(); }
  };

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    const Type*     _element;       // relevant array element type, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that it is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    const Type*     element()       const { return _element; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == nullptr, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const Options         _options;               // Compilation options
  ciMethod*             _method;                // The method being compiled.
  int                   _entry_bci;             // entry bci for osr methods.
  const TypeFunc*       _tf;                    // My kind of signature
  InlineTree*           _ilt;                   // Ditto (temporary).
  address               _stub_function;         // VM entry for stub being compiled, or null
  const char*           _stub_name;             // Name of stub or adapter being compiled, or null
  StubId                _stub_id;               // unique id for stub or NO_STUBID
  address               _stub_entry_point;      // Compile code entry for generated stub, or null

  // Control of this compilation.
  int                   _max_inline_size;       // Max inline size for this compilation
  int                   _freq_inline_size;      // Max hot method inline size for this compilation
  int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                // allocator i.e. locks, original deopt pc, etc.
  uintx                 _max_node_limit;        // Max unique node count during a single compilation.

  bool                  _post_loop_opts_phase;  // Loop opts are finished.
  bool                  _merge_stores_phase;    // Phase for merging stores, after post loop opts phase.
  bool                  _allow_macro_nodes;     // True if we allow creation of macro nodes.

  /* If major progress is set:
   *   The loop tree information (get_ctrl, idom, get_loop, etc.) may be invalid, and the loop tree needs to be rebuilt.
   *   It also indicates that the graph changed in a way that makes further loop optimizations look promising.
   * If major progress is not set:
   *   The loop tree information is valid.
   *   If it is still unset at the end of a loop opts phase, we can stop loop opts: further rounds are not expected to make progress.
   *
   * This is not 100% accurate; the semantics of major progress have become less clear over time, but this is the general idea.
   */
  bool                  _major_progress;
  bool                  _inlining_progress;     // progress doing incremental inlining?
  bool                  _inlining_incrementally;// Are we doing incremental inlining (post parse)
  bool                  _strength_reduction;    // Are we doing strength reduction to direct call
  bool                  _do_cleanup;            // Cleanup is needed before proceeding with incremental inlining
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
  bool                  _has_boxed_value;       // True if a boxed object is allocated
  bool                  _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  bool                  _has_circular_inline_type; // True if method loads an inline type with a circular, non-flat field
  bool                  _needs_nm_slot;         // True if an extra stack slot is needed to hold the null marker at scalarized returns
  uint                  _max_vector_size;       // Maximum size of generated vectors
  bool                  _clear_upper_avx;       // Clear upper bits of ymm registers using vzeroupper
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
  bool                  _use_cmove;             // True if CMove should be used without profitability analysis
  bool                  _do_aliasing;           // True if we intend to do aliasing
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
  bool                  _print_inlining;        // True if we should print inlining for this compilation
  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
  bool                  _print_phase_loop_opts; // True if we should print before and after loop opts phase
#ifndef PRODUCT
  uint                  _phase_counter;         // Counter for the number of already printed phases
  uint                  _igv_idx;               // Counter for IGV node identifiers
  uint                  _igv_phase_iter[PHASE_NUM_TYPES]; // Counters for IGV phase iterations
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool                  _has_irreducible_loop;  // Found irreducible loops
  bool                  _has_monitors;          // Metadata transferred to nmethod to enable Continuations lock-detection fast path
  bool                  _has_scoped_access;     // For shared scope closure
  bool                  _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
  int                   _loop_opts_cnt;         // loop opts round
  bool                  _has_flat_accesses;     // Any known flat array accesses?
  bool                  _flat_accesses_share_alias; // Initially all flat array share a single slice
  bool                  _scalarize_in_safepoints; // Scalarize inline types in safepoint debug info
  uint                  _stress_seed;           // Seed for stress testing

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  void*                 _barrier_set_state;     // Potential GC barrier state for Compile
  ciEnv*                _env;                   // CI interface
  DirectiveSet*         _directive;             // Compiler directive
  CompileLog*           _log;                   // from CompilerThread
  CHeapStringHolder     _failure_reason;        // for record_failure/failing pattern
  CompilationFailureInfo* _first_failure_details; // Details for the first failure happening during compilation
  GrowableArray<CallGenerator*> _intrinsics;    // List of intrinsics.
  GrowableArray<Node*>  _macro_nodes;           // List of nodes which need to be expanded before matching.
  GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
  // List of OpaqueTemplateAssertionPredicateNode nodes for Template Assertion Predicates which can be seen as list
  // of Template Assertion Predicates themselves.
  GrowableArray<OpaqueTemplateAssertionPredicateNode*>  _template_assertion_predicate_opaques;
  GrowableArray<Node*>  _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<ReachabilityFenceNode*> _reachability_fences; // List of reachability fences
  GrowableArray<Node*>  _for_post_loop_igvn;    // List of nodes for IGVN after loop opts are over
  GrowableArray<Node*>  _inline_type_nodes;     // List of InlineType nodes
  GrowableArray<Node*>  _flat_access_nodes;     // List of LoadFlat and StoreFlat nodes
  GrowableArray<Node*>  _for_merge_stores_igvn; // List of nodes for IGVN merge stores
  GrowableArray<UnstableIfTrap*> _unstable_if_traps;        // List of ifnodes after IGVN
  GrowableArray<Node_List*> _coarsened_locks;   // List of coarsened Lock and Unlock nodes
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _igv_printer;
  static IdealGraphPrinter* _debug_file_printer;
  static IdealGraphPrinter* _debug_network_printer;
#endif

  // Node management
  uint                  _unique;                // Counter for unique Node indices
  uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
                                                // So use this to keep count and make the call O(1).
  VectorSet             _dead_node_list;        // Set of dead nodes
  DEBUG_ONLY(Unique_Node_List* _modified_nodes;)   // List of nodes whose inputs were modified
  DEBUG_ONLY(bool       _phase_optimize_finished;) // Used for live node verification while creating new nodes

  DEBUG_ONLY(bool       _phase_verify_ideal_loop;) // Are we in PhaseIdealLoop verification?

  // Arenas for new-space and old-space nodes.
  // Swapped between using _node_arena.
  // The lifetime of the old-space nodes is during xform.
  Arena                 _node_arena_one;
  Arena                 _node_arena_two;
  Arena*                _node_arena;
public:
  Arena* swap_old_and_new() {
    Arena* filled_arena_ptr = _node_arena;
    Arena* old_arena_ptr = old_arena();
    old_arena_ptr->destruct_contents();
    _node_arena = old_arena_ptr;
    return filled_arena_ptr;
  }
private:
  RootNode*             _root;                  // Unique root of compilation, or null after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  MachConstantBaseNode* _mach_constant_base_node;  // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;  // default notes for new nodes
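
  // For illustration: with a block size of 256 (1 << 8), the notes for node
  // idx live at
  //   block  = idx >> _log2_node_notes_block_size;    // which block
  //   offset = idx & (_node_notes_block_size - 1);    // slot within the block
  // (locate_node_notes(), declared below, performs the actual lookup.)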

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  CloneMap              _clone_map;             // used for recording history of cloned nodes
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN

  // Shared worklist for all IGVN rounds. Nodes can be pushed to it at any time.
  // If pushed outside IGVN, the Node is processed in the next IGVN round.
  Unique_Node_List*     _igvn_worklist;

  // Shared type array for GVN, IGVN and CCP. It maps node idx -> Type*.
  Type_Array*           _types;

  // Shared node hash table for GVN, IGVN and CCP.
  NodeHash*             _node_hash;

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  GrowableArray<CallGenerator*> _vector_reboxing_late_inlines; // same but for vector reboxing operations

  int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  bool                          _has_mh_late_inlines; // Can there still be a method handle late inlining pending?
                                                      // false: there can't be one
                                                      // true: we've enqueued one at some point so there may still be one

  // "MemLimit" directive was specified and the memory limit was hit during compilation
  bool                          _oom;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data; // Pointer to data loaded from file

  void log_late_inline_failure(CallGenerator* cg, const char* msg);
  DEBUG_ONLY(bool _exception_backedge;)

  void record_method_not_compilable_oom();

  InlinePrinter _inline_printer;

public:
  void* barrier_set_state() const { return _barrier_set_state; }

  InlinePrinter* inline_printer() { return &_inline_printer; }

#ifndef PRODUCT
  IdealGraphPrinter* igv_printer() { return _igv_printer; }
  void reset_igv_phase_iter(CompilerPhaseType cpt) { _igv_phase_iter[cpt] = 0; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);
  void dump_inline_data_reduced(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  int                   _java_calls;            // Number of java calls in the method
  int                   _inner_loops;           // Number of inner loops in the method
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
  int                   _interpreter_frame_size;

  // Holds dynamically allocated extensions of short-lived register masks. Such
  // extensions are potentially quite large and need tight resource marks which
  // may conflict with other allocations in the default resource area.
  // Therefore, we use a dedicated resource area for register masks.
  ResourceArea          _regmask_arena;

  PhaseOutput*          _output;

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  int interpreter_frame_size() const            { return _interpreter_frame_size; }

  PhaseOutput*      output() const              { return _output; }
  void              set_output(PhaseOutput* o)  { _output = o; }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int               compile_id() const          { return _compile_id; }
  DirectiveSet*     directive() const           { return _directive; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool              subsume_loads() const       { return _options._subsume_loads; }
  /** Do escape analysis. */
  bool              do_escape_analysis() const  { return _options._do_escape_analysis; }
  bool              do_iterative_escape_analysis() const  { return _options._do_iterative_escape_analysis; }
  bool              do_reduce_allocation_merges() const  { return _options._do_reduce_allocation_merges; }
  /** Do boxing elimination. */
  bool              eliminate_boxing() const    { return _options._eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool              aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
  bool              should_install_code() const { return _options._install_code; }
  /** Do locks coarsening. */
  bool              do_locks_coarsening() const { return _options._do_locks_coarsening; }
  bool              do_superword() const        { return _options._do_superword; }

  // Other fixed compilation parameters.
  ciMethod*         method() const              { return _method; }
  int               entry_bci() const           { return _entry_bci; }
  bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
  const TypeFunc*   tf() const                  { assert(_tf!=nullptr, ""); return _tf; }
  void         init_tf(const TypeFunc* tf)      { assert(_tf==nullptr, ""); _tf = tf; }
  InlineTree*       ilt() const                 { return _ilt; }
  address           stub_function() const       { return _stub_function; }
  const char*       stub_name() const           { return _stub_name; }
  StubId            stub_id() const             { return _stub_id; }
  address           stub_entry_point() const    { return _stub_entry_point; }
  void          set_stub_entry_point(address z) { _stub_entry_point = z; }

  // Control of this compilation.
  int               fixed_slots() const         { assert(_fixed_slots >= 0, "");         return _fixed_slots; }
  void          set_fixed_slots(int n)          { _fixed_slots = n; }
  void          set_inlining_progress(bool z)   { _inlining_progress = z; }
  int               inlining_progress() const   { return _inlining_progress; }
  void          set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int               inlining_incrementally() const { return _inlining_incrementally; }
  void          set_strength_reduction(bool z)  { _strength_reduction = z; }
  bool              strength_reduction() const  { return _strength_reduction; }
  void          set_do_cleanup(bool z)          { _do_cleanup = z; }
  int               do_cleanup() const          { return _do_cleanup; }
  bool              major_progress() const      { return _major_progress; }
  void          set_major_progress()            { _major_progress = true; }
  void          restore_major_progress(bool progress) { _major_progress = _major_progress || progress; }
  void        clear_major_progress()            { _major_progress = false; }
  int               max_inline_size() const     { return _max_inline_size; }
  void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
  int               freq_inline_size() const    { return _freq_inline_size; }
  void          set_max_inline_size(int n)      { _max_inline_size = n; }
  bool              has_loops() const           { return _has_loops; }
  void          set_has_loops(bool z)           { _has_loops = z; }
  bool              has_split_ifs() const       { return _has_split_ifs; }
  void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
  bool              has_unsafe_access() const   { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
  bool              has_stringbuilder() const   { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
  bool              has_boxed_value() const     { return _has_boxed_value; }
  void          set_has_boxed_value(bool z)     { _has_boxed_value = z; }
  bool              has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void          set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  bool              has_circular_inline_type() const { return _has_circular_inline_type; }
  void          set_has_circular_inline_type(bool z) { _has_circular_inline_type = z; }
  uint              max_vector_size() const     { return _max_vector_size; }
  void          set_max_vector_size(uint s)     { _max_vector_size = s; }
  bool              clear_upper_avx() const     { return _clear_upper_avx; }
  void          set_clear_upper_avx(bool s)     { _clear_upper_avx = s; }
  void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
  uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const  { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
  uint              decompile_count() const     { return _decompile_count; }
  void          set_decompile_count(uint c)     { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const         { return _do_inlining; }
  void          set_do_inlining(bool z)         { _do_inlining = z; }
  bool              do_scheduling() const       { return _do_scheduling; }
  void          set_do_scheduling(bool z)       { _do_scheduling = z; }
  bool              do_freq_based_layout() const{ return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
  bool              do_vector_loop() const      { return _do_vector_loop; }
  void          set_do_vector_loop(bool z)      { _do_vector_loop = z; }
  bool              use_cmove() const           { return _use_cmove; }
  void          set_use_cmove(bool z)           { _use_cmove = z; }
  bool              do_aliasing() const          { return _do_aliasing; }
  bool              print_assembly() const       { return _print_assembly; }
  void          set_print_assembly(bool z)       { _print_assembly = z; }
  bool              print_inlining() const       { return _print_inlining; }
  void          set_print_inlining(bool z)       { _print_inlining = z; }
  bool              print_intrinsics() const     { return _print_intrinsics; }
  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
  uint              max_node_limit() const       { return (uint)_max_node_limit; }
  void          set_max_node_limit(uint n)       { _max_node_limit = n; }
  bool              clinit_barrier_on_entry()       { return _clinit_barrier_on_entry; }
  void          set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
  void          set_flat_accesses()              { _has_flat_accesses = true; }
  bool          flat_accesses_share_alias() const { return _flat_accesses_share_alias; }
  void          set_flat_accesses_share_alias(bool z) { _flat_accesses_share_alias = z; }
  bool          scalarize_in_safepoints() const { return _scalarize_in_safepoints; }
  void          set_scalarize_in_safepoints(bool z) { _scalarize_in_safepoints = z; }

  // Support for scalarized inline type calling convention
  bool              has_scalarized_args() const  { return _method != nullptr && _method->has_scalarized_args(); }
  bool              needs_stack_repair()  const  { return _method != nullptr && _method->c2_needs_stack_repair(); }
  bool              needs_nm_slot()       const  { return _needs_nm_slot; }
  void          set_needs_nm_slot(bool v)        { _needs_nm_slot = v; }

  bool              has_monitors() const         { return _has_monitors; }
  void          set_has_monitors(bool v)         { _has_monitors = v; }
  bool              has_scoped_access() const    { return _has_scoped_access; }
  void          set_has_scoped_access(bool v)    { _has_scoped_access = v; }

  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(CompileCommandEnum option) const {
    return method() != nullptr && method()->has_option(option);
  }

#ifndef PRODUCT
  uint          next_igv_idx()                  { return _igv_idx++; }
  bool          trace_opto_output() const       { return _trace_opto_output; }
  void          print_phase(const char* phase_name);
  void          print_ideal_ir(const char* compile_phase_name) const;
  bool          should_print_ideal() const      { return _directive->PrintIdealOption; }
  bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;  // Required for dumping IR nodes.
#endif
  bool              has_irreducible_loop() const { return _has_irreducible_loop; }
  void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  Ticks _latest_stage_start_counter;

  void begin_method();
  void end_method();

  void print_method(CompilerPhaseType compile_phase, int level, Node* n = nullptr);

#ifndef PRODUCT
  bool should_print_igv(int level);
  bool should_print_phase(int level) const;
  bool should_print_ideal_phase(CompilerPhaseType cpt) const;
  void init_igv();
  void dump_igv(const char* graph_name, int level = 3) {
    if (should_print_igv(level)) {
      _igv_printer->print_graph(graph_name, nullptr);
    }
  }

  void igv_print_method_to_file(const char* phase_name = nullptr, bool append = false, const frame* fr = nullptr);
  void igv_print_method_to_network(const char* phase_name = nullptr, const frame* fr = nullptr);
  void igv_print_graph_to_network(const char* name, GrowableArray<const Node*>& visible_nodes, const frame* fr);
  static IdealGraphPrinter* debug_file_printer() { return _debug_file_printer; }
  static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
#endif

  const GrowableArray<ParsePredicateNode*>& parse_predicates() const {
    return _parse_predicates;
  }

  const GrowableArray<OpaqueTemplateAssertionPredicateNode*>& template_assertion_predicate_opaques() const {
    return _template_assertion_predicate_opaques;
  }

  int           macro_count()             const { return _macro_nodes.length(); }
  int           parse_predicate_count()   const { return _parse_predicates.length(); }
  int           template_assertion_predicate_count() const { return _template_assertion_predicate_opaques.length(); }
  int           expensive_count()         const { return _expensive_nodes.length(); }
  int           coarsened_count()         const { return _coarsened_locks.length(); }
  Node*         macro_node(int idx)       const { return _macro_nodes.at(idx); }

  Node*         expensive_node(int idx)   const { return _expensive_nodes.at(idx); }

  ReachabilityFenceNode* reachability_fence(int idx) const { return _reachability_fences.at(idx); }
  int                    reachability_fences_count() const { return _reachability_fences.length(); }

  ConnectionGraph* congraph()                   { return _congraph;}
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes.contains(n), "duplicate entry in expand list");
    _macro_nodes.append(n);
  }
  void remove_macro_node(Node* n) {
    // This function may be called twice for a node, so we can only remove it
    // if it still exists.
    _macro_nodes.remove_if_existing(n);
    // Remove from coarsened locks list if present
    if (coarsened_count() > 0) {
      remove_coarsened_lock(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    _expensive_nodes.remove_if_existing(n);
  }

  void add_reachability_fence(ReachabilityFenceNode* rf) {
    _reachability_fences.append(rf);
  }

  void remove_reachability_fence(ReachabilityFenceNode* n) {
    _reachability_fences.remove_if_existing(n);
  }

  void add_parse_predicate(ParsePredicateNode* n) {
    assert(!_parse_predicates.contains(n), "duplicate entry in Parse Predicate list");
    _parse_predicates.append(n);
  }

  void remove_parse_predicate(ParsePredicateNode* n) {
    if (parse_predicate_count() > 0) {
      _parse_predicates.remove_if_existing(n);
    }
  }

  void add_template_assertion_predicate_opaque(OpaqueTemplateAssertionPredicateNode* n) {
    assert(!_template_assertion_predicate_opaques.contains(n),
           "Duplicate entry in Template Assertion Predicate OpaqueTemplateAssertionPredicate list");
    _template_assertion_predicate_opaques.append(n);
  }

  void remove_template_assertion_predicate_opaque(OpaqueTemplateAssertionPredicateNode* n) {
    if (template_assertion_predicate_count() > 0) {
      _template_assertion_predicate_opaques.remove_if_existing(n);
    }
  }
  void add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks);
  void remove_coarsened_lock(Node* n);
  bool coarsened_locks_consistent();
  void mark_unbalanced_boxes() const;

  bool       post_loop_opts_phase() { return _post_loop_opts_phase;  }
  void   set_post_loop_opts_phase() { _post_loop_opts_phase = true;  }
  void reset_post_loop_opts_phase() { _post_loop_opts_phase = false; }

#ifdef ASSERT
  bool       phase_verify_ideal_loop() const { return _phase_verify_ideal_loop; }
  void   set_phase_verify_ideal_loop() { _phase_verify_ideal_loop = true; }
  void reset_phase_verify_ideal_loop() { _phase_verify_ideal_loop = false; }
#endif

  bool       allow_macro_nodes() { return _allow_macro_nodes;  }
  void reset_allow_macro_nodes() { _allow_macro_nodes = false;  }

  void record_for_post_loop_opts_igvn(Node* n);
  void remove_from_post_loop_opts_igvn(Node* n);
  void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);

  // Keep track of inline type nodes for later processing
  void add_inline_type(Node* n);
  void remove_inline_type(Node* n);

  bool clear_argument_if_only_used_as_buffer_at_calls(Node* result_cast, PhaseIterGVN& igvn);

  void process_inline_types(PhaseIterGVN &igvn, bool remove = false);

  void add_flat_access(Node* n);
  void remove_flat_access(Node* n);
  void process_flat_accesses(PhaseIterGVN& igvn);

  template <class F>
  void for_each_flat_access(F consumer) {
    for (int i = _flat_access_nodes.length() - 1; i >= 0; i--) {
      consumer(_flat_access_nodes.at(i));
    }
  }

  void adjust_flat_array_access_aliases(PhaseIterGVN& igvn);

  void record_unstable_if_trap(UnstableIfTrap* trap);
  bool remove_unstable_if_trap(CallStaticJavaNode* unc, bool yield);
  void remove_useless_unstable_if_traps(Unique_Node_List &useful);
  void process_for_unstable_if_traps(PhaseIterGVN& igvn);

  bool     merge_stores_phase() { return _merge_stores_phase;  }
  void set_merge_stores_phase() { _merge_stores_phase = true;  }
  void record_for_merge_stores_igvn(Node* n);
  void remove_from_merge_stores_igvn(Node* n);
  void process_for_merge_stores_igvn(PhaseIterGVN& igvn);

  void shuffle_late_inlines();
  void shuffle_macro_nodes();
  void sort_macro_nodes();

  void mark_parse_predicate_nodes_useless(PhaseIterGVN& igvn);

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena*      comp_arena()           { return &_comp_arena; }
  ciEnv*      env() const            { return _env; }
  CompileLog* log() const            { return _log; }

  bool        failing_internal() const {
    return _env->failing() ||
           _failure_reason.get() != nullptr;
  }

  const char* failure_reason() const {
    return _env->failing() ? _env->failure_reason()
                           : _failure_reason.get();
  }

  const CompilationFailureInfo* first_failure_details() const { return _first_failure_details; }

  bool failing() {
    if (failing_internal()) {
      return true;
    }
#ifdef ASSERT
    // Disable stress code for PhaseIdealLoop verification (would have cascading effects).
    if (phase_verify_ideal_loop()) {
      return false;
    }
    if (StressBailout) {
      return fail_randomly();
    }
#endif
    return false;
  }

#ifdef ASSERT
  bool fail_randomly();
  bool failure_is_artificial();
#endif

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason.get()) ||
           (r != nullptr &&
            _failure_reason.get() != nullptr &&
            strcmp(r, _failure_reason.get()) == 0);
  }

  void record_failure(const char* reason DEBUG_ONLY(COMMA bool allow_multiple_failures = false));
  void record_method_not_compilable(const char* reason DEBUG_ONLY(COMMA bool allow_multiple_failures = false)) {
    env()->record_method_not_compilable(reason);
    // Record failure reason.
    record_failure(reason DEBUG_ONLY(COMMA allow_multiple_failures));
  }
  bool check_node_count(uint margin, const char* reason) {
    if (oom()) {
      record_method_not_compilable_oom();
      return true;
    }
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
  bool oom() const { return _oom; }
  void set_oom()   { _oom = true; }

  // Node management
  uint         unique() const              { return _unique; }
  uint         next_unique()               { return _unique++; }
  void         set_unique(uint i)          { _unique = i; }
  Arena*       node_arena()                { return _node_arena; }
  Arena*       old_arena()                 { return (&_node_arena_one == _node_arena) ? &_node_arena_two : &_node_arena_one; }
  RootNode*    root() const                { return _root; }
  void         set_root(RootNode* r)       { _root = r; }
  StartNode*   start() const;              // (Derived from root.)
  void         verify_start(StartNode* s) const NOT_DEBUG_RETURN;
  Node*        immutable_memory();

  Node*        recent_alloc_ctl() const    { return _recent_alloc_ctl; }
  Node*        recent_alloc_obj() const    { return _recent_alloc_obj; }
  void         set_recent_alloc(Node* ctl, Node* obj) {
                                                  _recent_alloc_ctl = ctl;
                                                  _recent_alloc_obj = obj;
                                           }
  void         record_dead_node(uint idx)  { if (_dead_node_list.test_set(idx)) return;
                                             _dead_node_count++;
                                           }
  void         reset_dead_node_list()      { _dead_node_list.reset();
                                             _dead_node_count = 0;
                                           }
  uint         live_nodes() const          {
    int val = _unique - _dead_node_count;
    assert(val >= 0, "number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique);
    return (uint) val;
  }
#ifdef ASSERT
  void         set_phase_optimize_finished() { _phase_optimize_finished = true; }
  bool         phase_optimize_finished() const { return _phase_optimize_finished; }
  uint         count_live_nodes_by_graph_walk();
  void         print_missing_nodes();
#endif

  // Record modified nodes to check that they are put on IGVN worklist
  void         record_modified_node(Node* n) NOT_DEBUG_RETURN;
  void         remove_modified_node(Node* n) NOT_DEBUG_RETURN;
  DEBUG_ONLY( Unique_Node_List*   modified_nodes() const { return _modified_nodes; } )

  MachConstantBaseNode*     mach_constant_base_node();
  bool                  has_mach_constant_base_node() const { return _mach_constant_base_node != nullptr; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool                      needs_deep_clone_jvms();

  // Handy undefined Node
  Node*             top() const                 { return _top; }

  // These are used by code that needs to know about the creation and transformation of top:
  Node*             cached_top_node()           { return _top; }
  void          set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const        { return _default_node_notes; }
  void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes*       node_notes_at(int idx);

  inline bool   set_node_notes_at(int idx, Node_Notes* value);
  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena*            type_arena()                { return _type_arena; }
  Dict*             type_dict()                 { return _type_dict; }
  size_t            type_last_size()            { return _type_last_size; }
  int               num_alias_types()           { return _num_alias_types; }

  void          init_type_arena()                       { _type_arena = &_Compile_types; }
  void          set_type_arena(Arena* a)                { _type_arena = a; }
  void          set_type_dict(Dict* d)                  { _type_dict = d; }
  void          set_type_last_size(size_t sz)           { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : nullptr;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != nullptr || tf == nullptr, "");
    _last_tf_m = m;
    _last_tf = tf;
  }
1029 
1030   AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
1031   AliasType*        alias_type(const TypePtr* adr_type, ciField* field = nullptr, bool uncached = false) { return find_alias_type(adr_type, false, field, uncached); }
1032   bool         have_alias_type(const TypePtr* adr_type);
1033   AliasType*        alias_type(ciField*         field);
1034 
1035   int               get_alias_index(const TypePtr* at, bool uncached = false) { return alias_type(at, nullptr, uncached)->index(); }
1036   const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
1037   int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }
1038 
1039   // Building nodes
1040   void              rethrow_exceptions(JVMState* jvms);
1041   void              return_values(JVMState* jvms);
1042   JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);
1043 
1044   // Decide how to build a call.
1045   // The profile factor is a discount to apply to this site's interp. profile.
1046   CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
1047                                    JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = nullptr,
1048                                    bool allow_intrinsics = true);
1049   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
1050     return C->directive()->should_delay_inline(call_method) ||
1051            should_delay_string_inlining(call_method, jvms) ||
1052            should_delay_boxing_inlining(call_method, jvms) ||
1053            should_delay_vector_inlining(call_method, jvms);
1054   }
1055   bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
1056   bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
1057   bool should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms);
1058   bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at a call site
  ciMethod* optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool &call_does_dispatch, int &vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report whether there were too many traps at the given method and bci:
  // report true if a trap was recorded and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = nullptr);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int      allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();
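  // Illustrative check (a sketch; the mask holds one bit per DeoptReason):
  // a set bit means deoptimizing for that reason is still permitted here, e.g.
  //   bool can_trap = (C->allowed_deopt_reasons() &
  //                    (1 << Deoptimization::Reason_null_check)) != 0;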

  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* igvn_worklist() {
    assert(_igvn_worklist != nullptr, "must be created in Compile::Compile");
    return _igvn_worklist;
  }
  Type_Array* types() {
    assert(_types != nullptr, "must be created in Compile::Compile");
    return _types;
  }
  NodeHash* node_hash() {
    assert(_node_hash != nullptr, "must be created in Compile::Compile");
    return _node_hash;
  }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List in node.hpp.
  inline void       remove_for_igvn(Node* n);   // Body is after class Unique_Node_List in node.hpp.
  void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);
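  // Typical use (a sketch; the node names are hypothetical): splice one node
  // over another during parse-time expansion and let IGVN clean up afterwards:
  //   C->gvn_replace_by(old_result, new_value);  // rewires all uses of old_result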

  void              identify_useful_nodes(Unique_Node_List &useful);
  void              update_dead_node_list(Unique_Node_List &useful);
  void disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints = nullptr);

  void              remove_useless_node(Node* dead);

  // Record this CallGenerator for inlining at the end of parsing.
  void              add_late_inline(CallGenerator* cg)        {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    if (StressIncrementalInlining) {
      assert(_late_inlines_pos < _late_inlines.length(), "unthinkable!");
      if (_late_inlines.length() - _late_inlines_pos >= 2) {
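        // Under StressIncrementalInlining, swap the freshly inserted generator
        // with a randomly chosen pending one to perturb the inlining order.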
        int j = (C->random() % (_late_inlines.length() - _late_inlines_pos)) + _late_inlines_pos;
        swap(_late_inlines.at(_late_inlines_pos), _late_inlines.at(j));
      }
    }
    _late_inlines_pos++;
  }

  void              prepend_late_inline(CallGenerator* cg)    {
    _late_inlines.insert_before(0, cg);
  }

  void              add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void              add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void              add_vector_reboxing_late_inline(CallGenerator* cg) {
    _vector_reboxing_late_inlines.push(cg);
  }

  template<typename N, ENABLE_IF(std::is_base_of<Node, N>::value)>
  void remove_useless_nodes(GrowableArray<N*>& node_list, Unique_Node_List& useful);

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead);

  void remove_useless_coarsened_locks(Unique_Node_List& useful);

  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }

  void mark_has_mh_late_inlines() { _has_mh_late_inlines = true; }
  bool has_mh_late_inlines() const { return _has_mh_late_inlines; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
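  // Rough shape of the incremental inlining driver (a sketch; see
  // Compile::inline_incrementally in compile.cpp for the real loop and its
  // cutoff handling):
  //   while (_late_inlines.length() > 0) {
  //     if (over_inlining_cutoff()) break;     // out of node budget
  //     inline_incrementally_one();            // expand a batch of queued call sites
  //     inline_incrementally_cleanup(igvn);    // IGVN pass + dead node elimination
  //   }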
  bool should_stress_inlining() { return StressIncrementalInlining && (random() % 2) == 0; }
  bool should_delay_inlining() { return AlwaysIncrementalInline || should_stress_inlining(); }
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  void inline_vector_reboxing_calls();
  bool has_vbox_nodes();

  void process_late_inline_calls_no_inline(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  ResourceArea*     regmask_arena()             { return &_regmask_arena; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }

  void  update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }

  void          set_matcher(Matcher* m)                 { _matcher = m; }
//void          set_regalloc(PhaseRegAlloc* ra)           { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)            { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p)   { _indexSet_free_block_list = p; }

  void  set_java_calls(int z) { _java_calls  = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  Dependencies* dependencies() { return env()->dependencies(); }

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, ciMethod* target,
          int entry_bci, Options options, DirectiveSet* directive);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          StubId stub_id, int is_fancy_jump, bool pass_tls,
          bool return_pc, DirectiveSet* directive);

  ~Compile();

  // Are we compiling a method?
  bool has_method() { return method() != nullptr; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // Returns true if adr is completely contained in the given alias category.
  bool must_alias(const TypePtr* adr, int alias_idx);

  // Returns true if adr overlaps with the given alias category.
  bool can_alias(const TypePtr* adr, int alias_idx);
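  // For example (a sketch; 'field_at' is a hypothetical instance-field address
  // type): an address confined to one field must_alias that field's own
  // category, while a raw or unclassified address merely can_alias many:
  //   assert(C->must_alias(field_at, C->get_alias_index(field_at)), "own category");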

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots() {
    return SharedRuntime::in_preserve_stack_slots();
  }

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  static uint out_preserve_stack_slots() {
    return SharedRuntime::out_preserve_stack_slots();
  }

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of stack slots consumed by a synchronization entry.
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(bool aliasing);                      // Prepare for a single compilation
  void Optimize();                               // Given a graph, optimize it
  void Code_Gen();                               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field, bool uncached = false);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
  void           register_intrinsic(CallGenerator* cg);                    // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[];
  static jubyte _intrinsic_hist_flags[];
#endif
  // Helpers called only by the public function final_graph_reshaping;
  // there is no need to make them public, as they are not called elsewhere.
  void final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned);

  // Logic cone optimization.
  void optimize_logic_cones(PhaseIterGVN &igvn);
  void collect_logic_cone_roots(Unique_Node_List& list);
  void process_logic_cone_root(PhaseIterGVN &igvn, Node* n, VectorSet& visited);
  bool compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs);
  uint compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs);
  uint eval_macro_logic_op(uint func, uint op1, uint op2, uint op3);
  Node* xform_to_MacroLogicV(PhaseIterGVN &igvn, const TypeVect* vt, Unique_Node_List& partitions, Unique_Node_List& inputs);
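  // Background (a sketch of the encoding): a cone of up to three logic inputs
  // is summarized as an 8-bit truth table over the canonical input patterns
  // A = 0xF0, B = 0xCC, C = 0xAA.  For instance (A & B) | C gives
  // (0xF0 & 0xCC) | 0xAA = 0xEA, the same immediate encoding that AVX-512
  // vpternlog-style instructions consume.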
  void check_no_dead_use() const NOT_DEBUG_RETURN;

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked = 1,      // succeeded at least once
    _intrinsic_failed = 2,      // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
  };
  // Update the histogram.  Return true if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one correspondence
  // between Use-Def edges and Def-Use edges. The option no_dead_code enables
  // stronger checks that the graph is strongly connected from starting points
  // in both directions.
  // root_and_safepoints is used to give the starting points for the traversal.
  // If not supplied, only root is used. When this check is called after CCP,
  // we need to start traversal from Root and safepoints, just like CCP does its
  // own traversal (see PhaseCCP::transform for reasons).
  //
  // There are two ways to call this function:
  // - give root_and_safepoints to start the traversal everywhere needed (like after CCP)
  // - if the whole graph is assumed to be reachable from Root's input,
  //   root_and_safepoints is not needed (like in PhaseRemoveUseless).
  //
  // Failing to pass root_and_safepoints when the graph is not fully reachable
  // from Root's input makes this check unsound (it can miss inconsistencies)
  // and, if no_dead_code is true, even incorrect (it can report non-existing
  // problems).
  void verify_graph_edges(bool no_dead_code = false, const Unique_Node_List* root_and_safepoints = nullptr) const PRODUCT_RETURN;

  // Verify bi-directional correspondence of edges
  void verify_bidirectional_edges(Unique_Node_List& visited, const Unique_Node_List* root_and_safepoints = nullptr) const;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Materialize reachability fences from reachability edges on safepoints.
  void expand_reachability_edges(Unique_Node_List& safepoints);

  // Static parse-time type checking logic for gen_subtype_check:
  enum SubTypeCheckResult { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  SubTypeCheckResult static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip = StressReflectiveCode);
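  // For example (a sketch): subk known to equal superk yields SSC_always_true;
  // two unrelated exact klasses yield SSC_always_false; SSC_easy_test means a
  // single klass compare settles the question (e.g. the superklass is a leaf
  // type); SSC_full_test requires the generic subtype check at runtime.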

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on a range check)
                              Node* ctrl = nullptr);

  // Convert an integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);

  // Auxiliary methods for randomized fuzzing/stressing
  int random();
  bool randomized_select(int count);
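  // Typical stress-mode use (a sketch; the flag and transform are hypothetical):
  //   if (StressSomeTransform && C->randomized_select(2)) {
  //     // take the alternative path on a pseudo-random subset of decisions
  //   }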

  // Seed random number generation and log the seed for repeatability.
  void initialize_stress_seed(const DirectiveSet* directive);

  // supporting clone_map
  CloneMap&     clone_map();
  void          set_clone_map(Dict* d);

  bool needs_clinit_barrier(ciField* ik,         ciMethod* accessing_method);
  bool needs_clinit_barrier(ciMethod* ik,        ciMethod* accessing_method);
  bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);

#ifdef ASSERT
  VerifyMeetResult* _type_verify;
  void set_exception_backedge() { _exception_backedge = true; }
  bool has_exception_backedge() const { return _exception_backedge; }
#endif

  static bool push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry,
                            BasicType out_bt, BasicType in_bt);

  static Node* narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res);

#ifndef PRODUCT
private:
  // A non-template helper: keeps the variadic template below a thin wrapper.
  Node* make_debug_print_call(const char* str, address call_addr, PhaseGVN* gvn,
                              Node* parm0 = nullptr, Node* parm1 = nullptr,
                              Node* parm2 = nullptr, Node* parm3 = nullptr,
                              Node* parm4 = nullptr, Node* parm5 = nullptr,
                              Node* parm6 = nullptr) const;

public:
  // Creates a CallLeafNode for a runtime call that prints a static string and the values of the
  // nodes passed as arguments.
  // This function also takes care of doing the necessary wiring, including finding a suitable control
  // based on the nodes that need to be printed. Note that passing nodes that have incompatible controls
  // is undefined behavior.
  template <typename... TT, typename... NN>
  Node* make_debug_print(const char* str, PhaseGVN* gvn, NN... in) {
    address call_addr = CAST_FROM_FN_PTR(address, SharedRuntime::debug_print<TT...>);
    return make_debug_print_call(str, call_addr, gvn, in...);
  }
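  // Illustrative call (a sketch; 'lo' and 'hi' are hypothetical Node*, 'gvn'
  // a PhaseGVN*):
  //   Node* dbg = C->make_debug_print<jlong, jlong>("range", gvn, lo, hi);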
#endif
};

#endif // SHARE_OPTO_COMPILE_HPP