/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/cHeapStringHolder.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/compilerEvent.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phase.hpp"
#include "opto/phasetype.hpp"
#include "opto/printinlining.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"
#include "utilities/vmEnums.hpp"

class AbstractLockNode;
class AddPNode;
class Block;
class Bundle;
class CallGenerator;
class CallNode;
class CallStaticJavaNode;
class CloneMap;
class CompilationFailureInfo;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_List;
class Node_Notes;
class NodeHash;
class NodeCloneInfo;
class OpaqueTemplateAssertionPredicateNode;
class OptoReg;
class ParsePredicateNode;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseOutput;
class RootNode;
class relocInfo;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeInt;
class TypeInteger;
class TypeKlassPtr;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class TypeVect;
class Type_Array;
class Unique_Node_List;
class UnstableIfTrap;
class InlineTypeNode;
class nmethod;
class Node_Stack;
struct Final_Reshape_Counts;
class VerifyMeetResult;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsMaxUnroll,
  LoopOptsShenandoahExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify
};

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;

class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump_on(outputStream* st) const;
};
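
// A minimal worked example (not part of the interface) of the 64-bit packing
// used above: the original node index lives in the low 32 bits and the clone
// generation in the high 32 bits, so both fields survive a round-trip:
//
//   NodeCloneInfo info(/* idx */ 42, /* gen */ 3);
//   assert(info.idx() == 42, "low 32 bits hold the index");
//   assert(info.gen() == 3,  "high 32 bits hold the generation");
//   assert(info.get() == ((CONST64(3) << 32) | 42), "packed layout");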

class CloneMap {
  friend class Compile;
 private:
  bool      _debug;
  Dict*     _dict;
  int       _clone_idx;   // current cloning iteration/generation in loop unroll
 public:
  void*      _2p(node_idx_t key)   const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; }
  Dict*      dict()                const { return _dict; }
  void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == nullptr, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key)            { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key)   const { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key)   const { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key)          const { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k)           const { return gen(_2_node_idx_t(k)); }
  int max_gen()                    const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key, outputStream* st) const;

  int  clone_idx() const                 { return _clone_idx; }
  void set_clone_idx(int x)              { _clone_idx = x; }
  bool is_debug()                  const { return _debug; }
  void set_debug(bool debug)             { _debug = debug; }

  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
};
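
// An illustrative sketch of the intended use during loop unrolling. The node
// names below are hypothetical; the real call sites live in the loop optimizer:
//
//   CloneMap& cm = C->clone_map();
//   cm.set_clone_idx(cm.max_gen() + 1);       // open a new cloning generation
//   cm.clone(old_n, new_n, cm.clone_idx());   // record that new_n clones old_n
//   // afterwards cm.idx(new_n->_idx) yields old_n's original index and
//   // cm.gen(new_n->_idx) the generation in which new_n was created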

class Options {
  friend class Compile;
 private:
  const bool _subsume_loads;                // Load can be matched as part of a larger op.
  const bool _do_escape_analysis;           // Do escape analysis.
  const bool _do_iterative_escape_analysis; // Do iterative escape analysis.
  const bool _do_reduce_allocation_merges;  // Do try to reduce allocation merges.
  const bool _eliminate_boxing;             // Do boxing elimination.
  const bool _do_locks_coarsening;          // Do locks coarsening
  const bool _do_superword;                 // Do SuperWord
  const bool _install_code;                 // Install the code that was compiled
 public:
  Options(bool subsume_loads,
          bool do_escape_analysis,
          bool do_iterative_escape_analysis,
          bool do_reduce_allocation_merges,
          bool eliminate_boxing,
          bool do_locks_coarsening,
          bool do_superword,
          bool install_code) :
          _subsume_loads(subsume_loads),
          _do_escape_analysis(do_escape_analysis),
          _do_iterative_escape_analysis(do_iterative_escape_analysis),
          _do_reduce_allocation_merges(do_reduce_allocation_merges),
          _eliminate_boxing(eliminate_boxing),
          _do_locks_coarsening(do_locks_coarsening),
          _do_superword(do_superword),
          _install_code(install_code) {
  }

  static Options for_runtime_stub() {
    return Options(
       /* subsume_loads = */ true,
       /* do_escape_analysis = */ false,
       /* do_iterative_escape_analysis = */ false,
       /* do_reduce_allocation_merges = */ false,
       /* eliminate_boxing = */ false,
       /* do_locks_coarsening = */ false,
       /* do_superword = */ true,
       /* install_code = */ true
    );
  }
};
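
// A hedged sketch of constructing Options for a normal method compilation.
// The real values are derived from compiler flags and directives at the
// Compile constructor's call site, so the literals below are illustrative only:
//
//   Options opts(/* subsume_loads = */ true,
//                /* do_escape_analysis = */ true,
//                /* do_iterative_escape_analysis = */ true,
//                /* do_reduce_allocation_merges = */ true,
//                /* eliminate_boxing = */ true,
//                /* do_locks_coarsening = */ true,
//                /* do_superword = */ true,
//                /* install_code = */ true);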

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };

  // Variant of TraceTime(nullptr, &_t_accumulator, CITime);
  // Integrated with logging.  If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
  class TracePhase : public TraceTime {
   private:
    Compile* const _compile;
    CompileLog*    _log;
    const bool     _dolog;
   public:
    TracePhase(PhaseTraceId phaseTraceId);
    TracePhase(const char* name, PhaseTraceId phaseTraceId);
    ~TracePhase();
    const char* phase_name() const { return title(); }
  };
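
  // TracePhase is an RAII helper: construct it on entry to a phase and its
  // destructor closes the timing interval (and the log bracket when
  // CITimeVerbose is on). A minimal sketch, assuming a PhaseTraceId named
  // _t_optimizer exists in phase.hpp:
  //
  //   {
  //     TracePhase tp(_t_optimizer);
  //     // ... run the phase; time accumulates only when CITime is set ...
  //   } // destructor emits the closing log bracket, if any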

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;          // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;       // normalized address type
    ciField*        _field;          // relevant instance field, or null if none
    const Type*     _element;        // relevant array element type, or null if none
    bool            _is_rewritable;  // false if the memory is write-once only
    int             _general_index;  // if this type is an instance, the general
                                     // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    const Type*     element()       const { return _element; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == nullptr, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1 << logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };
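
  // Each AliasType describes one memory slice. A hedged sketch of mapping a
  // memory operation's address type to its slice (adr_type is a hypothetical
  // TypePtr; see get_alias_index() below):
  //
  //   int idx = C->get_alias_index(adr_type);
  //   // idx == AliasIdxBot if the address may alias everything; otherwise it
  //   // selects the memory input of a MergeMemNode for that slice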

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const Options         _options;           // Compilation options
  ciMethod*             _method;            // The method being compiled.
  int                   _entry_bci;         // entry bci for osr methods.
  const TypeFunc*       _tf;                // My kind of signature
  InlineTree*           _ilt;               // Ditto (temporary).
  address               _stub_function;     // VM entry for stub being compiled, or null
  const char*           _stub_name;         // Name of stub or adapter being compiled, or null
  int                   _stub_id;           // unique id for stub or -1
  address               _stub_entry_point;  // Compile code entry for generated stub, or null

  // Control of this compilation.
  int                   _max_inline_size;   // Max inline size for this compilation
  int                   _freq_inline_size;  // Max hot method inline size for this compilation
  int                   _fixed_slots;       // count of frame slots not allocated by the register
                                            // allocator i.e. locks, original deopt pc, etc.
  uintx                 _max_node_limit;    // Max unique node count during a single compilation.

  bool                  _post_loop_opts_phase;  // Loop opts are finished.
  bool                  _merge_stores_phase;    // Phase for merging stores, after post loop opts phase.
  bool                  _allow_macro_nodes;     // True if we allow creation of macro nodes.

  int                   _major_progress;        // Count of something big happening
  bool                  _inlining_progress;     // progress doing incremental inlining?
  bool                  _inlining_incrementally;// Are we doing incremental inlining (post parse)
  bool                  _do_cleanup;            // Cleanup is needed before proceeding with incremental inlining
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
  bool                  _has_boxed_value;       // True if a boxed object is allocated
  bool                  _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  bool                  _has_circular_inline_type;  // True if method loads an inline type with a circular, non-flat field
  uint                  _max_vector_size;       // Maximum size of generated vectors
  bool                  _clear_upper_avx;       // Clear upper bits of ymm registers using vzeroupper
  uint                  _trap_hist[trapHistLength]; // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
  bool                  _use_cmove;             // True if CMove should be used without profitability analysis
  bool                  _do_aliasing;           // True if we intend to do aliasing
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
  bool                  _print_inlining;        // True if we should print inlining for this compilation
  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  uint                  _phase_counter;         // Counter for the number of already printed phases
  uint                  _igv_idx;               // Counter for IGV node identifiers
  uint                  _igv_phase_iter[PHASE_NUM_TYPES]; // Counters for IGV phase iterations
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool                  _has_irreducible_loop;  // Found irreducible loops
  // JSR 292
  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  bool                  _has_monitors;          // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
  bool                  _has_scoped_access;     // For shared scope closure
  bool                  _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
  int                   _loop_opts_cnt;         // loop opts round
  bool                  _has_flat_accesses;     // Any known flat array accesses?
  bool                  _flat_accesses_share_alias; // Initially all flat arrays share a single slice
  bool                  _scalarize_in_safepoints;   // Scalarize inline types in safepoint debug info
  uint                  _stress_seed;           // Seed for stress testing

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  void*                 _barrier_set_state;     // Potential GC barrier state for Compile
  ciEnv*                _env;                   // CI interface
  DirectiveSet*         _directive;             // Compiler directive
  CompileLog*           _log;                   // from CompilerThread
  CHeapStringHolder     _failure_reason;        // for record_failure/failing pattern
  CompilationFailureInfo* _first_failure_details; // Details for the first failure happening during compilation
  GrowableArray<CallGenerator*> _intrinsics;    // List of intrinsics.
  GrowableArray<Node*>  _macro_nodes;           // List of nodes which need to be expanded before matching.
  GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
  // List of OpaqueTemplateAssertionPredicateNode nodes for Template Assertion Predicates which can be seen as list
  // of Template Assertion Predicates themselves.
  GrowableArray<OpaqueTemplateAssertionPredicateNode*> _template_assertion_predicate_opaques;
  GrowableArray<Node*>  _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*>  _for_post_loop_igvn;    // List of nodes for IGVN after loop opts are over
  GrowableArray<Node*>  _inline_type_nodes;     // List of InlineType nodes
  GrowableArray<Node*>  _for_merge_stores_igvn; // List of nodes for IGVN merge stores
  GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
  GrowableArray<Node_List*> _coarsened_locks;   // List of coarsened Lock and Unlock nodes
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _igv_printer;
  static IdealGraphPrinter* _debug_file_printer;
  static IdealGraphPrinter* _debug_network_printer;
#endif

  // Node management
  uint                  _unique;                // Counter for unique Node indices
  uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
                                                // So use this to keep count and make the call O(1).
  VectorSet             _dead_node_list;        // Set of dead nodes
  DEBUG_ONLY(Unique_Node_List* _modified_nodes;)   // List of nodes whose inputs were modified
  DEBUG_ONLY(bool       _phase_optimize_finished;) // Used for live node verification while creating new nodes

  DEBUG_ONLY(bool       _phase_verify_ideal_loop;) // Are we in PhaseIdealLoop verification?

  // Arenas for new-space and old-space nodes.
  // Swapped between using _node_arena.
  // The lifetime of the old-space nodes is during xform.
  Arena                 _node_arena_one;
  Arena                 _node_arena_two;
  Arena*                _node_arena;
 public:
  Arena* swap_old_and_new() {
    Arena* filled_arena_ptr = _node_arena;
    Arena* old_arena_ptr = old_arena();
    old_arena_ptr->destruct_contents();
    _node_arena = old_arena_ptr;
    return filled_arena_ptr;
  }
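
  // The two node arenas implement a simple semi-space scheme for bulk
  // transforms: surviving nodes are re-created in the freshly emptied arena
  // and the previous arena is later reclaimed wholesale. An illustrative
  // sketch of the swap at the start of such a pass:
  //
  //   Arena* filled = C->swap_old_and_new();
  //   // new nodes now allocate in the emptied arena; the nodes in `filled`
  //   // (now the old space) stay readable for the duration of the xform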
 private:
  RootNode*             _root;                  // Unique root of compilation, or null after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;    // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  CloneMap              _clone_map;             // used for recording history of cloned nodes
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN

  // Shared worklist for all IGVN rounds. Nodes can be pushed to it at any time.
  // If pushed outside IGVN, the Node is processed in the next IGVN round.
  Unique_Node_List*     _igvn_worklist;

  // Shared type array for GVN, IGVN and CCP. It maps node idx -> Type*.
  Type_Array*           _types;

  // Shared node hash table for GVN, IGVN and CCP.
  NodeHash*             _node_hash;

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  GrowableArray<CallGenerator*> _vector_reboxing_late_inlines; // same but for vector reboxing operations

  int                   _late_inlines_pos;            // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint                  _number_of_mh_late_inlines;   // number of method handle late inlining still pending

  // "MemLimit" directive was specified and the memory limit was hit during compilation
  bool                  _oom;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data; // Pointer to data loaded from file

  void log_late_inline_failure(CallGenerator* cg, const char* msg);
  DEBUG_ONLY(bool _exception_backedge;)

  void record_method_not_compilable_oom();

  InlinePrinter _inline_printer;

 public:
  void* barrier_set_state() const { return _barrier_set_state; }

  InlinePrinter* inline_printer() { return &_inline_printer; }

#ifndef PRODUCT
  IdealGraphPrinter* igv_printer() { return _igv_printer; }
  void reset_igv_phase_iter(CompilerPhaseType cpt) { _igv_phase_iter[cpt] = 0; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);
  void dump_inline_data_reduced(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  int                   _java_calls;            // Number of java calls in the method
  int                   _inner_loops;           // Number of inner loops in the method
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
  int                   _interpreter_frame_size;

  PhaseOutput*          _output;

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  int interpreter_frame_size() const            { return _interpreter_frame_size; }

  PhaseOutput* output() const                   { return _output; }
  void         set_output(PhaseOutput* o)       { _output = o; }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int               compile_id() const          { return _compile_id; }
  DirectiveSet*     directive() const           { return _directive; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool              subsume_loads() const       { return _options._subsume_loads; }
  /** Do escape analysis. */
  bool              do_escape_analysis() const  { return _options._do_escape_analysis; }
  bool              do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
  bool              do_reduce_allocation_merges() const  { return _options._do_reduce_allocation_merges; }
  /** Do boxing elimination. */
  bool              eliminate_boxing() const    { return _options._eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool              aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
  bool              should_install_code() const { return _options._install_code; }
  /** Do locks coarsening. */
  bool              do_locks_coarsening() const { return _options._do_locks_coarsening; }
  bool              do_superword() const        { return _options._do_superword; }

  // Other fixed compilation parameters.
  ciMethod*         method() const              { return _method; }
  int               entry_bci() const           { return _entry_bci; }
  bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
  const TypeFunc*   tf() const                  { assert(_tf != nullptr, ""); return _tf; }
  void              init_tf(const TypeFunc* tf) { assert(_tf == nullptr, ""); _tf = tf; }
  InlineTree*       ilt() const                 { return _ilt; }
  address           stub_function() const       { return _stub_function; }
  const char*       stub_name() const           { return _stub_name; }
  int               stub_id() const             { return _stub_id; }
  address           stub_entry_point() const    { return _stub_entry_point; }
  void          set_stub_entry_point(address z) { _stub_entry_point = z; }

  // Control of this compilation.
  int               fixed_slots() const         { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void          set_fixed_slots(int n)          { _fixed_slots = n; }
  int               major_progress() const      { return _major_progress; }
  void          set_inlining_progress(bool z)   { _inlining_progress = z; }
  int               inlining_progress() const   { return _inlining_progress; }
  void          set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int               inlining_incrementally() const { return _inlining_incrementally; }
  void          set_do_cleanup(bool z)          { _do_cleanup = z; }
  int               do_cleanup() const          { return _do_cleanup; }
  void          set_major_progress()            { _major_progress++; }
  void          restore_major_progress(int progress) { _major_progress += progress; }
  void        clear_major_progress()            { _major_progress = 0; }
  int               max_inline_size() const     { return _max_inline_size; }
  void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
  int               freq_inline_size() const    { return _freq_inline_size; }
  void          set_max_inline_size(int n)      { _max_inline_size = n; }
  bool              has_loops() const           { return _has_loops; }
  void          set_has_loops(bool z)           { _has_loops = z; }
  bool              has_split_ifs() const       { return _has_split_ifs; }
  void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
  bool              has_unsafe_access() const   { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
  bool              has_stringbuilder() const   { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
  bool              has_boxed_value() const     { return _has_boxed_value; }
  void          set_has_boxed_value(bool z)     { _has_boxed_value = z; }
  bool              has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void          set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  bool              has_circular_inline_type() const  { return _has_circular_inline_type; }
  void          set_has_circular_inline_type(bool z)  { _has_circular_inline_type = z; }
  uint              max_vector_size() const     { return _max_vector_size; }
  void          set_max_vector_size(uint s)     { _max_vector_size = s; }
  bool              clear_upper_avx() const     { return _clear_upper_avx; }
  void          set_clear_upper_avx(bool s)     { _clear_upper_avx = s; }
  void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const  { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
  uint              decompile_count() const     { return _decompile_count; }
  void          set_decompile_count(uint c)     { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const         { return _do_inlining; }
  void          set_do_inlining(bool z)         { _do_inlining = z; }
  bool              do_scheduling() const       { return _do_scheduling; }
  void          set_do_scheduling(bool z)       { _do_scheduling = z; }
  bool              do_freq_based_layout() const { return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool              do_vector_loop() const      { return _do_vector_loop; }
  void          set_do_vector_loop(bool z)      { _do_vector_loop = z; }
  bool              use_cmove() const           { return _use_cmove; }
  void          set_use_cmove(bool z)           { _use_cmove = z; }
  bool              do_aliasing() const         { return _do_aliasing; }
  bool              print_assembly() const      { return _print_assembly; }
  void          set_print_assembly(bool z)      { _print_assembly = z; }
  bool              print_inlining() const      { return _print_inlining; }
  void          set_print_inlining(bool z)      { _print_inlining = z; }
  bool              print_intrinsics() const    { return _print_intrinsics; }
  void          set_print_intrinsics(bool z)    { _print_intrinsics = z; }
  uint              max_node_limit() const      { return (uint)_max_node_limit; }
  void          set_max_node_limit(uint n)      { _max_node_limit = n; }
  bool              clinit_barrier_on_entry()   { return _clinit_barrier_on_entry; }
  void          set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
  void          set_flat_accesses()             { _has_flat_accesses = true; }
  bool              flat_accesses_share_alias() const { return _flat_accesses_share_alias; }
  void          set_flat_accesses_share_alias(bool z) { _flat_accesses_share_alias = z; }
  bool              scalarize_in_safepoints() const   { return _scalarize_in_safepoints; }
  void          set_scalarize_in_safepoints(bool z)   { _scalarize_in_safepoints = z; }

  // Support for scalarized inline type calling convention
  bool              has_scalarized_args() const { return _method != nullptr && _method->has_scalarized_args(); }
  bool              needs_stack_repair() const  { return _method != nullptr && _method->get_Method()->c2_needs_stack_repair(); }

  bool              has_monitors() const        { return _has_monitors; }
  void          set_has_monitors(bool v)        { _has_monitors = v; }
  bool              has_scoped_access() const   { return _has_scoped_access; }
  void          set_has_scoped_access(bool v)   { _has_scoped_access = v; }

  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(CompileCommandEnum option) const {
    return method() != nullptr && method()->has_option(option);
  }

#ifndef PRODUCT
  uint           next_igv_idx()                 { return _igv_idx++; }
  bool           trace_opto_output() const      { return _trace_opto_output; }
  void           print_phase(const char* phase_name);
  void           print_ideal_ir(const char* phase_name);
  bool           should_print_ideal() const     { return _directive->PrintIdealOption; }
  bool           parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void       set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;  // Required for dumping IR nodes.
#endif
  bool              has_irreducible_loop() const { return _has_irreducible_loop; }
  void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool              has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void          set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method();
  void end_method();

  void print_method(CompilerPhaseType cpt, int level, Node* n = nullptr);

#ifndef PRODUCT
  bool should_print_igv(int level);
  bool should_print_phase(int level) const;
  bool should_print_ideal_phase(CompilerPhaseType cpt) const;
  void init_igv();
  void dump_igv(const char* graph_name, int level = 3) {
    if (should_print_igv(level)) {
      _igv_printer->print_graph(graph_name, nullptr);
    }
  }

  void igv_print_method_to_file(const char* phase_name = nullptr, bool append = false, const frame* fr = nullptr);
  void igv_print_method_to_network(const char* phase_name = nullptr, const frame* fr = nullptr);
  void igv_print_graph_to_network(const char* name, GrowableArray<const Node*>& visible_nodes, const frame* fr);
  static IdealGraphPrinter* debug_file_printer()    { return _debug_file_printer; }
  static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
#endif

  const GrowableArray<ParsePredicateNode*>& parse_predicates() const {
    return _parse_predicates;
  }

  const GrowableArray<OpaqueTemplateAssertionPredicateNode*>& template_assertion_predicate_opaques() const {
    return _template_assertion_predicate_opaques;
  }

  int           macro_count()             const { return _macro_nodes.length(); }
  int           parse_predicate_count()   const { return _parse_predicates.length(); }
  int           template_assertion_predicate_count() const { return _template_assertion_predicate_opaques.length(); }
  int           expensive_count()         const { return _expensive_nodes.length(); }
  int           coarsened_count()         const { return _coarsened_locks.length(); }

  Node*         macro_node(int idx)       const { return _macro_nodes.at(idx); }

  Node*         expensive_node(int idx)   const { return _expensive_nodes.at(idx); }

  ConnectionGraph* congraph()                   { return _congraph; }
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes.contains(n), "duplicate entry in expand list");
    _macro_nodes.append(n);
  }
  void remove_macro_node(Node* n) {
    // This function may be called twice for the same node, so the node is
    // only removed if it is still present.
    _macro_nodes.remove_if_existing(n);
    // Remove from coarsened locks list if present
    if (coarsened_count() > 0) {
      remove_coarsened_lock(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    _expensive_nodes.remove_if_existing(n);
  }

  void add_parse_predicate(ParsePredicateNode* n) {
    assert(!_parse_predicates.contains(n), "duplicate entry in Parse Predicate list");
    _parse_predicates.append(n);
  }

  void remove_parse_predicate(ParsePredicateNode* n) {
    if (parse_predicate_count() > 0) {
      _parse_predicates.remove_if_existing(n);
    }
  }

  void add_template_assertion_predicate_opaque(OpaqueTemplateAssertionPredicateNode* n) {
    assert(!_template_assertion_predicate_opaques.contains(n),
           "Duplicate entry in Template Assertion Predicate OpaqueTemplateAssertionPredicate list");
    _template_assertion_predicate_opaques.append(n);
  }

  void remove_template_assertion_predicate_opaque(OpaqueTemplateAssertionPredicateNode* n) {
    if (template_assertion_predicate_count() > 0) {
      _template_assertion_predicate_opaques.remove_if_existing(n);
    }
  }
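
  // A hedged sketch of the registration discipline for these side lists: a
  // node is added when it is created and removed when it dies or is expanded,
  // so the lists never hold dangling entries (the node below is hypothetical):
  //
  //   Node* n = ...;             // a node that needs post-parse expansion
  //   C->add_macro_node(n);      // expanded before matching
  //   ...
  //   C->remove_macro_node(n);   // e.g., when the allocation is eliminated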
  void add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks);
  void remove_coarsened_lock(Node* n);
  bool coarsened_locks_consistent();
  void mark_unbalanced_boxes() const;

  bool       post_loop_opts_phase()             { return _post_loop_opts_phase; }
  void   set_post_loop_opts_phase()             { _post_loop_opts_phase = true; }
  void reset_post_loop_opts_phase()             { _post_loop_opts_phase = false; }

#ifdef ASSERT
  bool       phase_verify_ideal_loop() const    { return _phase_verify_ideal_loop; }
  void   set_phase_verify_ideal_loop()          { _phase_verify_ideal_loop = true; }
  void reset_phase_verify_ideal_loop()          { _phase_verify_ideal_loop = false; }
#endif

  bool       allow_macro_nodes()                { return _allow_macro_nodes; }
  void reset_allow_macro_nodes()                { _allow_macro_nodes = false; }

  void record_for_post_loop_opts_igvn(Node* n);
  void remove_from_post_loop_opts_igvn(Node* n);
  void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);

  // Keep track of inline type nodes for later processing
  void add_inline_type(Node* n);
  void remove_inline_type(Node* n);
  void process_inline_types(PhaseIterGVN &igvn, bool remove = false);

  void adjust_flat_array_access_aliases(PhaseIterGVN& igvn);

  void record_unstable_if_trap(UnstableIfTrap* trap);
  bool remove_unstable_if_trap(CallStaticJavaNode* unc, bool yield);
  void remove_useless_unstable_if_traps(Unique_Node_List& useful);
  void process_for_unstable_if_traps(PhaseIterGVN& igvn);

  bool       merge_stores_phase()               { return _merge_stores_phase; }
  void   set_merge_stores_phase()               { _merge_stores_phase = true; }
  void record_for_merge_stores_igvn(Node* n);
  void remove_from_merge_stores_igvn(Node* n);
  void process_for_merge_stores_igvn(PhaseIterGVN& igvn);

  void shuffle_macro_nodes();
  void sort_macro_nodes();

  void mark_parse_predicate_nodes_useless(PhaseIterGVN& igvn);

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena*            comp_arena()                { return &_comp_arena; }
  ciEnv*            env() const                 { return _env; }
  CompileLog*       log() const                 { return _log; }

  bool failing_internal() const {
    return _env->failing() ||
           _failure_reason.get() != nullptr;
  }

  const char* failure_reason() const {
    return _env->failing() ? _env->failure_reason()
                           : _failure_reason.get();
  }

  const CompilationFailureInfo* first_failure_details() const { return _first_failure_details; }

  bool failing() {
    if (failing_internal()) {
      return true;
    }
#ifdef ASSERT
    // Disable stress code for PhaseIdealLoop verification (would have cascading effects).
    if (phase_verify_ideal_loop()) {
      return false;
    }
    if (StressBailout) {
      return fail_randomly();
    }
#endif
    return false;
  }

#ifdef ASSERT
  bool fail_randomly();
  bool failure_is_artificial();
#endif

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason.get()) ||
           (r != nullptr &&
            _failure_reason.get() != nullptr &&
            strcmp(r, _failure_reason.get()) == 0);
  }

  void record_failure(const char* reason DEBUG_ONLY(COMMA bool allow_multiple_failures = false));
  void record_method_not_compilable(const char* reason DEBUG_ONLY(COMMA bool allow_multiple_failures = false)) {
    env()->record_method_not_compilable(reason);
    // Record failure reason.
    record_failure(reason DEBUG_ONLY(COMMA allow_multiple_failures));
  }
  bool check_node_count(uint margin, const char* reason) {
    if (oom()) {
      record_method_not_compilable_oom();
      return true;
    }
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
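
  // The typical bailout pattern inside an optimization phase: probe the node
  // budget before a transform that can create many nodes, then poll failing()
  // and unwind if the compilation was aborted. An illustrative sketch (the
  // margin of 2000 nodes is an arbitrary example value):
  //
  //   if (C->check_node_count(2000, "out of nodes during split_if")) {
  //     return;                  // failure was recorded; caller must unwind
  //   }
  //   // ... transform the graph ...
  //   if (C->failing()) return;  // bail out if a failure was recorded meanwhile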
  bool oom() const { return _oom; }
  void set_oom()   { _oom = true; }

  // Node management
  uint              unique() const              { return _unique; }
  uint              next_unique()               { return _unique++; }
  void          set_unique(uint i)              { _unique = i; }
  Arena*            node_arena()                { return _node_arena; }
  Arena*            old_arena()                 { return (&_node_arena_one == _node_arena) ? &_node_arena_two : &_node_arena_one; }
  RootNode*         root() const                { return _root; }
  void          set_root(RootNode* r)           { _root = r; }
  StartNode*        start() const;              // (Derived from root.)
  void          verify_start(StartNode* s) const NOT_DEBUG_RETURN;
  Node*             immutable_memory();

  Node*             recent_alloc_ctl() const    { return _recent_alloc_ctl; }
  Node*             recent_alloc_obj() const    { return _recent_alloc_obj; }
  void          set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  void reset_dead_node_list() {
    _dead_node_list.reset();
    _dead_node_count = 0;
  }
  uint live_nodes() const {
    int val = _unique - _dead_node_count;
    assert(val >= 0, "number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique);
    return (uint) val;
  }
#ifdef ASSERT
  void set_phase_optimize_finished()            { _phase_optimize_finished = true; }
  bool phase_optimize_finished() const          { return _phase_optimize_finished; }
  uint count_live_nodes_by_graph_walk();
  void print_missing_nodes();
#endif
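
  // Worked example of the O(1) live-node accounting above: if 10000 node
  // indexes have been handed out (_unique == 10000) and 1500 distinct indexes
  // have been recorded dead, live_nodes() returns 8500. record_dead_node()
  // consults the bitset first, so recording an index twice cannot double-count.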

  // Record modified nodes to check that they are put on IGVN worklist
  void record_modified_node(Node* n) NOT_DEBUG_RETURN;
  void remove_modified_node(Node* n) NOT_DEBUG_RETURN;
  DEBUG_ONLY(Unique_Node_List* modified_nodes() const { return _modified_nodes; })

  MachConstantBaseNode* mach_constant_base_node();
  bool              has_mach_constant_base_node() const { return _mach_constant_base_node != nullptr; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool needs_deep_clone_jvms();

  // Handy undefined Node
  Node*             top() const                 { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node*             cached_top_node()           { return _top; }
  void          set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const        { return _default_node_notes; }
  void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes*       node_notes_at(int idx);

  inline bool   set_node_notes_at(int idx, Node_Notes* value);
  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);
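
  // The node-notes store is a blocked two-level array: the outer GrowableArray
  // holds blocks of _node_notes_block_size (256) entries, so a node index
  // splits into a block number and an offset. Worked example for idx == 1000:
  //
  //   int block  = idx >> _log2_node_notes_block_size;  // 1000 >> 8  == 3
  //   int offset = idx & (_node_notes_block_size - 1);  // 1000 & 255 == 232
  //   // the notes live at arr->at(block)[offset]; see locate_node_notes()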

  // Type management
  Arena*            type_arena()                { return _type_arena; }
  Dict*             type_dict()                 { return _type_dict; }
  size_t            type_last_size()            { return _type_last_size; }
  int               num_alias_types()           { return _num_alias_types; }

  void          init_type_arena()               { _type_arena = &_Compile_types; }
  void          set_type_arena(Arena* a)        { _type_arena = a; }
  void          set_type_dict(Dict* d)          { _type_dict = d; }
  void          set_type_last_size(size_t sz)   { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : nullptr;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != nullptr || tf == nullptr, "");
    _last_tf_m = m;
    _last_tf = tf;
  }
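
  // last_tf()/set_last_tf() form a one-entry cache for TypeFunc::make(method).
  // A hedged sketch of the lookup side (`method` is a hypothetical ciMethod*):
  //
  //   const TypeFunc* tf = C->last_tf(method);  // fast path: cache hit
  //   if (tf == nullptr) {
  //     tf = TypeFunc::make(method);            // slow path: recompute
  //     C->set_last_tf(method, tf);             // prime the cache
  //   }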

  AliasType*        alias_type(int idx)         { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type, ciField* field = nullptr, bool uncached = false) { return find_alias_type(adr_type, false, field, uncached); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField* field);

  int               get_alias_index(const TypePtr* at, bool uncached = false) { return alias_type(at, nullptr, uncached)->index(); }
  const TypePtr*    get_adr_type(uint aidx)     { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                   JVMState* jvms, bool allow_inline, float profile_factor,
                                   ciKlass* speculative_receiver_type = nullptr,
                                   bool allow_intrinsics = true);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms) ||
           should_delay_vector_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool& call_does_dispatch, int& vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at the given method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = nullptr);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int      allowed_deopt_reasons()              { return _allowed_reasons; }
  void set_allowed_deopt_reasons();

  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* igvn_worklist() {
    assert(_igvn_worklist != nullptr, "must be created in Compile::Compile");
    return _igvn_worklist;
  }
  Type_Array* types() {
    assert(_types != nullptr, "must be created in Compile::Compile");
    return _types;
  }
  NodeHash* node_hash() {
    assert(_node_hash != nullptr, "must be created in Compile::Compile");
    return _node_hash;
  }
  inline void record_for_igvn(Node* n);  // Body is after class Unique_Node_List in node.hpp.
  inline void remove_for_igvn(Node* n);  // Body is after class Unique_Node_List in node.hpp.
  void set_initial_gvn(PhaseGVN* gvn)           { _initial_gvn = gvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void identify_useful_nodes(Unique_Node_List& useful);
  void update_dead_node_list(Unique_Node_List& useful);
  void disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints = nullptr);

  void remove_useless_node(Node* dead);

  // Record this CallGenerator for inlining at the end of parsing.
  void add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void add_vector_reboxing_late_inline(CallGenerator* cg) {
    _vector_reboxing_late_inlines.push(cg);
  }
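
  // Why add_late_inline() uses insert_before(_late_inlines_pos, ...) instead
  // of push(): inserting at the cursor and advancing it keeps candidates
  // discovered inside the current callee adjacent to it, so draining the queue
  // front-to-back approximates depth-first inlining order. Worked example
  // (letters are hypothetical candidates):
  //
  //   // queue [A, B] with the cursor after A; inlining A discovers C:
  //   add_late_inline(C);   // queue becomes [A, C, B], cursor moves after C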

  template<typename N, ENABLE_IF(std::is_base_of<Node, N>::value)>
  void remove_useless_nodes(GrowableArray<N*>& node_list, Unique_Node_List& useful);

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List& useful);
  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead);

  void remove_useless_coarsened_locks(Unique_Node_List& useful);

  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }
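
  // Worked example of the 10% headroom above, assuming the default
  // LiveNodeCountInliningCutoff of 40000: incremental inlining is only
  // considered over the cutoff beyond 40000 * 11 / 10 == 44000 live nodes,
  // which tolerates the transient over-count that persists until the next
  // dead node elimination step.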

  void inc_number_of_mh_late_inlines()          { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines()          { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const              { return _number_of_mh_late_inlines > 0; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  bool should_delay_inlining()                  { return AlwaysIncrementalInline || (StressIncrementalInlining && (random() % 2) == 0); }
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  void inline_vector_reboxing_calls();
  bool has_vbox_nodes();

  void process_late_inline_calls_no_inline(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }

  void          set_matcher(Matcher* m)         { _matcher = m; }
  //void        set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)    { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  void          set_java_calls(int z)           { _java_calls = z; }
  void          set_inner_loops(int z)          { _inner_loops = z; }

  Dependencies* dependencies()                  { return env()->dependencies(); }

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, ciMethod* target,
          int entry_bci, Options options, DirectiveSet* directive);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc* (*gen)(),
          address stub_function, const char* stub_name,
          int stub_id, int is_fancy_jump, bool pass_tls,
          bool return_pc, DirectiveSet* directive);

  ~Compile();

  // Are we compiling a method?
  bool has_method() { return method() != nullptr; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots() {
    return SharedRuntime::in_preserve_stack_slots();
  }

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  static uint out_preserve_stack_slots() {
    return SharedRuntime::out_preserve_stack_slots();
  }

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(bool aliasing);         // Prepare for a single compilation
  void Optimize();                  // Given a graph, optimize it
  void Code_Gen();                  // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr* flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field, bool uncached = false);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);           // constructor
  int intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found); // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);              // query fn
  void register_intrinsic(CallGenerator* cg);                               // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[];
  static jubyte _intrinsic_hist_flags[];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl(Node* n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned);

  // Logic cone optimization.
  void optimize_logic_cones(PhaseIterGVN& igvn);
  void collect_logic_cone_roots(Unique_Node_List& list);
  void process_logic_cone_root(PhaseIterGVN& igvn, Node* n, VectorSet& visited);
  bool compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs);
  uint compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs);
  uint eval_macro_logic_op(uint func, uint op1, uint op2, uint op3);
  Node* xform_to_MacroLogicV(PhaseIterGVN& igvn, const TypeVect* vt, Unique_Node_List& partitions, Unique_Node_List& inputs);
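
  // The logic cone pass above folds a tree of vector boolean ops over up to
  // three inputs into a single MacroLogicV node whose `func` is an 8-bit truth
  // table: following the x86 vpternlog convention, bit i of func is the output
  // for the input combination i = (a << 2) | (b << 1) | c. Worked example for
  // (a & b) | c: the outputs for inputs 000 through 111 are 0,1,0,1,0,1,1,1,
  // so func == 0b11101010 == 0xEA.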
  void check_no_dead_use() const NOT_DEBUG_RETURN;

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked = 1,      // succeeded at least once
    _intrinsic_failed = 2,      // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one correspondence
  // between Use-Def edges and Def-Use edges. The option no_dead_code enables
  // stronger checks that the graph is strongly connected from starting points
  // in both directions.
  // root_and_safepoints is used to give the starting points for the traversal.
  // If not supplied, only root is used. When this check is called after CCP,
  // we need to start traversal from Root and safepoints, just like CCP does its
  // own traversal (see PhaseCCP::transform for reasons).
  //
  // To call this function, there are 2 ways to go:
  // - give root_and_safepoints to start traversal everywhere needed (like after CCP)
  // - if the whole graph is assumed to be reachable from Root's input,
  //   root_and_safepoints is not needed (like in PhaseRemoveUseless).
  //
  // Failure to specify root_and_safepoints in case the graph is not fully
  // reachable from Root's input makes this check unsound (it can miss
  // inconsistencies) and even incomplete (it can report problems that do not
  // exist) if no_dead_code is true.
  void verify_graph_edges(bool no_dead_code = false, const Unique_Node_List* root_and_safepoints = nullptr) const PRODUCT_RETURN;

  // Verify bi-directional correspondence of edges
  void verify_bidirectional_edges(Unique_Node_List& visited, const Unique_Node_List* root_and_safepoints = nullptr) const;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Static parse-time type checking logic for gen_subtype_check:
  enum SubTypeCheckResult { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  SubTypeCheckResult static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip = StressReflectiveCode);

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = nullptr);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);
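
  // A hedged sketch of using constrained_convI2L: once a range check proves
  // 0 <= i < length, the i2l conversion can carry the narrowed type plus a
  // control dependency on the check so it cannot float above it (`idx` and
  // `range_check_ctrl` are hypothetical):
  //
  //   Node* long_idx = Compile::constrained_convI2L(&_gvn, idx, TypeInt::POS,
  //                                                 range_check_ctrl);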

  Node* optimize_acmp(PhaseGVN* phase, Node* a, Node* b);

  // Auxiliary methods for randomized fuzzing/stressing
  int random();
  bool randomized_select(int count);

  // seed random number generation and log the seed for repeatability.
  void initialize_stress_seed(const DirectiveSet* directive);

  // supporting clone_map
  CloneMap&     clone_map();
  void          set_clone_map(Dict* d);

  bool needs_clinit_barrier(ciField* ik,         ciMethod* accessing_method);
  bool needs_clinit_barrier(ciMethod* ik,        ciMethod* accessing_method);
  bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);

#ifdef ASSERT
  VerifyMeetResult* _type_verify;
  void set_exception_backedge() { _exception_backedge = true; }
  bool has_exception_backedge() const { return _exception_backedge; }
#endif

  static bool push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry,
                            BasicType out_bt, BasicType in_bt);

  static Node* narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res);
};

#endif // SHARE_OPTO_COMPILE_HPP