/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/partialEscape.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public AnyObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  bool        _late_inline;       // method is inlined incrementally
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            bool& should_delay,
                            ciCallProfile& profile);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                int caller_bci,
                                bool& should_delay,
                                ciCallProfile& profile);
  bool        is_not_reached(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
                             ciCallProfile& profile);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  bool ok_to_inline(ciMethod* call_method, JVMState* caller_jvms, ciCallProfile& profile, bool& should_delay);

  void set_late_inline() {
    _late_inline = true;
  }

  // Information about inlined method
  JVMState* caller_jvms()      const { return _caller_jvms; }
  ciMethod* method()           const { return _method; }
  int       caller_bci()       const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint      count_inline_bcs() const { return _count_inline_bcs; }
  int       inline_level()     const { return stack_depth(); }

#ifndef PRODUCT
private:
  uint _count_inlines;  // Count of inlined methods
public:
  // Debug information collected during parse
  uint count_inlines() const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool _forced_inline;  // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool forced_inline() const { return _forced_inline; }
  // Count number of nodes in this subtree
  int count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out, int depth_adjust = 0);
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
public:
  // Per-block information needed by the parser:
  class Block {
  private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;          // how many predecessors in CFG?
    int                _preds_parsed;        // how many of these have been parsed?
    uint               _count;               // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;           // has this block been parsed yet?
    bool               _is_handler;          // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*     _start_map;           // all values flowing into this block
    mutable MethodLivenessResult _live_locals; // lazily initialized liveness bitmap
    bool               _has_predicates;      // Were predicates added before parsing of the loop head?

    int                _num_successors;      // Includes only normal control flow.
    int                _all_successors;      // Include exception paths also.
    Block**            _successors;
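    // Only maintained when DoPartialEscapeAnalysis is on (see predecessor_at).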
    Block**            _predecessors;
    Block*             _from_block;
    int                _init_pnum;           // the pnum of Block where _state is copied from.
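
    // Return the liveness bitmap for the locals at this block's start bci,
    // computing and caching it on first use.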
    const MethodLivenessResult& liveness() const {
      if (!_live_locals.is_valid()) {
        _live_locals = flow()->outer()->method()->liveness_at_bci(start());
      }
      assert(_live_locals.is_valid(), "sanity check");
      return _live_locals;
    }
  public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const { return _flow; }
    int pred_count()    const { return _pred_count; }
    int preds_parsed()  const { return _preds_parsed; }
    bool is_parsed()    const { return _is_parsed; }
    bool is_handler()   const { return _is_handler; }
    void set_count(uint x)    { _count = x; }
    uint count()        const { return _count; }

    SafePointNode* start_map() const { assert(is_merged(), ""); return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const { return _start_map != nullptr; }
    Block* from_block() const { return _from_block; }
    int init_pnum() const { return _init_pnum; }
    PEAState& state() {
      assert(is_merged(), "sanity check");
      return _start_map->jvms()->alloc_state();
    }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != nullptr && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const { return preds_parsed() == pred_count(); }

    bool has_predicates() const { return _has_predicates; }
    void set_has_predicates() { _has_predicates = true; }

    int num_successors() const { return _num_successors; }
    int all_successors() const { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    Block* predecessor_at(int i) const {
      assert(DoPartialEscapeAnalysis, "_predecessors is only available when DoPartialEscapeAnalysis is ON");
      assert(i < _pred_count, "");
      return _predecessors[i];
    }

    int start()    const { return flow()->start(); }
    int limit()    const { return flow()->limit(); }
    int rpo()      const { return flow()->rpo(); }
    int start_sp() const { return flow()->stack_size(); }

    bool is_loop_head() const { return flow()->is_loop_head(); }
    bool is_in_irreducible_loop() const {
      return flow()->is_in_irreducible_loop();
    }
    bool is_irreducible_loop_entry() const {
      return flow()->is_irreducible_loop_head() || flow()->is_irreducible_loop_secondary_entry();
    }
    void copy_irreducible_status_to(RegionNode* region, const JVMState* jvms) {
      assert(!is_irreducible_loop_entry() || is_in_irreducible_loop(), "entry is part of irreducible loop");
      if (is_in_irreducible_loop()) {
        // The block is in an irreducible loop of this method, so it is possible that this
        // region becomes an irreducible loop entry. (no guarantee)
        region->set_loop_status(RegionNode::LoopStatus::MaybeIrreducibleEntry);
      } else if (jvms->caller() != nullptr) {
        // The block is not in an irreducible loop of this method, hence it cannot ever
        // be the entry of an irreducible loop. But it may be inside an irreducible loop
        // of a caller of this inlined method. (limited guarantee)
        assert(region->loop_status() == RegionNode::LoopStatus::NeverIrreducibleEntry, "status not changed");
      } else {
        // The block is not in an irreducible loop of this method, and there is no outer
        // method. This region will never be in an irreducible loop (strong guarantee)
        region->set_loop_status(RegionNode::LoopStatus::Reducible);
      }
    }
    bool is_SEL_head() const { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(), ""); return is_invariant_local(i); }

    const Type* peek(int off = 0) const { return stack_type_at(start_sp() - (off + 1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
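    // For example, a block with three predecessors hands out path numbers
    // 3, 2, 1 as its predecessors are merged; the last-merged path ends up
    // at region input 1.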
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be null.
    void record_state(Parse* outer, int pnum);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ArenaObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
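    // Counters are static: they aggregate across all Parse instances until reset().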
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state(Bytecodes::Code bc);
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F);  // cutoff in percent
  };

public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram() { return _parse_histogram; }
#endif

private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;         // JVMS which carries incoming args & state.
  PEAState*     _caller_state;   // current PEA state of caller
  float         _expected_uses;  // expected number of calls to this code
  float         _prof_factor;    // discount applied to my profile counts
  int           _depth;          // Inline tree depth, for debug printouts
  const TypeFunc* _tf;           // My kind of function type
  int           _entry_bci;      // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;           // Results of previous flow pass.
  Block*        _blocks;         // Array of basic-block structs.
  int           _block_count;    // Number of elements in _blocks.

  GraphKit      _exits;          // Record all normal returns and throws here.
  bool          _wrote_final;    // Did we write a final field?
  bool          _wrote_volatile; // Did we write a volatile field?
  bool          _wrote_stable;   // Did we write a @Stable field?
  bool          _wrote_fields;   // Did we write any field?
  Node*         _alloc_with_final; // An allocation node with final field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;      // block currently getting parsed
  ciBytecodeStream  _iter;       // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;         // Debugging SwitchRanges.
  int _est_switch_depth;         // Debugging SwitchRanges.
#endif

  int  _first_return;                  // true if return is the first to be parsed
  bool _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint _new_idx;                       // any node with _idx above this was created during this parse.
                                       // Used to trim the list of replaced nodes.

public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, PEAState* caller_state = nullptr);

#ifndef PRODUCT
  ~Parse();
#endif
  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  // entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  // blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()                    { return _exits; }
  bool          wrote_final() const        { return _wrote_final; }
  void      set_wrote_final(bool z)        { _wrote_final = z; }
  bool          wrote_volatile() const     { return _wrote_volatile; }
  void      set_wrote_volatile(bool z)     { _wrote_volatile = z; }
  bool          wrote_stable() const       { return _wrote_stable; }
  void      set_wrote_stable(bool z)       { _wrote_stable = z; }
  bool          wrote_fields() const       { return _wrote_fields; }
  void      set_wrote_fields(bool z)       { _wrote_fields = z; }
  Node*         alloc_with_final() const   { return _alloc_with_final; }
  void      set_alloc_with_final(Node* n)  {
    if (DoPartialEscapeAnalysis) {
      assert((_alloc_with_final == nullptr) || (_alloc_with_final == PEA()->is_alias(n)),
             "different init objects?");
      _alloc_with_final = PEA()->is_alias(n);
    } else {
      assert((_alloc_with_final == nullptr) || (_alloc_with_final == n), "different init objects?");
      _alloc_with_final = n;
    }
  }

  Block*            block()     const { return _block; }
  ciBytecodeStream& iter()            { return _iter; }
  Bytecodes::Code   bc()        const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_osr_parse() const {
    assert(_entry_bci != UnknownBci, "uninitialized _entry_bci");
    return _entry_bci != InvocationEntryBci;
  }
  bool is_normal_parse() const { return !is_osr_parse(); }
  int osr_bci() const { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return null if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b, int pnum) { b->record_state(this, pnum); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);

  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);

  void clinit_deopt();

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float& cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float& cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;
  bool    seems_stable_comparison() const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  void    maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  void    jump_if_true_fork(IfNode* ifNode, int dest_bci_if_true, bool unc);
  void    jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, bool unc);
  void    jump_if_always_fork(int dest_bci_if_true, bool unc);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

// Specialized uncommon_trap of unstable_if. C2 uses the path's next_bci to
// update its live locals.
class UnstableIfTrap {
  CallStaticJavaNode* const _unc;
  bool _modified;            // modified locals based on next_bci()
  int  _next_bci;

public:
  UnstableIfTrap(CallStaticJavaNode* call, Parse::Block* path): _unc(call), _modified(false) {
    assert(_unc != nullptr && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if,
           "invalid uncommon_trap call!");
    _next_bci = path != nullptr ? path->start() : -1;
  }

  // The starting point of the pruned block, where control goes when
  // deoptimization does happen.
  int next_bci() const {
    return _next_bci;
  }

  bool modified() const {
    return _modified;
  }

  void set_modified() {
    _modified = true;
  }

  CallStaticJavaNode* uncommon_trap() const {
    return _unc;
  }

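  // Instances live in the compile-time arena and are reclaimed in bulk when
  // the compilation's arena is freed, so no matching delete is needed.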
  inline void* operator new(size_t x) throw() {
    Compile* C = Compile::current();
    return C->comp_arena()->AmallocWords(x);
  }
};

#endif // SHARE_OPTO_PARSE_HPP