/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public AnyObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  bool        _late_inline;       // method is inlined incrementally
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             int max_inline_level);
  InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            bool& should_delay,
                            ciCallProfile& profile);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                int caller_bci,
                                bool& should_delay,
                                ciCallProfile& profile);
  bool        is_not_reached(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
                             ciCallProfile& profile);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree;  }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result indicates whether to proceed with inlining; should_delay is
  // set when the decision is to inline, but only later, as an incremental
  // ("late") inline.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  bool ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, bool& should_delay);
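  // A minimal usage sketch (illustrative only; names like caller_tree and
  // callee are placeholders -- see the actual call sites in doCall.cpp):
  //
  //   bool should_delay = false;
  //   if (caller_tree->ok_to_inline(callee, jvms, profile, should_delay)) {
  //     // inline now, or create a late-inline call generator if
  //     // should_delay was set
  //   }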

  void set_late_inline() {
    _late_inline = true;
  }

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod   *method()            const { return _method; }
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }
  int         inline_level()      const { return stack_depth(); }

#ifndef PRODUCT
private:
  uint        _count_inlines;     // Count of inlined methods
public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool        forced_inline()     const { return _forced_inline; }
  // Count number of nodes in this subtree
  int         count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out, int depth_adjust = 0);
};
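
// A typical lifecycle sketch (illustrative only): the root tree is built
// once per compilation, e.g.
//
//   InlineTree* ilt = InlineTree::build_inline_tree_root();
//
// and subtrees for inlined callees are then located on demand with
// InlineTree::find_subtree_from_root(ilt, jvms, callee).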


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have a merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
    bool               _has_predicates; // Were predicates added before parsing of the loop head?

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Include exception paths also.
    Block**            _successors;

   public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const        { return _flow; }
    int pred_count() const                 { return _pred_count; }
    int preds_parsed() const               { return _preds_parsed; }
    bool is_parsed() const                 { return _is_parsed; }
    bool is_handler() const                { return _is_handler; }
    void set_count( uint x )               { _count = x; }
    uint count() const                     { return _count; }

    SafePointNode* start_map() const       { assert(is_merged(),"");   return _start_map; }
    void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const                 { return _start_map != nullptr; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const       { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != nullptr && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                  { return preds_parsed() == pred_count(); }

    bool has_predicates() const            { return _has_predicates; }
    void set_has_predicates()              { _has_predicates = true; }

    int num_successors() const             { return _num_successors; }
    int all_successors() const             { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                      { return flow()->start(); }
    int limit() const                      { return flow()->limit(); }
    int rpo() const                        { return flow()->rpo(); }
    int start_sp() const                   { return flow()->stack_size(); }

    bool is_loop_head() const              { return flow()->is_loop_head(); }
    bool is_in_irreducible_loop() const {
      return flow()->is_in_irreducible_loop();
    }
    bool is_irreducible_loop_entry() const {
      return flow()->is_irreducible_loop_head() || flow()->is_irreducible_loop_secondary_entry();
    }
    void copy_irreducible_status_to(RegionNode* region, const JVMState* jvms) {
      assert(!is_irreducible_loop_entry() || is_in_irreducible_loop(), "entry is part of irreducible loop");
      if (is_in_irreducible_loop()) {
        // The block is in an irreducible loop of this method, so it is possible that this
        // region becomes an irreducible loop entry. (no guarantee)
        region->set_loop_status(RegionNode::LoopStatus::MaybeIrreducibleEntry);
      } else if (jvms->caller() != nullptr) {
        // The block is not in an irreducible loop of this method, hence it cannot ever
        // be the entry of an irreducible loop. But it may be inside an irreducible loop
        // of a caller of this inlined method. (limited guarantee)
        assert(region->loop_status() == RegionNode::LoopStatus::NeverIrreducibleEntry, "status not changed");
      } else {
        // The block is not in an irreducible loop of this method, and there is no outer
        // method. This region will never be in an irreducible loop (strong guarantee)
        region->set_loop_status(RegionNode::LoopStatus::Reducible);
      }
    }
    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const  {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const  { assert(is_SEL_head(),""); return is_invariant_local(i); }

    const Type* peek(int off=0) const      { return stack_type_at(start_sp() - (off+1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
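    // For example, with pred_count() == 3, successive calls return 3, 2,
    // and then 1 (PhiNode::Input); each result is the phi/region input
    // index ("path number") for the predecessor just merged.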

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be null.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ArenaObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse     *_parser;
    Compile   *_compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse *p, Compile *c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

  public:
    // Record work done during parsing
    BytecodeParseHistogram* _parse_histogram;
    void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
    BytecodeParseHistogram* parse_histogram()      { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;          // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _wrote_volatile;     // Did we write a volatile field?
  bool          _wrote_stable;       // Did we write a @Stable field?
  bool          _wrote_fields;       // Did we write any field?
  Node*         _alloc_with_final_or_stable; // An allocation node with final or @Stable field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool         _first_return;                  // true if return is the first to be parsed
  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint         _new_idx;                       // any node with _idx above this was new during this parsing; used to trim the replaced nodes list

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  //            entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  //            blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          wrote_stable() const  { return _wrote_stable; }
  void      set_wrote_stable(bool z)  { _wrote_stable = z; }
  bool          wrote_fields() const  { return _wrote_fields; }
  void      set_wrote_fields(bool z)  { _wrote_fields = z; }
  Node*    alloc_with_final_or_stable() const   { return _alloc_with_final_or_stable; }
  void set_alloc_with_final_or_stable(Node* n)  {
    assert((_alloc_with_final_or_stable == nullptr) || (_alloc_with_final_or_stable == n), "different init objects?");
    _alloc_with_final_or_stable = n;
  }

  Block*             block()    const { return _block; }
  ciBytecodeStream&  iter()           { return _iter; }
  Bytecodes::Code    bc()       const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_osr_parse() const {
    assert(_entry_bci != UnknownBci, "uninitialized _entry_bci");
    return _entry_bci != InvocationEntryBci;
  }
  bool is_normal_parse() const  { return !is_osr_parse(); }
  int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return null if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi(       int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // Helper function to generate array store check
  Node* array_store_check(Node*& adr, const Type*& elemtype);
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
  bool needs_range_check(const TypeInt* size_type, const Node* index) const;
  Node* create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type, const Type*& element_type);
  Node* cast_to_speculative_array_type(Node* array, const TypeAryPtr*& array_type, const Type*& element_type);
  Node* cast_to_profiled_array_type(Node* const array);
  Node* speculate_non_null_free_array(Node* array, const TypeAryPtr*& array_type);
  Node* speculate_non_flat_array(Node* array, const TypeAryPtr* array_type);
  void create_range_check(Node* idx, Node* ary, const TypeInt* sizetype);
  Node* record_profile_for_speculation_at_array_load(Node* ld);

  void clinit_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (target_bci <= bci()) {
      add_safepoint();
    }
  }
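  // For example, the backward branch at the bottom of a loop (a goto whose
  // target_bci is at or before the current bci()) gets a safepoint; forward
  // branches do not.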

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);

  // Helper functions for type checking bytecodes:
  void  do_checkcast();
  void  do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);
  void set_inline_type_field(Node* obj, ciField* field, Node* val);

  ciType* improve_abstract_inline_type_klass(ciType* field_klass);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_newarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c, bool can_trap = true, bool new_path = false, Node** ctrl_taken = nullptr);
  void    do_acmp(BoolTest::mask btest, Node* left, Node* right);
  void    acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region);
  void    acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region);
  Node*   acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl);
  void    acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap = true);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  void    maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
  void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
  void    jump_if_always_fork(int dest_bci_if_true, bool unc);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

  // Stress unstable if traps
  void stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store);
  // Increment counter used by StressUnstableIfTraps
  void increment_trap_stress_counter(Node*& counter, Node*& incr_store);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

// Specialized uncommon_trap for unstable_if. C2 uses the next_bci of the path to update its live locals.
class UnstableIfTrap {
  CallStaticJavaNode* const _unc;
  bool _modified;            // modified locals based on next_bci()
  int _next_bci;

public:
  UnstableIfTrap(CallStaticJavaNode* call, Parse::Block* path): _unc(call), _modified(false) {
    assert(_unc != nullptr && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if,
          "invalid uncommon_trap call!");
    _next_bci = path != nullptr ? path->start() : -1;
  }

  // The starting point of the pruned block, where control goes when
  // deoptimization does happen.
  int next_bci() const {
    return _next_bci;
  }

  bool modified() const {
    return _modified;
  }

  void set_modified() {
    _modified = true;
  }

  CallStaticJavaNode* uncommon_trap() const {
    return _unc;
  }

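  // Arena-allocated: storage comes from the compilation's arena and is
  // reclaimed wholesale when the compilation ends, so no delete is needed.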
  inline void* operator new(size_t x) throw() {
    Compile* C = Compile::current();
    return C->comp_arena()->AmallocWords(x);
  }
};

#endif // SHARE_OPTO_PARSE_HPP