/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public AnyObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  bool        _late_inline;       // method is inlined incrementally
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);
  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             int max_inline_level);
  InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            JVMState* caller_jvms,
                            bool& should_delay,
                            ciCallProfile& profile);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                int caller_bci,
                                bool& should_delay,
                                ciCallProfile& profile);
  bool        is_not_reached(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
                             ciCallProfile& profile);
  void print_inlining(ciMethod* callee_method, JVMState* jvm, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree;  }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  bool ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, bool& should_delay);
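  // A minimal sketch (hypothetical call site; `ilt`, `callee`, `jvms` and
  // `profile` are assumed to be in scope) of how a caller's tree is asked
  // about a call site:
  //
  //   bool should_delay = false;
  //   if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
  //     // a subtree for `callee` exists now; see find_subtree_from_root()
  //   } else if (should_delay) {
  //     // defer the decision to incremental (late) inlining
  //   }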

  void set_late_inline() {
    _late_inline = true;
  }

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod   *method()            const { return _method; }
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }
  int         inline_level()      const { return stack_depth(); }

#ifndef PRODUCT
private:
  uint        _count_inlines;     // Count of inlined methods
public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool        forced_inline()     const { return _forced_inline; }
  // Count number of nodes in this subtree
  int         count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out, int depth_adjust = 0);
};
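
// A minimal sketch (hypothetical driver code, not part of this header) of
// how the inline tree is typically rooted and queried during a compilation;
// `jvms` and `callee` are assumed to be in scope:
//
//   InlineTree* root = InlineTree::build_inline_tree_root();
//   // ... parsing proceeds; subtrees are created by ok_to_inline() ...
//   InlineTree* sub = InlineTree::find_subtree_from_root(root, jvms, callee);
//   if (sub != nullptr) {
//     tty->print_cr("inlined %u bytecodes", sub->count_inline_bcs());
//   }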


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have a merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
    bool               _has_predicates; // Were predicates added before parsing of the loop head?

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Includes exception paths also.
    Block**            _successors;

   public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const        { return _flow; }
    int pred_count() const                 { return _pred_count; }
    int preds_parsed() const               { return _preds_parsed; }
    bool is_parsed() const                 { return _is_parsed; }
    bool is_handler() const                { return _is_handler; }
    void set_count( uint x )               { _count = x; }
    uint count() const                     { return _count; }

    SafePointNode* start_map() const       { assert(is_merged(),"");   return _start_map; }
    void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const                 { return _start_map != nullptr; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const       { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != nullptr && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                  { return preds_parsed() == pred_count(); }

    bool has_predicates() const            { return _has_predicates; }
    void set_has_predicates()              { _has_predicates = true; }

    int num_successors() const             { return _num_successors; }
    int all_successors() const             { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                      { return flow()->start(); }
    int limit() const                      { return flow()->limit(); }
    int rpo() const                        { return flow()->rpo(); }
    int start_sp() const                   { return flow()->stack_size(); }

    bool is_loop_head() const              { return flow()->is_loop_head(); }
    bool is_in_irreducible_loop() const {
      return flow()->is_in_irreducible_loop();
    }
    bool is_irreducible_loop_entry() const {
      return flow()->is_irreducible_loop_head() || flow()->is_irreducible_loop_secondary_entry();
    }
    void copy_irreducible_status_to(RegionNode* region, const JVMState* jvms) {
      assert(!is_irreducible_loop_entry() || is_in_irreducible_loop(), "entry is part of irreducible loop");
      if (is_in_irreducible_loop()) {
        // The block is in an irreducible loop of this method, so it is possible that this
        // region becomes an irreducible loop entry. (no guarantee)
        region->set_loop_status(RegionNode::LoopStatus::MaybeIrreducibleEntry);
      } else if (jvms->caller() != nullptr) {
        // The block is not in an irreducible loop of this method, hence it cannot ever
        // be the entry of an irreducible loop. But it may be inside an irreducible loop
        // of a caller of this inlined method. (limited guarantee)
        assert(region->loop_status() == RegionNode::LoopStatus::NeverIrreducibleEntry, "status not changed");
      } else {
        // The block is not in an irreducible loop of this method, and there is no outer
        // method. This region will never be in an irreducible loop (strong guarantee)
        region->set_loop_status(RegionNode::LoopStatus::Reducible);
      }
    }
    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const  {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const  { assert(is_SEL_head(),""); return is_invariant_local(i); }

    const Type* peek(int off=0) const      { return stack_type_at(start_sp() - (off+1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
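
    // For example (hypothetical counts): with pred_count() == 3, successive
    // calls to next_path_num() return 3, 2 and then 1 (== PhiNode::Input),
    // so the last-parsed predecessor fills phi input 1.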

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be null.
    void record_state(Parse* outer);
  };
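
  // A rough sketch (informal; the real logic lives in Parse::merge_common())
  // of the protocol the Block members above support: the first predecessor
  // to reach a block records its map, and each later one takes a path number
  // and fills the corresponding phi inputs. `target` and `this_parse` are
  // hypothetical names:
  //
  //   if (!target->is_merged()) {
  //     target->record_state(this_parse);     // first pred: clone my map
  //   } else {
  //     int pnum = target->next_path_num();   // later preds: take a path number
  //     // ... merge current map into target->start_map()'s phis at pnum ...
  //   }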

#ifndef PRODUCT
  // BytecodeParseHistogram collects the number of bytecodes parsed, nodes constructed, and transformations applied.
  class BytecodeParseHistogram : public ArenaObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse     *_parser;
    Compile   *_compiler;

    // Initialization
    static void reset();

    // Return the info being collected; selected with the global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse *p, Compile *c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

  public:
    // Record work done during parsing
    BytecodeParseHistogram* _parse_histogram;
    void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
    BytecodeParseHistogram* parse_histogram()      { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;          // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _wrote_volatile;     // Did we write a volatile field?
  bool          _wrote_stable;       // Did we write a @Stable field?
  bool          _wrote_fields;       // Did we write any field?
  Node*         _alloc_with_final_or_stable; // An allocation node with final or @Stable field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool         _first_return;                  // true if return is the first to be parsed
  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint         _new_idx;                       // any node with _idx above this was created during this parse. Used to trim the replaced nodes list.

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  //            entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  //            blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          wrote_stable() const  { return _wrote_stable; }
  void      set_wrote_stable(bool z)  { _wrote_stable = z; }
  bool         wrote_fields() const   { return _wrote_fields; }
  void     set_wrote_fields(bool z)   { _wrote_fields = z; }
  Node*    alloc_with_final_or_stable() const   { return _alloc_with_final_or_stable; }
  void set_alloc_with_final_or_stable(Node* n)  {
    assert((_alloc_with_final_or_stable == nullptr) || (_alloc_with_final_or_stable == n), "different init objects?");
    _alloc_with_final_or_stable = n;
  }

  Block*             block()    const { return _block; }
  ciBytecodeStream&  iter()           { return _iter; }
  Bytecodes::Code    bc()       const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_osr_parse() const {
    assert(_entry_bci != UnknownBci, "uninitialized _entry_bci");
    return _entry_bci != InvocationEntryBci;
  }
  bool is_normal_parse() const  { return !is_osr_parse(); }
  int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return null if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }
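
  // A minimal sketch (assumed shape, not the actual body of do_all_blocks())
  // of how the accessors above combine to walk blocks in RPO order:
  //
  //   for (int rpo = 0; rpo < block_count(); rpo++) {
  //     Block* b = rpo_at(rpo);
  //     if (b->is_ready() && !b->is_parsed()) {
  //       set_block(b);
  //       do_one_block();
  //     }
  //   }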

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi(       int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);

  void clinit_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (target_bci <= bci()) {
      add_safepoint();
    }
  }
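
  // For example (hypothetical bci values): while parsing a goto at bci 20,
  // maybe_add_safepoint(8) sees a back-branch (8 <= 20) and inserts a
  // safepoint, while maybe_add_safepoint(42) is a forward jump and does not.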

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);

  // Helper functions for type checking bytecodes:
  void  do_checkcast();
  void  do_instanceof();

  // Helper functions for shifting & arithmetic
  Node* floating_point_mod(Node* a, Node* b, BasicType type);
  void l2f();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  void    maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
  void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
  void    jump_if_always_fork(int dest_bci_if_true, bool unc);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

  // Stress unstable if traps
  void stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store);
  // Increment counter used by StressUnstableIfTraps
  void increment_trap_stress_counter(Node*& counter, Node*& incr_store);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

// A specialized uncommon_trap for unstable_if. C2 uses the path's next_bci to update its live locals.
class UnstableIfTrap {
  CallStaticJavaNode* const _unc;
  bool _modified;            // modified locals based on next_bci()
  int _next_bci;

public:
  UnstableIfTrap(CallStaticJavaNode* call, Parse::Block* path): _unc(call), _modified(false) {
    assert(_unc != nullptr && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if,
          "invalid uncommon_trap call!");
    _next_bci = path != nullptr ? path->start() : -1;
  }

  // The starting point of the pruned block, where control goes when
  // deoptimization does happen.
  int next_bci() const {
    return _next_bci;
  }

  bool modified() const {
    return _modified;
  }

  void set_modified() {
    _modified = true;
  }

  CallStaticJavaNode* uncommon_trap() const {
    return _unc;
  }

  inline void* operator new(size_t x) throw() {
    Compile* C = Compile::current();
    return C->comp_arena()->AmallocWords(x);
  }
};
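
// A minimal usage sketch (hypothetical call site; `unc_call` and `path` are
// assumed to be in scope). Note that the class-specific operator new above
// allocates from the compile-time arena, so the trap's lifetime is tied to
// the current compilation and it is never freed individually:
//
//   UnstableIfTrap* trap = new UnstableIfTrap(unc_call, path);
//   if (trap->next_bci() >= 0) {
//     // ... recompute live locals at the pruned block's start ...
//     trap->set_modified();
//   }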

#endif // SHARE_OPTO_PARSE_HPP