src/hotspot/share/opto/parse.hpp

 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OPTO_PARSE_HPP
 26 #define SHARE_OPTO_PARSE_HPP
 27 
 28 #include "ci/ciMethodData.hpp"
 29 #include "ci/ciTypeFlow.hpp"
 30 #include "compiler/methodLiveness.hpp"
 31 #include "libadt/vectset.hpp"
 32 #include "oops/generateOopMap.hpp"
 33 #include "opto/graphKit.hpp"
 34 #include "opto/subnode.hpp"
 35 
 36 class BytecodeParseHistogram;
 37 class InlineTree;
 38 class Parse;
 39 class SwitchRange;
 40 
 41 
 42 //------------------------------InlineTree-------------------------------------
 43 class InlineTree : public AnyObj {
 44   friend class VMStructs;
 45 
 46   Compile*    C;                  // cache
 47   JVMState*   _caller_jvms;       // state of caller
 48   ciMethod*   _method;            // method being called by the caller_jvms
 49   bool        _late_inline;       // method is inlined incrementally
 50   InlineTree* _caller_tree;
 51   uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
 52   const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
 53 

144   void dump_replay_data(outputStream* out, int depth_adjust = 0);
145 };
146 
147 
148 //-----------------------------------------------------------------------------
149 //------------------------------Parse------------------------------------------
150 // Parse bytecodes, build a Graph
151 class Parse : public GraphKit {
152  public:
153   // Per-block information needed by the parser:
154   class Block {
155    private:
156     ciTypeFlow::Block* _flow;
157     int                _pred_count;     // how many predecessors in CFG?
158     int                _preds_parsed;   // how many of these have been parsed?
159     uint               _count;          // how many times executed?  Currently only set by _goto's
160     bool               _is_parsed;      // has this block been parsed yet?
161     bool               _is_handler;     // is this block an exception handler?
162     bool               _has_merged_backedge; // does this block have merged backedge?
163     SafePointNode*     _start_map;      // all values flowing into this block
164     MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
165     bool               _has_predicates; // Were predicates added before parsing of the loop head?
166 
167     int                _num_successors; // Includes only normal control flow.
168     int                _all_successors; // Include exception paths also.
169     Block**            _successors;
170
171    public:
172 
173     // Set up the block data structure itself.
174     Block(Parse* outer, int rpo);
175 
176     // Set up the block's relations to other blocks.
177     void init_graph(Parse* outer);
178 
179     ciTypeFlow::Block* flow() const        { return _flow; }
180     int pred_count() const                 { return _pred_count; }
181     int preds_parsed() const               { return _preds_parsed; }
182     bool is_parsed() const                 { return _is_parsed; }
183     bool is_handler() const                { return _is_handler; }
184     void set_count( uint x )               { _count = x; }
185     uint count() const                     { return _count; }
186 
187     SafePointNode* start_map() const       { assert(is_merged(),"");   return _start_map; }
188     void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }
189 
190     // True after any predecessor flows control into this block
191     bool is_merged() const                 { return _start_map != nullptr; }
192 
193 #ifdef ASSERT
194     // True after backedge predecessor flows control into this block
195     bool has_merged_backedge() const       { return _has_merged_backedge; }
196     void mark_merged_backedge(Block* pred) {
197       assert(is_SEL_head(), "should be loop head");
198       if (pred != nullptr && is_SEL_backedge(pred)) {
199         assert(is_parsed(), "block should be parsed before merging backedges");
200         _has_merged_backedge = true;
201       }
202     }
203 #endif
204 
205     // True when all non-exception predecessors have been parsed.
206     bool is_ready() const                  { return preds_parsed() == pred_count(); }
207 
208     bool has_predicates() const            { return _has_predicates; }
209     void set_has_predicates()              { _has_predicates = true; }
210 
211     int num_successors() const             { return _num_successors; }
212     int all_successors() const             { return _all_successors; }
213     Block* successor_at(int i) const {
214       assert((uint)i < (uint)all_successors(), "");
215       return _successors[i];
216     }
217     Block* successor_for_bci(int bci);
218 
219     int start() const                      { return flow()->start(); }
220     int limit() const                      { return flow()->limit(); }
221     int rpo() const                        { return flow()->rpo(); }
222     int start_sp() const                   { return flow()->stack_size(); }
223 
224     bool is_loop_head() const              { return flow()->is_loop_head(); }
225     bool is_in_irreducible_loop() const {
226       return flow()->is_in_irreducible_loop();
227     }
228     bool is_irreducible_loop_entry() const {
229       return flow()->is_irreducible_loop_head() || flow()->is_irreducible_loop_secondary_entry();
230     }
231     void copy_irreducible_status_to(RegionNode* region, const JVMState* jvms) {
232       assert(!is_irreducible_loop_entry() || is_in_irreducible_loop(), "entry is part of irreducible loop");
233       if (is_in_irreducible_loop()) {
234         // The block is in an irreducible loop of this method, so it is possible that this
235         // region becomes an irreducible loop entry. (no guarantee)
236         region->set_loop_status(RegionNode::LoopStatus::MaybeIrreducibleEntry);
237       } else if (jvms->caller() != nullptr) {
238         // The block is not in an irreducible loop of this method, hence it cannot ever

269     }
270 
271     // Return the phi/region input index for the "current" pred,
272     // and bump the pred number.  For historical reasons these index
273     // numbers are handed out in descending order.  The last index is
274     // always PhiNode::Input (i.e., 1).  The value returned is known
275     // as a "path number" because it distinguishes by which path we are
276     // entering the block.
277     int next_path_num() {
278       assert(preds_parsed() < pred_count(), "too many preds?");
279       return pred_count() - _preds_parsed++;
280     }
281 
282     // Add a previously unaccounted predecessor to this block.
283     // This operates by increasing the size of the block's region
284     // and all its phi nodes (if any).  The value returned is a
285     // path number ("pnum").
286     int add_new_path();
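
Taken together, next_path_num() and add_new_path() define the path-number protocol the merge code depends on. Below is a minimal sketch of the intended wiring, assuming HotSpot's Node API (init_req) and a block with pred_count() == 3; wire_one_pred is a hypothetical helper, not part of this file:

     // Input 0 of a RegionNode refers to the node itself, so path numbers
     // count down 3, 2, 1 and end at PhiNode::Input (i.e., 1).
     static void wire_one_pred(Parse::Block* target, Node* ctrl, Node* value,
                               RegionNode* region, PhiNode* phi) {
       int pnum = target->next_path_num(); // 3 on the first call, then 2, 1
       region->init_req(pnum, ctrl);       // control edge for this path
       phi->init_req(pnum, value);         // merged value for the same path
     }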
287 
288     // Initialize me by recording the parser's map.  My own map must be null.
289     void record_state(Parse* outer);
290   };
291 
292 #ifndef PRODUCT
293   // BytecodeParseHistogram counts bytecodes parsed, nodes constructed, nodes transformed, and new values created.
294   class BytecodeParseHistogram : public ArenaObj {
295    private:
296     enum BPHType {
297       BPH_transforms,
298       BPH_values
299     };
300     static bool _initialized;
301     static uint _bytecodes_parsed [Bytecodes::number_of_codes];
302     static uint _nodes_constructed[Bytecodes::number_of_codes];
303     static uint _nodes_transformed[Bytecodes::number_of_codes];
304     static uint _new_values       [Bytecodes::number_of_codes];
305 
306     Bytecodes::Code _initial_bytecode;
307     int             _initial_node_count;
308     int             _initial_transforms;
309     int             _initial_values;

326     // Record results of parsing one bytecode
327     void record_change();
328 
329     // Profile printing
330     static void print(float cutoff = 0.01F); // cutoff in percent
331   };
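
The histogram is meant to be driven once per bytecode: snapshot the counters before dispatch, then charge the deltas to that bytecode. A sketch of the assumed driving pattern (set_initial_state is assumed to be declared in the elided lines above; the real call sites live in the parser's per-block loop, e.g. do_one_block()):

     // Non-product builds only, inside the bytecode loop of the parser:
     parse_histogram()->set_initial_state(bc()); // remember bc and counters
     do_one_bytecode();                          // parse one bytecode
     parse_histogram()->record_change();         // attribute deltas to bc
     // At shutdown, BytecodeParseHistogram::print(0.5F) would report only
     // bytecodes responsible for at least 0.5% of the parsing work.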
332 
333   public:
334     // Record work done during parsing
335     BytecodeParseHistogram* _parse_histogram;
336     void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
337     BytecodeParseHistogram* parse_histogram()      { return _parse_histogram; }
338 #endif
339 
340  private:
341   friend class Block;
342 
343   // Variables which characterize this compilation as a whole:
344 
345   JVMState*     _caller;        // JVMS which carries incoming args & state.
346   float         _expected_uses; // expected number of calls to this code
347   float         _prof_factor;   // discount applied to my profile counts
348   int           _depth;         // Inline tree depth, for debug printouts
349   const TypeFunc*_tf;           // My kind of function type
350   int           _entry_bci;     // the osr bci or InvocationEntryBci
351 
352   ciTypeFlow*   _flow;          // Results of previous flow pass.
353   Block*        _blocks;        // Array of basic-block structs.
354   int           _block_count;   // Number of elements in _blocks.
355 
356   GraphKit      _exits;         // Record all normal returns and throws here.
357   bool          _wrote_final;   // Did we write a final field?
358   bool          _wrote_volatile;     // Did we write a volatile field?
359   bool          _wrote_stable;       // Did we write a @Stable field?
360   bool          _wrote_fields;       // Did we write any field?
361   Node*         _alloc_with_final;   // An allocation node with final field
362 
363   // Variables which track Java semantics during bytecode parsing:
364 
365   Block*            _block;     // block currently getting parsed
366   ciBytecodeStream  _iter;      // stream of this method's bytecodes
367 
368   const FastLockNode* _synch_lock; // FastLockNode for synchronized method
369 
370 #ifndef PRODUCT
371   int _max_switch_depth;        // Debugging SwitchRanges.
372   int _est_switch_depth;        // Debugging SwitchRanges.
373 #endif
374 
375   bool         _first_return;                  // true if return is the first to be parsed
376   bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
377   uint         _new_idx;                       // nodes with _idx above this value were created during this parse. Used to trim the replaced-nodes list.
378 
379  public:
380   // Constructor
381   Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
382 
383   virtual Parse* is_Parse() const { return (Parse*)this; }
384 
385   // Accessors.
386   JVMState*     caller()        const { return _caller; }
387   float         expected_uses() const { return _expected_uses; }
388   float         prof_factor()   const { return _prof_factor; }
389   int           depth()         const { return _depth; }
390   const TypeFunc* tf()          const { return _tf; }
391   //            entry_bci()     -- see osr_bci, etc.
392 
393   ciTypeFlow*   flow()          const { return _flow; }
394   //            blocks()        -- see rpo_at, start_block, etc.
395   int           block_count()   const { return _block_count; }
396 
397   GraphKit&     exits()               { return _exits; }
398   bool          wrote_final() const   { return _wrote_final; }
399   void      set_wrote_final(bool z)   { _wrote_final = z; }
400   bool          wrote_volatile() const { return _wrote_volatile; }
401   void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
402   bool          wrote_stable() const  { return _wrote_stable; }
403   void      set_wrote_stable(bool z)  { _wrote_stable = z; }
404   bool         wrote_fields() const   { return _wrote_fields; }
405   void     set_wrote_fields(bool z)   { _wrote_fields = z; }
406   Node*    alloc_with_final() const   { return _alloc_with_final; }
407   void set_alloc_with_final(Node* n)  {
408     assert((_alloc_with_final == nullptr) || (_alloc_with_final == n), "different init objects?");
409     _alloc_with_final = n;
410   }
411 
412   Block*             block()    const { return _block; }
413   ciBytecodeStream&  iter()           { return _iter; }
414   Bytecodes::Code    bc()       const { return _iter.cur_bc(); }
415 
416   void set_block(Block* b)            { _block = b; }
417 
418   // Derived accessors:
419   bool is_osr_parse() const {
420     assert(_entry_bci != UnknownBci, "uninitialized _entry_bci");
421     return _entry_bci != InvocationEntryBci;
422   }
423   bool is_normal_parse() const  { return !is_osr_parse(); }
424   int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }
425 
426   void set_parse_bci(int bci);
427 
428   // Must this parse be aborted?
429   bool failing()                { return C->failing(); }

435   Block* start_block() {
436     return rpo_at(flow()->start_block()->rpo());
437   }
438   // Can return null if the flow pass did not complete a block.
439   Block* successor_for_bci(int bci) {
440     return block()->successor_for_bci(bci);
441   }
442 
443  private:
444   // Create a JVMS & map for the initial state of this method.
445   SafePointNode* create_entry_map();
446 
447   // OSR helpers
448   Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
449   Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
450   void  load_interpreter_state(Node* osr_buf);
451 
452   // Functions for managing basic blocks:
453   void init_blocks();
454   void load_state_from(Block* b);
455   void store_state_to(Block* b) { b->record_state(this); }
456 
457   // Parse all the basic blocks.
458   void do_all_blocks();
459 
460   // Parse the current basic block
461   void do_one_block();
462 
463   // Raise an error if we get a bad ciTypeFlow CFG.
464   void handle_missing_successor(int bci);
465 
466   // first actions (before BCI 0)
467   void do_method_entry();
468 
469   // implementation of monitorenter/monitorexit
470   void do_monitor_enter();
471   void do_monitor_exit();
472 
473   // Eagerly create phis throughout the state, to cope with back edges.
474   void ensure_phis_everywhere();
475 
476   // Merge the current mapping into the basic block starting at bci
477   void merge(          int target_bci);
478   // Same as plain merge, except that it allocates a new path number.
479   void merge_new_path( int target_bci);
480   // Merge the current mapping into an exception handler.
481   void merge_exception(int target_bci);
482   // Helper: Merge the current mapping into the given basic block
483   void merge_common(Block* target, int pnum);
484   // Helper functions for merging individual cells.
485   PhiNode *ensure_phi(       int idx, bool nocreate = false);
486   PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
487   // Helper to merge the current memory state into the given basic block
488   void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
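
All of the merge entry points above funnel into merge_common. A hypothetical outline of what merge(bci) reduces to, using only the declarations in this header (the real definitions are in parse1.cpp):

     void merge_outline(Parse* parser, int target_bci) {
       Parse::Block* target = parser->successor_for_bci(target_bci); // may be null
       int pnum = target->next_path_num(); // or add_new_path() for an extra pred
       // merge_common(target, pnum): for each live JVM-state cell idx,
       // ensure_phi(idx) / ensure_memory_phi(idx) find or create the phi,
       // then input pnum of that phi is wired to the parser's current value.
     }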
489 
490   // Parse this bytecode, and alter the Parser's JVM->Node mapping
491   void do_one_bytecode();
492 
493   // helper function to generate array store check
494   void array_store_check();
495   // Helper function to generate array load
496   void array_load(BasicType etype);
497   // Helper function to generate array store
498   void array_store(BasicType etype);
499   // Helper function to compute array addressing
500   Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
501 
502   void clinit_deopt();
503 
504   void rtm_deopt();
505 
506   // Pass current map to exits

 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OPTO_PARSE_HPP
 26 #define SHARE_OPTO_PARSE_HPP
 27 
 28 #include "ci/ciMethodData.hpp"
 29 #include "ci/ciTypeFlow.hpp"
 30 #include "compiler/methodLiveness.hpp"
 31 #include "libadt/vectset.hpp"
 32 #include "oops/generateOopMap.hpp"
 33 #include "opto/graphKit.hpp"
 34 #include "opto/partialEscape.hpp"
 35 #include "opto/subnode.hpp"
 36 
 37 class BytecodeParseHistogram;
 38 class InlineTree;
 39 class Parse;
 40 class SwitchRange;
 41 
 42 
 43 //------------------------------InlineTree-------------------------------------
 44 class InlineTree : public AnyObj {
 45   friend class VMStructs;
 46 
 47   Compile*    C;                  // cache
 48   JVMState*   _caller_jvms;       // state of caller
 49   ciMethod*   _method;            // method being called by the caller_jvms
 50   bool        _late_inline;       // method is inlined incrementally
 51   InlineTree* _caller_tree;
 52   uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
 53   const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
 54 

145   void dump_replay_data(outputStream* out, int depth_adjust = 0);
146 };
147 
148 
149 //-----------------------------------------------------------------------------
150 //------------------------------Parse------------------------------------------
151 // Parse bytecodes, build a Graph
152 class Parse : public GraphKit {
153  public:
154   // Per-block information needed by the parser:
155   class Block {
156    private:
157     ciTypeFlow::Block* _flow;
158     int                _pred_count;     // how many predecessors in CFG?
159     int                _preds_parsed;   // how many of these have been parsed?
160     uint               _count;          // how many times executed?  Currently only set by _goto's
161     bool               _is_parsed;      // has this block been parsed yet?
162     bool               _is_handler;     // is this block an exception handler?
163     bool               _has_merged_backedge; // does this block have merged backedge?
164     SafePointNode*     _start_map;      // all values flowing into this block
165     mutable MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
166     bool               _has_predicates; // Were predicates added before parsing of the loop head?
167 
168     int                _num_successors; // Includes only normal control flow.
169     int                _all_successors; // Include exception paths also.
170     Block**            _successors;
171     Block**            _predecessors;
172     Block*             _from_block;
173     int                _init_pnum;      // the pnum of the Block that _state is copied from.
174 
175     const MethodLivenessResult& liveness() const {
176       if (!_live_locals.is_valid()) {
177         _live_locals = flow()->outer()->method()->liveness_at_bci(start());
178       }
179       assert(_live_locals.is_valid(), "sanity check");
180       return _live_locals;
181     }
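
This accessor is why _live_locals gained the mutable qualifier above: a const member function may not assign to ordinary members, but filling a cache on first use is logically const. The same pattern in isolation (generic C++, illustration only):

     struct LazyCache {
       mutable bool _valid = false;
       mutable int  _value = 0;
       int compute() const { return 42; }  // stand-in for the real work
       int get() const {                   // const, yet may fill the cache
         if (!_valid) { _value = compute(); _valid = true; }
         return _value;
       }
     };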
182    public:
183 
184     // Set up the block data structure itself.
185     Block(Parse* outer, int rpo);
186 
187     // Set up the block's relations to other blocks.
188     void init_graph(Parse* outer);
189 
190     ciTypeFlow::Block* flow() const        { return _flow; }
191     int pred_count() const                 { return _pred_count; }
192     int preds_parsed() const               { return _preds_parsed; }
193     bool is_parsed() const                 { return _is_parsed; }
194     bool is_handler() const                { return _is_handler; }
195     void set_count( uint x )               { _count = x; }
196     uint count() const                     { return _count; }
197 
198     SafePointNode* start_map() const       { assert(is_merged(),"");   return _start_map; }
199     void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }
200 
201     // True after any predecessor flows control into this block
202     bool is_merged() const                 { return _start_map != nullptr; }
203     Block* from_block() const              { return _from_block; }
204     int init_pnum() const                  { return _init_pnum; }
205     PEAState& state()                      {
206       assert(is_merged(), "sanity check");
207       return _start_map->jvms()->alloc_state();
208     }
209 
210 #ifdef ASSERT
211     // True after backedge predecessor flows control into this block
212     bool has_merged_backedge() const       { return _has_merged_backedge; }
213     void mark_merged_backedge(Block* pred) {
214       assert(is_SEL_head(), "should be loop head");
215       if (pred != nullptr && is_SEL_backedge(pred)) {
216         assert(is_parsed(), "block should be parsed before merging backedges");
217         _has_merged_backedge = true;
218       }
219     }
220 #endif
221 
222     // True when all non-exception predecessors have been parsed.
223     bool is_ready() const                  { return preds_parsed() == pred_count(); }
224 
225     bool has_predicates() const            { return _has_predicates; }
226     void set_has_predicates()              { _has_predicates = true; }
227 
228     int num_successors() const             { return _num_successors; }
229     int all_successors() const             { return _all_successors; }
230     Block* successor_at(int i) const {
231       assert((uint)i < (uint)all_successors(), "");
232       return _successors[i];
233     }
234     Block* successor_for_bci(int bci);
235 
236     Block* predecessor_at(int i) const {
237       assert(DoPartialEscapeAnalysis, "_predecessors is only available when DoPartialEscapeAnalysis is ON");
238       assert(i < _pred_count, "");
239       return _predecessors[i];
240     }
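
Predecessor links are bookkeeping added for partial escape analysis only; when DoPartialEscapeAnalysis is off, _predecessors is never populated, hence the guarding assert. A hypothetical traversal under that assumption (the 0-based index range is inferred from the assert above):

     // Visit each recorded CFG predecessor of a block (PEA builds only).
     for (int i = 0; i < b->pred_count(); i++) {
       Parse::Block* pred = b->predecessor_at(i);
       // e.g., compare pred->state() with this block's PEAState to decide
       // where tracked allocations must be materialized at the merge.
     }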
241 
242     int start() const                      { return flow()->start(); }
243     int limit() const                      { return flow()->limit(); }
244     int rpo() const                        { return flow()->rpo(); }
245     int start_sp() const                   { return flow()->stack_size(); }
246 
247     bool is_loop_head() const              { return flow()->is_loop_head(); }
248     bool is_in_irreducible_loop() const {
249       return flow()->is_in_irreducible_loop();
250     }
251     bool is_irreducible_loop_entry() const {
252       return flow()->is_irreducible_loop_head() || flow()->is_irreducible_loop_secondary_entry();
253     }
254     void copy_irreducible_status_to(RegionNode* region, const JVMState* jvms) {
255       assert(!is_irreducible_loop_entry() || is_in_irreducible_loop(), "entry is part of irreducible loop");
256       if (is_in_irreducible_loop()) {
257         // The block is in an irreducible loop of this method, so it is possible that this
258         // region becomes an irreducible loop entry. (no guarantee)
259         region->set_loop_status(RegionNode::LoopStatus::MaybeIrreducibleEntry);
260       } else if (jvms->caller() != nullptr) {
261         // The block is not in an irreducible loop of this method, hence it cannot ever

292     }
293 
294     // Return the phi/region input index for the "current" pred,
295     // and bump the pred number.  For historical reasons these index
296     // numbers are handed out in descending order.  The last index is
297     // always PhiNode::Input (i.e., 1).  The value returned is known
298     // as a "path number" because it distinguishes by which path we are
299     // entering the block.
300     int next_path_num() {
301       assert(preds_parsed() < pred_count(), "too many preds?");
302       return pred_count() - _preds_parsed++;
303     }
304 
305     // Add a previously unaccounted predecessor to this block.
306     // This operates by increasing the size of the block's region
307     // and all its phi nodes (if any).  The value returned is a
308     // path number ("pnum").
309     int add_new_path();
310 
311     // Initialize me by recording the parser's map.  My own map must be null.
312     void record_state(Parse* outer, int pnum);
313   };
314 
315 #ifndef PRODUCT
316   // BytecodeParseHistogram counts bytecodes parsed, nodes constructed, nodes transformed, and new values created.
317   class BytecodeParseHistogram : public ArenaObj {
318    private:
319     enum BPHType {
320       BPH_transforms,
321       BPH_values
322     };
323     static bool _initialized;
324     static uint _bytecodes_parsed [Bytecodes::number_of_codes];
325     static uint _nodes_constructed[Bytecodes::number_of_codes];
326     static uint _nodes_transformed[Bytecodes::number_of_codes];
327     static uint _new_values       [Bytecodes::number_of_codes];
328 
329     Bytecodes::Code _initial_bytecode;
330     int             _initial_node_count;
331     int             _initial_transforms;
332     int             _initial_values;

349     // Record results of parsing one bytecode
350     void record_change();
351 
352     // Profile printing
353     static void print(float cutoff = 0.01F); // cutoff in percent
354   };
355 
356   public:
357     // Record work done during parsing
358     BytecodeParseHistogram* _parse_histogram;
359     void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
360     BytecodeParseHistogram* parse_histogram()      { return _parse_histogram; }
361 #endif
362 
363  private:
364   friend class Block;
365 
366   // Variables which characterize this compilation as a whole:
367 
368   JVMState*     _caller;        // JVMS which carries incoming args & state.
369   PEAState*     _caller_state;  // current PEA state of caller
370   float         _expected_uses; // expected number of calls to this code
371   float         _prof_factor;   // discount applied to my profile counts
372   int           _depth;         // Inline tree depth, for debug printouts
373   const TypeFunc*_tf;           // My kind of function type
374   int           _entry_bci;     // the osr bci or InvocationEntryBci
375 
376   ciTypeFlow*   _flow;          // Results of previous flow pass.
377   Block*        _blocks;        // Array of basic-block structs.
378   int           _block_count;   // Number of elements in _blocks.
379 
380   GraphKit      _exits;         // Record all normal returns and throws here.
381   bool          _wrote_final;   // Did we write a final field?
382   bool          _wrote_volatile;     // Did we write a volatile field?
383   bool          _wrote_stable;       // Did we write a @Stable field?
384   bool          _wrote_fields;       // Did we write any field?
385   Node*         _alloc_with_final;   // An allocation node with final field
386 
387   // Variables which track Java semantics during bytecode parsing:
388 
389   Block*            _block;     // block currently getting parsed
390   ciBytecodeStream  _iter;      // stream of this method's bytecodes
391 
392   const FastLockNode* _synch_lock; // FastLockNode for synchronized method
393 
394 #ifndef PRODUCT
395   int _max_switch_depth;        // Debugging SwitchRanges.
396   int _est_switch_depth;        // Debugging SwitchRanges.
397 #endif
398 
399   int          _first_return;                  // true if return is the first to be parsed
400   bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
401   uint         _new_idx;                       // nodes with _idx above this value were created during this parse. Used to trim the replaced-nodes list.
402 
403  public:
404   // Constructor
405   Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, PEAState* caller_state = nullptr);
406 
407 #ifndef PRODUCT
408   ~Parse();
409 #endif
410   virtual Parse* is_Parse() const { return (Parse*)this; }
411 
412   // Accessors.
413   JVMState*     caller()        const { return _caller; }
414   float         expected_uses() const { return _expected_uses; }
415   float         prof_factor()   const { return _prof_factor; }
416   int           depth()         const { return _depth; }
417   const TypeFunc* tf()          const { return _tf; }
418   //            entry_bci()     -- see osr_bci, etc.
419 
420   ciTypeFlow*   flow()          const { return _flow; }
421   //            blocks()        -- see rpo_at, start_block, etc.
422   int           block_count()   const { return _block_count; }
423 
424   GraphKit&     exits()               { return _exits; }
425   bool          wrote_final() const   { return _wrote_final; }
426   void      set_wrote_final(bool z)   { _wrote_final = z; }
427   bool          wrote_volatile() const { return _wrote_volatile; }
428   void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
429   bool          wrote_stable() const  { return _wrote_stable; }
430   void      set_wrote_stable(bool z)  { _wrote_stable = z; }
431   bool         wrote_fields() const   { return _wrote_fields; }
432   void     set_wrote_fields(bool z)   { _wrote_fields = z; }
433   Node*    alloc_with_final() const   { return _alloc_with_final; }
434   void set_alloc_with_final(Node* n)  {
435     if (DoPartialEscapeAnalysis) {
436       assert((_alloc_with_final == nullptr) || (_alloc_with_final == PEA()->is_alias(n)), 
437              "different init objects?");
438       _alloc_with_final = PEA()->is_alias(n);
439     } else {
440       assert((_alloc_with_final == nullptr) || (_alloc_with_final == n), "different init objects?");
441       _alloc_with_final = n;
442     }
443   }
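
Under partial escape analysis, the node a bytecode refers to may be a tracked alias of the original allocation, so the setter canonicalizes through the alias map before the identity check. The contract of the helpers is inferred from this call site only (PEA() and PEAState::is_alias come from opto/partialEscape.hpp, which is not shown on this page):

     // Inferred contract, for illustration:
     //   PEA()              -> the parser's current PEAState
     //   PEA()->is_alias(n) -> the canonical allocation node that n stands
     //                         for, so two references to the same allocation
     //                         compare equal in the assert above.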
444 
445   Block*             block()    const { return _block; }
446   ciBytecodeStream&  iter()           { return _iter; }
447   Bytecodes::Code    bc()       const { return _iter.cur_bc(); }
448 
449   void set_block(Block* b)            { _block = b; }
450 
451   // Derived accessors:
452   bool is_osr_parse() const {
453     assert(_entry_bci != UnknownBci, "uninitialized _entry_bci");
454     return _entry_bci != InvocationEntryBci;
455   }
456   bool is_normal_parse() const  { return !is_osr_parse(); }
457   int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }
458 
459   void set_parse_bci(int bci);
460 
461   // Must this parse be aborted?
462   bool failing()                { return C->failing(); }

468   Block* start_block() {
469     return rpo_at(flow()->start_block()->rpo());
470   }
471   // Can return null if the flow pass did not complete a block.
472   Block* successor_for_bci(int bci) {
473     return block()->successor_for_bci(bci);
474   }
475 
476  private:
477   // Create a JVMS & map for the initial state of this method.
478   SafePointNode* create_entry_map();
479 
480   // OSR helpers
481   Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
482   Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
483   void  load_interpreter_state(Node* osr_buf);
484 
485   // Functions for managing basic blocks:
486   void init_blocks();
487   void load_state_from(Block* b);
488   void store_state_to(Block* b, int pnum) { b->record_state(this, pnum); }
489 
490   // Parse all the basic blocks.
491   void do_all_blocks();
492 
493   // Parse the current basic block
494   void do_one_block();
495 
496   // Raise an error if we get a bad ciTypeFlow CFG.
497   void handle_missing_successor(int bci);
498 
499   // first actions (before BCI 0)
500   void do_method_entry();
501 
502   // implementation of monitorenter/monitorexit
503   void do_monitor_enter();
504   void do_monitor_exit();
505 
506   // Eagerly create phis throughout the state, to cope with back edges.
507   void ensure_phis_everywhere();
508 
509   // Merge the current mapping into the basic block starting at bci
510   void merge(          int target_bci);
511   // Same as plain merge, except that it allocates a new path number.
512   void merge_new_path( int target_bci);
513   // Merge the current mapping into an exception handler.
514   void merge_exception(int target_bci);
515   // Helper: Merge the current mapping into the given basic block
516   void merge_common(Block* target, int pnum);
517   // Helper functions for merging individual cells.
518   PhiNode *ensure_phi(       int idx, bool nocreate = false);
519   PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
520 
521   // Helper to merge the current memory state into the given basic block
522   void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
523 
524   // Parse this bytecode, and alter the Parser's JVM->Node mapping
525   void do_one_bytecode();
526 
527   // helper function to generate array store check
528   void array_store_check();
529   // Helper function to generate array load
530   void array_load(BasicType etype);
531   // Helper function to generate array store
532   void array_store(BasicType etype);
533   // Helper function to compute array addressing
534   Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
535 
536   void clinit_deopt();
537 
538   void rtm_deopt();
539 
540   // Pass current map to exits