/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public AnyObj {
  Compile*    C;                 // cache
  JVMState*   _caller_jvms;      // state of caller
  ciMethod*   _method;           // method being called by the caller_jvms
  bool        _late_inline;      // method is inlined incrementally
  InlineTree* _caller_tree;
  uint        _count_inline_bcs; // Accumulated count of inlined bytecodes
  const int   _max_inline_level; // the maximum inline level for this sub-tree (may be adjusted)

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            JVMState* caller_jvms,
                            bool& should_delay,
                            ciCallProfile& profile);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                int caller_bci,
                                bool& should_delay,
                                ciCallProfile& profile);
  bool        is_not_reached(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
                             ciCallProfile& profile);
  void print_inlining(ciMethod* callee_method, JVMState* jvms, bool success) const;

  InlineTree* caller_tree()    const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         stack_depth()    const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()            const { return _msg; }
  void        set_msg(const char* msg) { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  bool ok_to_inline(ciMethod* call_method, JVMState* caller_jvms, ciCallProfile& profile, bool& should_delay);

  void set_late_inline() {
    _late_inline = true;
  }

  // Information about inlined method
  JVMState* caller_jvms()      const { return _caller_jvms; }
  ciMethod* method()           const { return _method; }
  int       caller_bci()       const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint      count_inline_bcs() const { return _count_inline_bcs; }
  int       inline_level()     const { return stack_depth(); }

#ifndef PRODUCT
private:
  uint _count_inlines; // Count of inlined methods
public:
  // Debug information collected during parse
  uint count_inlines() const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool _forced_inline; // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool forced_inline() const { return _forced_inline; }
  // Count number of nodes in this subtree
  int count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out, int depth_adjust = 0);
};
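
// A rough usage sketch (illustrative only; the authoritative inlining
// decisions are made in the parser's call handling, see doCall.cpp, and the
// names 'caller_jvms', 'caller_method', 'callee_method' and 'profile' below
// are placeholders):
//
//   InlineTree* ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, caller_method);
//   bool should_delay = false;
//   if (ilt->ok_to_inline(callee_method, caller_jvms, profile, should_delay)) {
//     // parse the callee inline now, or enqueue it for late (incremental)
//     // inlining when should_delay was set
//   }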


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;          // how many predecessors in CFG?
    int                _preds_parsed;        // how many of these have been parsed?
    uint               _count;               // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;           // has this block been parsed yet?
    bool               _is_handler;          // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*     _start_map;           // all values flowing into this block
    MethodLivenessResult _live_locals;       // lazily initialized liveness bitmap
    bool               _has_predicates;      // Were predicates added before parsing of the loop head?

    int                _num_successors;      // Includes only normal control flow.
    int                _all_successors;      // Includes exception paths also.
    Block**            _successors;

   public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const        { return _flow; }
    int pred_count() const                 { return _pred_count; }
    int preds_parsed() const               { return _preds_parsed; }
    bool is_parsed() const                 { return _is_parsed; }
    bool is_handler() const                { return _is_handler; }
    void set_count(uint x)                 { _count = x; }
    uint count() const                     { return _count; }

    SafePointNode* start_map() const       { assert(is_merged(), ""); return _start_map; }
    void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const                 { return _start_map != nullptr; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const       { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != nullptr && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                  { return preds_parsed() == pred_count(); }

    bool has_predicates() const            { return _has_predicates; }
    void set_has_predicates()              { _has_predicates = true; }

    int num_successors() const             { return _num_successors; }
    int all_successors() const             { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                      { return flow()->start(); }
    int limit() const                      { return flow()->limit(); }
    int rpo() const                        { return flow()->rpo(); }
    int start_sp() const                   { return flow()->stack_size(); }

    bool is_loop_head() const              { return flow()->is_loop_head(); }
    bool is_in_irreducible_loop() const {
      return flow()->is_in_irreducible_loop();
    }
    bool is_irreducible_loop_entry() const {
      return flow()->is_irreducible_loop_head() || flow()->is_irreducible_loop_secondary_entry();
    }
    void copy_irreducible_status_to(RegionNode* region, const JVMState* jvms) {
      assert(!is_irreducible_loop_entry() || is_in_irreducible_loop(), "entry is part of irreducible loop");
      if (is_in_irreducible_loop()) {
        // The block is in an irreducible loop of this method, so it is possible that this
        // region becomes an irreducible loop entry. (no guarantee)
        region->set_loop_status(RegionNode::LoopStatus::MaybeIrreducibleEntry);
      } else if (jvms->caller() != nullptr) {
        // The block is not in an irreducible loop of this method, hence it cannot ever
        // be the entry of an irreducible loop. But it may be inside an irreducible loop
        // of a caller of this inlined method. (limited guarantee)
        assert(region->loop_status() == RegionNode::LoopStatus::NeverIrreducibleEntry, "status not changed");
      } else {
        // The block is not in an irreducible loop of this method, and there is no outer
        // method. This region will never be in an irreducible loop (strong guarantee)
        region->set_loop_status(RegionNode::LoopStatus::Reducible);
      }
    }
    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const   { assert(is_SEL_head(), ""); return is_invariant_local(i); }
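    // (Illustrative note: a local that the loop body never reassigns is
    // invariant in a single-entry loop, so the loop head can elide the phi
    // for it; the authoritative test is ciTypeFlow's is_invariant_local,
    // queried above.)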

    const Type* peek(int off = 0) const    { return stack_type_at(start_sp() - (off + 1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
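    // For example, a block with pred_count() == 3 hands out path numbers
    // 3, 2, 1 on successive calls; the last one is always PhiNode::Input (1).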
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be null.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects counts of bytecodes parsed, nodes constructed, and transformations applied.
  class BytecodeParseHistogram : public ArenaObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state(Bytecodes::Code bc);
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*       _caller;        // JVMS which carries incoming args & state.
  float           _expected_uses; // expected number of calls to this code
  float           _prof_factor;   // discount applied to my profile counts
  int             _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;            // My kind of function type
  int             _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*     _flow;          // Results of previous flow pass.
  Block*          _blocks;        // Array of basic-block structs.
  int             _block_count;   // Number of elements in _blocks.

  GraphKit        _exits;          // Record all normal returns and throws here.
  bool            _wrote_final;    // Did we write a final field?
  bool            _wrote_volatile; // Did we write a volatile field?
  bool            _wrote_stable;   // Did we write a @Stable field?
  bool            _wrote_fields;   // Did we write any field?
  Node*           _alloc_with_final_or_stable; // An allocation node with final or @Stable field

  // Variables which track Java semantics during bytecode parsing:

  Block*           _block; // block currently getting parsed
  ciBytecodeStream _iter;  // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth; // Debugging SwitchRanges.
  int _est_switch_depth; // Debugging SwitchRanges.
#endif

  bool _first_return;                  // true if return is the first to be parsed
  bool _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint _new_idx;                       // nodes with _idx at or above this value were created during this parse; used to trim the replaced-nodes list

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  // entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  // blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const      { return _wrote_final; }
  void      set_wrote_final(bool z)      { _wrote_final = z; }
  bool          wrote_volatile() const   { return _wrote_volatile; }
  void      set_wrote_volatile(bool z)   { _wrote_volatile = z; }
  bool          wrote_stable() const     { return _wrote_stable; }
  void      set_wrote_stable(bool z)     { _wrote_stable = z; }
  bool          wrote_fields() const     { return _wrote_fields; }
  void      set_wrote_fields(bool z)     { _wrote_fields = z; }
  Node*         alloc_with_final_or_stable() const { return _alloc_with_final_or_stable; }
  void set_alloc_with_final_or_stable(Node* n) {
    assert((_alloc_with_final_or_stable == nullptr) || (_alloc_with_final_or_stable == n), "different init objects?");
    _alloc_with_final_or_stable = n;
  }

  Block*            block()     const { return _block; }
  ciBytecodeStream& iter()            { return _iter; }
  Bytecodes::Code   bc()        const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_osr_parse() const {
    assert(_entry_bci != UnknownBci, "uninitialized _entry_bci");
    return _entry_bci != InvocationEntryBci;
  }
  bool is_normal_parse() const        { return !is_osr_parse(); }
  int osr_bci() const                 { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return null if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();
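
  // A rough sketch of the parsing driver (the real code is in parse1.cpp):
  //   do_all_blocks() visits the blocks in RPO; a block whose state has been
  //   merged is loaded with load_state_from() and parsed by do_one_block().
  //   Loop heads are parsed before all their backedges have merged, so phis
  //   are created eagerly (see ensure_phis_everywhere below) to receive the
  //   backedge state later.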

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);

  void clinit_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (target_bci <= bci()) {
      add_safepoint();
    }
  }
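  // For example, the closing goto of a bytecode while-loop targets a bci at
  // or before the branch itself, so target_bci <= bci() holds and a safepoint
  // is inserted; forward branches (target_bci > bci()) add nothing.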

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  Node* floating_point_mod(Node* a, Node* b, BasicType type);
  void l2f();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float& cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float& cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  void    maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  void    jump_if_true_fork(IfNode* ifNode, int dest_bci_if_true, bool unc);
  void    jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, bool unc);
  void    jump_if_always_fork(int dest_bci_if_true, bool unc);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

  // Stress unstable if traps
  void stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store);
  // Increment counter used by StressUnstableIfTraps
  void increment_trap_stress_counter(Node*& counter, Node*& incr_store);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

// A specialized uncommon_trap for unstable_if. C2 uses the next_bci of the path to update its live locals.
class UnstableIfTrap {
  CallStaticJavaNode* const _unc;
  bool _modified;  // modified locals based on next_bci()
  int  _next_bci;

 public:
  UnstableIfTrap(CallStaticJavaNode* call, Parse::Block* path) : _unc(call), _modified(false) {
    assert(_unc != nullptr && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if,
           "invalid uncommon_trap call!");
    _next_bci = path != nullptr ? path->start() : -1;
  }

  // The starting point of the pruned block, where control goes when
  // deoptimization does happen.
  int next_bci() const {
    return _next_bci;
  }

  bool modified() const {
    return _modified;
  }

  void set_modified() {
    _modified = true;
  }

  CallStaticJavaNode* uncommon_trap() const {
    return _unc;
  }

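  // Allocated in the compilation arena: instances live until the current
  // compilation finishes and are never individually freed.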
  inline void* operator new(size_t x) throw() {
    Compile* C = Compile::current();
    return C->comp_arena()->AmallocWords(x);
  }
};

#endif // SHARE_OPTO_PARSE_HPP
--- EOF ---