/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public AnyObj {
  friend class VMStructs;

  Compile*    C;                 // cache
  JVMState*   _caller_jvms;      // state of caller
  ciMethod*   _method;           // method being called by the caller_jvms
  bool        _late_inline;      // method is inlined incrementally
  InlineTree* _caller_tree;
  uint        _count_inline_bcs; // Accumulated count of inlined bytecodes
  const int   _max_inline_level; // the maximum inline level for this sub-tree (may be adjusted)

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool try_to_inline(ciMethod* callee_method,
                     ciMethod* caller_method,
                     int caller_bci,
                     JVMState* jvms,
                     ciCallProfile& profile,
                     bool& should_delay);
  bool should_inline(ciMethod* callee_method,
                     ciMethod* caller_method,
                     int caller_bci,
                     bool& should_delay,
                     ciCallProfile& profile);
  bool should_not_inline(ciMethod* callee_method,
                         ciMethod* caller_method,
                         int caller_bci,
                         bool& should_delay,
                         ciCallProfile& profile);
  bool is_not_reached(ciMethod* callee_method,
                      ciMethod* caller_method,
                      int caller_bci,
                      ciCallProfile& profile);
  void print_inlining(ciMethod* callee_method, int caller_bci,
                      ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is true if the call site is worth inlining. If
  // should_delay is set as well, the inlining should not happen during
  // parsing but should instead be postponed (performed incrementally,
  // as late inlining).
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation;
  // otherwise it is an optimized virtual method candidate.
  bool ok_to_inline(ciMethod* call_method, JVMState* caller_jvms, ciCallProfile& profile, bool& should_delay);

  void set_late_inline() {
    _late_inline = true;
  }

  // Information about inlined method
  JVMState* caller_jvms()      const { return _caller_jvms; }
  ciMethod* method()           const { return _method; }
  int       caller_bci()       const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint      count_inline_bcs() const { return _count_inline_bcs; }
  int       inline_level()     const { return stack_depth(); }

#ifndef PRODUCT
private:
  uint _count_inlines; // Count of inlined methods
public:
  // Debug information collected during parse
  uint count_inlines() const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool _forced_inline; // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool forced_inline() const { return _forced_inline; }
  // Count number of nodes in this subtree
  int count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out, int depth_adjust = 0);
};
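// A sketch of the typical lifecycle, as suggested by the interface above
// (illustrative, not normative): the compiler builds the root with
// build_inline_tree_root(), then consults ok_to_inline() at each call site
// during parsing; successful decisions grow the tree with new subtrees,
// which late (incremental) inlining later locates again via
// find_subtree_from_root() and marks with set_late_inline().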


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
public:
  // Per-block information needed by the parser:
  class Block {
  private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;   // how many predecessors in CFG?
    int                _preds_parsed; // how many of these have been parsed?
    uint               _count;        // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;    // has this block been parsed yet?
    bool               _is_handler;   // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have a merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
    bool               _has_predicates; // Were predicates added before parsing of the loop head?

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Includes exception paths also.
    Block**            _successors;

  public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const { return _flow; }
    int pred_count() const { return _pred_count; }
    int preds_parsed() const { return _preds_parsed; }
    bool is_parsed() const { return _is_parsed; }
    bool is_handler() const { return _is_handler; }
    void set_count(uint x) { _count = x; }
    uint count() const { return _count; }

    SafePointNode* start_map() const { assert(is_merged(), ""); return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const { return _start_map != nullptr; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != nullptr && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const { return preds_parsed() == pred_count(); }

    bool has_predicates() const { return _has_predicates; }
    void set_has_predicates() { _has_predicates = true; }

    int num_successors() const { return _num_successors; }
    int all_successors() const { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const { return flow()->start(); }
    int limit() const { return flow()->limit(); }
    int rpo() const { return flow()->rpo(); }
    int start_sp() const { return flow()->stack_size(); }

    bool is_loop_head() const { return flow()->is_loop_head(); }
    bool is_in_irreducible_loop() const {
      return flow()->is_in_irreducible_loop();
    }
    bool is_irreducible_loop_entry() const {
      return flow()->is_irreducible_loop_head() || flow()->is_irreducible_loop_secondary_entry();
    }
    void copy_irreducible_status_to(RegionNode* region, const JVMState* jvms) {
      assert(!is_irreducible_loop_entry() || is_in_irreducible_loop(), "entry is part of irreducible loop");
      if (is_in_irreducible_loop()) {
        // The block is in an irreducible loop of this method, so it is possible that this
        // region becomes an irreducible loop entry. (no guarantee)
        region->set_loop_status(RegionNode::LoopStatus::MaybeIrreducibleEntry);
      } else if (jvms->caller() != nullptr) {
        // The block is not in an irreducible loop of this method, hence it cannot ever
        // be the entry of an irreducible loop. But it may be inside an irreducible loop
        // of a caller of this inlined method. (limited guarantee)
        assert(region->loop_status() == RegionNode::LoopStatus::NeverIrreducibleEntry, "status not changed");
      } else {
        // The block is not in an irreducible loop of this method, and there is no outer
        // method. This region will never be in an irreducible loop. (strong guarantee)
        region->set_loop_status(RegionNode::LoopStatus::Reducible);
      }
    }
    bool is_SEL_head() const { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(), ""); return is_invariant_local(i); }

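    // peek(off) reports the type 'off' slots below the top of this block's
    // entry stack; peek(0) is the type of the entry-stack top.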
    const Type* peek(int off = 0) const { return stack_type_at(start_sp() - (off + 1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
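    // Worked example: a block with pred_count() == 3 hands out path
    // numbers 3, 2, 1 as its predecessors are parsed; the final call
    // returns PhiNode::Input (1), and each phi's in(pnum) edge carries
    // the value arriving along that path.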

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be null.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ArenaObj {
  private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

  public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state(Bytecodes::Code bc);
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif

private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*       _caller;        // JVMS which carries incoming args & state.
  float           _expected_uses; // expected number of calls to this code
  float           _prof_factor;   // discount applied to my profile counts
  int             _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;            // My kind of function type
  int             _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*     _flow;          // Results of previous flow pass.
  Block*          _blocks;        // Array of basic-block structs.
  int             _block_count;   // Number of elements in _blocks.

  GraphKit        _exits;          // Record all normal returns and throws here.
  bool            _wrote_final;    // Did we write a final field?
  bool            _wrote_volatile; // Did we write a volatile field?
  bool            _wrote_stable;   // Did we write a @Stable field?
  bool            _wrote_fields;   // Did we write any field?
  Node*           _alloc_with_final_or_stable; // An allocation node with final or @Stable field

  // Variables which track Java semantics during bytecode parsing:

  Block*           _block; // block currently getting parsed
  ciBytecodeStream _iter;  // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth; // Debugging SwitchRanges.
  int _est_switch_depth; // Debugging SwitchRanges.
#endif

  bool _first_return;                  // true if return is the first to be parsed
  bool _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint _new_idx;                       // nodes with _idx at or above this value were created during this parse; used to trim the list of replaced nodes

public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState* caller()        const { return _caller; }
  float     expected_uses() const { return _expected_uses; }
  float     prof_factor()   const { return _prof_factor; }
  int       depth()         const { return _depth; }
  const TypeFunc* tf()      const { return _tf; }
  // entry_bci() -- see osr_bci, etc.

  ciTypeFlow* flow()        const { return _flow; }
  // blocks() -- see rpo_at, start_block, etc.
  int block_count()         const { return _block_count; }

  GraphKit& exits()               { return _exits; }
  bool wrote_final()        const { return _wrote_final; }
  void set_wrote_final(bool z)    { _wrote_final = z; }
  bool wrote_volatile()     const { return _wrote_volatile; }
  void set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool wrote_stable()       const { return _wrote_stable; }
  void set_wrote_stable(bool z)   { _wrote_stable = z; }
  bool wrote_fields()       const { return _wrote_fields; }
  void set_wrote_fields(bool z)   { _wrote_fields = z; }
  Node* alloc_with_final_or_stable() const { return _alloc_with_final_or_stable; }
  void set_alloc_with_final_or_stable(Node* n) {
    assert((_alloc_with_final_or_stable == nullptr) || (_alloc_with_final_or_stable == n), "different init objects?");
    _alloc_with_final_or_stable = n;
  }

  Block*            block() const { return _block; }
  ciBytecodeStream& iter()        { return _iter; }
  Bytecodes::Code   bc()    const { return _iter.cur_bc(); }

  void set_block(Block* b)        { _block = b; }

  // Derived accessors:
  bool is_osr_parse() const {
    assert(_entry_bci != UnknownBci, "uninitialized _entry_bci");
    return _entry_bci != InvocationEntryBci;
  }
  bool is_normal_parse() const { return !is_osr_parse(); }
  int osr_bci() const { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return null if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);
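  // (For an OSR parse the entry state is not built from incoming arguments;
  // instead, locals, expression stack and monitors are fetched from the
  // interpreter frame snapshot passed in via osr_buf.)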

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);

  void clinit_deopt();
  void clinit_barrier_on_inline();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (target_bci <= bci()) {
      add_safepoint();
    }
  }
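  // (A branch whose target_bci is at or before the current bci() is a
  // backward branch, e.g. the goto closing a loop body, so every loop
  // iteration passes a safepoint check.)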

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float dynamic_branch_prediction(float& cnt, BoolTest::mask btest, Node* test);
  float branch_prediction(float& cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool  seems_never_taken(float prob) const;
  bool  path_is_suitable_for_uncommon_trap(float prob) const;

  void do_ifnull(BoolTest::mask btest, Node* c);
  void do_if(BoolTest::mask btest, Node* c);
  int repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
  void sharpen_type_after_if(BoolTest::mask btest,
                             Node* con, const Type* tcon,
                             Node* val, const Type* tval);
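  // (For example, once 'if (val == con)' is known to have succeeded, the
  // type of val on the taken path can be narrowed using the constant's
  // type tcon.)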
  void maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  void jump_if_true_fork(IfNode* ifNode, int dest_bci_if_true, bool unc);
  void jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, bool unc);
  void jump_if_always_fork(int dest_bci_if_true, bool unc);

  friend class SwitchRange;
  void do_tableswitch();
  void do_lookupswitch();
  void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
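  // (A sketch of the strategy, as suggested by these helpers: sorted
  // SwitchRanges are either searched linearly, expanded into a tree of
  // comparisons by jump_switch_ranges, or, when dense enough, turned
  // into a jump table by create_jump_tables.)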

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

  // Stress unstable if traps
  void stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store);
  // Increment counter used by StressUnstableIfTraps
  void increment_trap_stress_counter(Node*& counter, Node*& incr_store);

public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

// Specialized uncommon_trap of unstable_if. C2 uses the next_bci of the path to update its live locals.
class UnstableIfTrap {
  CallStaticJavaNode* const _unc;
  bool _modified;  // modified locals based on next_bci()
  int  _next_bci;

public:
  UnstableIfTrap(CallStaticJavaNode* call, Parse::Block* path) : _unc(call), _modified(false) {
    assert(_unc != nullptr && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if,
           "invalid uncommon_trap call!");
    _next_bci = path != nullptr ? path->start() : -1;
  }

  // The starting point of the pruned block, where control goes when
  // deoptimization does happen.
  int next_bci() const {
    return _next_bci;
  }

  bool modified() const {
    return _modified;
  }

  void set_modified() {
    _modified = true;
  }

  CallStaticJavaNode* uncommon_trap() const {
    return _unc;
  }

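  // Instances are allocated in the compile-time arena, so they are never
  // individually deleted; their storage is reclaimed when the whole arena
  // is released at the end of the compilation.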
  inline void* operator new(size_t x) throw() {
    Compile* C = Compile::current();
    return C->comp_arena()->AmallocWords(x);
  }
};

#endif // SHARE_OPTO_PARSE_HPP
--- EOF ---