5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciMethodData.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "interpreter/linkResolver.hpp"
29 #include "jvm_io.h"
30 #include "memory/resourceArea.hpp"
31 #include "memory/universe.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/divnode.hpp"
37 #include "opto/idealGraphPrinter.hpp"
38 #include "opto/matcher.hpp"
39 #include "opto/memnode.hpp"
40 #include "opto/mulnode.hpp"
41 #include "opto/opaquenode.hpp"
42 #include "opto/parse.hpp"
43 #include "opto/runtime.hpp"
44 #include "opto/subtypenode.hpp"
45 #include "runtime/deoptimization.hpp"
46 #include "runtime/sharedRuntime.hpp"
47
48 #ifndef PRODUCT
49 extern uint explicit_null_checks_inserted,
50 explicit_null_checks_elided;
51 #endif
52
53 //---------------------------------array_load----------------------------------
54 void Parse::array_load(BasicType bt) {
55 const Type* elemtype = Type::TOP;
56 bool big_val = bt == T_DOUBLE || bt == T_LONG;
57 Node* adr = array_addressing(bt, 0, elemtype);
58 if (stopped()) return; // guaranteed null or range check
59
60 pop(); // index (already used)
61 Node* array = pop(); // the array itself
62
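  // baload is shared between byte[] and boolean[]: if the static element type
  // says the array holds booleans, narrow the basic type so the load uses the
  // boolean (0/1) access.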
63 if (elemtype == TypeInt::BOOL) {
64 bt = T_BOOLEAN;
65 }
66 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
67
68 Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
69 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
70 if (big_val) {
71 push_pair(ld);
72 } else {
73 push(ld);
74 }
75 }
76
77
78 //--------------------------------array_store----------------------------------
79 void Parse::array_store(BasicType bt) {
80 const Type* elemtype = Type::TOP;
81 bool big_val = bt == T_DOUBLE || bt == T_LONG;
82 Node* adr = array_addressing(bt, big_val ? 2 : 1, elemtype);
83 if (stopped()) return; // guaranteed null or range check
84 if (bt == T_OBJECT) {
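    // aastore requires a dynamic subtype check of the stored value against the
    // array's element type (an ArrayStoreException is thrown if it fails).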
85 array_store_check();
86 if (stopped()) {
87 return;
88 }
89 }
90   Node* val;                  // Value to store
91 if (big_val) {
92 val = pop_pair();
93 } else {
94 val = pop();
95 }
96 pop(); // index (already used)
97 Node* array = pop(); // the array itself
98
99 if (elemtype == TypeInt::BOOL) {
100 bt = T_BOOLEAN;
101 }
102 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
103
104 access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
105 }
106
107
108 //------------------------------array_addressing-------------------------------
109 // Pull array and index from the stack. Compute pointer-to-element.
110 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
111 Node *idx = peek(0+vals); // Get from stack without popping
112 Node *ary = peek(1+vals); // in case of exception
113
114 // Null check the array base, with correct stack contents
115 ary = null_check(ary, T_ARRAY);
116   // Was a guaranteed null exception detected at compile time?
117 if (stopped()) return top();
118
119 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
120 const TypeInt* sizetype = arytype->size();
121 elemtype = arytype->elem();
122
123 if (UseUniqueSubclasses) {
124 const Type* el = elemtype->make_ptr();
125 if (el && el->isa_instptr()) {
126 const TypeInstPtr* toop = el->is_instptr();
127 if (toop->instance_klass()->unique_concrete_subklass()) {
128 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
129 const Type* subklass = Type::get_const_type(toop->instance_klass());
130 elemtype = subklass->join_speculative(el);
131 }
132 }
133 }
134
135 // Check for big class initializers with all constant offsets
136 // feeding into a known-size array.
137 const TypeInt* idxtype = _gvn.type(idx)->is_int();
138 // See if the highest idx value is less than the lowest array bound,
139 // and if the idx value cannot be negative:
140 bool need_range_check = true;
141 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
142 need_range_check = false;
143 if (C->log() != nullptr) C->log()->elem("observe that='!need_range_check'");
144 }
145
146 if (!arytype->is_loaded()) {
147 // Only fails for some -Xcomp runs
148 // The class is unloaded. We have to run this bytecode in the interpreter.
149 ciKlass* klass = arytype->unloaded_klass();
150
151 uncommon_trap(Deoptimization::Reason_unloaded,
152 Deoptimization::Action_reinterpret,
153 klass, "!loaded array");
154 return top();
155 }
156
157 // Do the range check
158 if (need_range_check) {
159 Node* tst;
160 if (sizetype->_hi <= 0) {
161 // The greatest array bound is negative, so we can conclude that we're
162 // compiling unreachable code, but the unsigned compare trick used below
163 // only works with non-negative lengths. Instead, hack "tst" to be zero so
164 // the uncommon_trap path will always be taken.
165 tst = _gvn.intcon(0);
166 } else {
167 // Range is constant in array-oop, so we can use the original state of mem
168 Node* len = load_array_length(ary);
169
170 // Test length vs index (standard trick using unsigned compare)
171 Node* chk = _gvn.transform( new CmpUNode(idx, len) );
172 BoolTest::mask btest = BoolTest::lt;
173 tst = _gvn.transform( new BoolNode(chk, btest) );
174 }
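    // RangeCheckNode is an If variant that marks this test as an array range
    // check so that later loop optimizations (range check elimination/smearing)
    // can recognize and adjust it.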
175 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
176 _gvn.set_type(rc, rc->Value(&_gvn));
177 if (!tst->is_Con()) {
178 record_for_igvn(rc);
179 }
180 set_control(_gvn.transform(new IfTrueNode(rc)));
181 // Branch to failure if out of bounds
182 {
183 PreserveJVMState pjvms(this);
184 set_control(_gvn.transform(new IfFalseNode(rc)));
185 if (C->allow_range_check_smearing()) {
186 // Do not use builtin_throw, since range checks are sometimes
187 // made more stringent by an optimistic transformation.
188 // This creates "tentative" range checks at this point,
189 // which are not guaranteed to throw exceptions.
190 // See IfNode::Ideal, is_range_check, adjust_check.
191 uncommon_trap(Deoptimization::Reason_range_check,
192 Deoptimization::Action_make_not_entrant,
193 nullptr, "range_check");
194 } else {
195 // If we have already recompiled with the range-check-widening
196 // heroic optimization turned off, then we must really be throwing
197 // range check exceptions.
198 builtin_throw(Deoptimization::Reason_range_check);
199 }
200 }
201 }
202 // Check for always knowing you are throwing a range-check exception
203 if (stopped()) return top();
204
205 // Make array address computation control dependent to prevent it
206 // from floating above the range check during loop optimizations.
207 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
208 assert(ptr != top(), "top should go hand-in-hand with stopped");
209
210 return ptr;
211 }
212
213
214 // returns IfNode
215 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
216 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
217 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
218 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
219 return iff;
220 }
221
222
223 // Sentinel value for the target bci, used to mark branches that are never
224 // taken (according to profiling).
225 static const int never_reached = INT_MAX;
226
227 //------------------------------helper for tableswitch-------------------------
228 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
229 // True branch, use existing map info
230 { PreserveJVMState pjvms(this);
231 Node *iftrue = _gvn.transform( new IfTrueNode (iff) );
232 set_control( iftrue );
1429 // False branch
1430 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1431 set_control(iffalse);
1432
1433 if (stopped()) { // Path is dead?
1434 NOT_PRODUCT(explicit_null_checks_elided++);
1435 if (C->eliminate_boxing()) {
1436 // Mark the successor block as parsed
1437 next_block->next_path_num();
1438 }
1439 } else { // Path is live.
1440 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1441 }
1442
1443 if (do_stress_trap) {
1444 stress_trap(iff, counter, incr_store);
1445 }
1446 }
1447
1448 //------------------------------------do_if------------------------------------
1449 void Parse::do_if(BoolTest::mask btest, Node* c) {
1450 int target_bci = iter().get_dest();
1451
1452 Block* branch_block = successor_for_bci(target_bci);
1453 Block* next_block = successor_for_bci(iter().next_bci());
1454
1455 float cnt;
1456 float prob = branch_prediction(cnt, btest, target_bci, c);
1457 float untaken_prob = 1.0 - prob;
1458
1459 if (prob == PROB_UNKNOWN) {
1460 if (PrintOpto && Verbose) {
1461 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1462 }
1463 repush_if_args(); // to gather stats on loop
1464 uncommon_trap(Deoptimization::Reason_unreached,
1465 Deoptimization::Action_reinterpret,
1466 nullptr, "cold");
1467 if (C->eliminate_boxing()) {
1468 // Mark the successor blocks as parsed
1469 branch_block->next_path_num();
1470 next_block->next_path_num();
1471 }
1472 return;
1473 }
1474
1475 Node* counter = nullptr;
1476 Node* incr_store = nullptr;
1477 bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1478 if (do_stress_trap) {
1479 increment_trap_stress_counter(counter, incr_store);
1480 }
1481
1482 // Sanity check the probability value
1483 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1484
1485 bool taken_if_true = true;
1486 // Convert BoolTest to canonical form:
1487 if (!BoolTest(btest).is_canonical()) {
1488 btest = BoolTest(btest).negate();
1489 taken_if_true = false;
1490 // prob is NOT updated here; it remains the probability of the taken
1491 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1492 }
1493 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1494
1495 Node* tst0 = new BoolNode(c, btest);
1496 Node* tst = _gvn.transform(tst0);
1497 BoolTest::mask taken_btest = BoolTest::illegal;
1498 BoolTest::mask untaken_btest = BoolTest::illegal;
1499
1520 }
1521
1522 // Generate real control flow
1523 float true_prob = (taken_if_true ? prob : untaken_prob);
1524 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1525 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1526 Node* taken_branch = new IfTrueNode(iff);
1527 Node* untaken_branch = new IfFalseNode(iff);
1528 if (!taken_if_true) { // Finish conversion to canonical form
1529 Node* tmp = taken_branch;
1530 taken_branch = untaken_branch;
1531 untaken_branch = tmp;
1532 }
1533
1534 // Branch is taken:
1535 { PreserveJVMState pjvms(this);
1536 taken_branch = _gvn.transform(taken_branch);
1537 set_control(taken_branch);
1538
1539 if (stopped()) {
1540 if (C->eliminate_boxing()) {
1541 // Mark the successor block as parsed
1542 branch_block->next_path_num();
1543 }
1544 } else {
1545 adjust_map_after_if(taken_btest, c, prob, branch_block);
1546 if (!stopped()) {
1547 merge(target_bci);
1548 }
1549 }
1550 }
1551
1552 untaken_branch = _gvn.transform(untaken_branch);
1553 set_control(untaken_branch);
1554
1555 // Branch not taken.
1556 if (stopped()) {
1557 if (C->eliminate_boxing()) {
1558 // Mark the successor block as parsed
1559 next_block->next_path_num();
1560 }
1561 } else {
1562 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1563 }
1564
1565 if (do_stress_trap) {
1566 stress_trap(iff, counter, incr_store);
1567 }
1568 }
1569
1570 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
1571 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
1572 // then either takes the trap or executes the original, unstable if.
1573 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
1574 // Search for an unstable if trap
1575 CallStaticJavaNode* trap = nullptr;
1576 assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
1577 ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
1578 if (trap == nullptr || !trap->jvms()->should_reexecute()) {
1579 // No suitable trap found. Remove unused counter load and increment.
1580 C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
1581 return;
1582 }
1583
1584 // Remove trap from optimization list since we add another path to the trap.
1585 bool success = C->remove_unstable_if_trap(trap, true);
1586 assert(success, "Trap already modified");
1587
1588 // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
1589 int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
1622 }
1623
1624 void Parse::maybe_add_predicate_after_if(Block* path) {
1625 if (path->is_SEL_head() && path->preds_parsed() == 0) {
1626 // Add predicates at bci of if dominating the loop so traps can be
1627 // recorded on the if's profile data
1628 int bc_depth = repush_if_args();
1629 add_parse_predicates();
1630 dec_sp(bc_depth);
1631 path->set_has_predicates();
1632 }
1633 }
1634
1635
1636 //----------------------------adjust_map_after_if------------------------------
1637 // Adjust the JVM state to reflect the result of taking this path.
1638 // Basically, it means inspecting the CmpNode controlling this
1639 // branch, seeing how it constrains a tested value, and then
1640 // deciding if it's worth our while to encode this constraint
1641 // as graph nodes in the current abstract interpretation map.
1642 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
1643 if (!c->is_Cmp()) {
1644 maybe_add_predicate_after_if(path);
1645 return;
1646 }
1647
1648 if (stopped() || btest == BoolTest::illegal) {
1649 return; // nothing to do
1650 }
1651
1652 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1653
1654 if (path_is_suitable_for_uncommon_trap(prob)) {
1655 repush_if_args();
1656 Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
1657 Deoptimization::Action_reinterpret,
1658 nullptr,
1659 (is_fallthrough ? "taken always" : "taken never"));
1660
1661 if (call != nullptr) {
1662 C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
1663 }
1664 return;
1665 }
1666
1667 Node* val = c->in(1);
1668 Node* con = c->in(2);
1669 const Type* tcon = _gvn.type(con);
1670 const Type* tval = _gvn.type(val);
1671 bool have_con = tcon->singleton();
1672 if (tval->singleton()) {
1673 if (!have_con) {
1674 // Swap, so constant is in con.
1802 Node* obj = nullptr;
1803 const TypeOopPtr* cast_type = nullptr;
1804 // Insert a cast node with a narrowed type after a successful type check.
1805 if (match_type_check(_gvn, btest, con, tcon, val, tval,
1806 &obj, &cast_type)) {
1807 assert(obj != nullptr && cast_type != nullptr, "missing type check info");
1808 const Type* obj_type = _gvn.type(obj);
1809 const TypeOopPtr* tboth = obj_type->join_speculative(cast_type)->isa_oopptr();
1810 if (tboth != nullptr && tboth != obj_type && tboth->higher_equal(obj_type)) {
1811 int obj_in_map = map()->find_edge(obj);
1812 JVMState* jvms = this->jvms();
1813 if (obj_in_map >= 0 &&
1814 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
1815 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
1816 const Type* tcc = ccast->as_Type()->type();
1817 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
1818 // Delay transform() call to allow recovery of pre-cast value
1819 // at the control merge.
1820 _gvn.set_type_bottom(ccast);
1821 record_for_igvn(ccast);
1822 // Here's the payoff.
1823 replace_in_map(obj, ccast);
1824 }
1825 }
1826 }
1827
1828 int val_in_map = map()->find_edge(val);
1829 if (val_in_map < 0) return; // replace_in_map would be useless
1830 {
1831 JVMState* jvms = this->jvms();
1832 if (!(jvms->is_loc(val_in_map) ||
1833 jvms->is_stk(val_in_map)))
1834 return; // again, it would be useless
1835 }
1836
1837 // Check for a comparison to a constant, and "know" that the compared
1838 // value is constrained on this path.
1839 assert(tcon->singleton(), "");
1840 ConstraintCastNode* ccast = nullptr;
1841 Node* cast = nullptr;
1905 if (c->Opcode() == Op_CmpP &&
1906 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1907 c->in(2)->is_Con()) {
1908 Node* load_klass = nullptr;
1909 Node* decode = nullptr;
1910 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1911 decode = c->in(1);
1912 load_klass = c->in(1)->in(1);
1913 } else {
1914 load_klass = c->in(1);
1915 }
1916 if (load_klass->in(2)->is_AddP()) {
1917 Node* addp = load_klass->in(2);
1918 Node* obj = addp->in(AddPNode::Address);
1919 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1920 if (obj_type->speculative_type_not_null() != nullptr) {
1921 ciKlass* k = obj_type->speculative_type();
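        // The two compare operands were already popped by the caller; restore the
        // stack depth so that an uncommon trap emitted by maybe_cast_profiled_obj()
        // re-executes the branch bytecode with a correct JVM state.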
1922 inc_sp(2);
1923 obj = maybe_cast_profiled_obj(obj, k);
1924 dec_sp(2);
1925 // Make the CmpP use the casted obj
1926 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1927 load_klass = load_klass->clone();
1928 load_klass->set_req(2, addp);
1929 load_klass = _gvn.transform(load_klass);
1930 if (decode != nullptr) {
1931 decode = decode->clone();
1932 decode->set_req(1, load_klass);
1933 load_klass = _gvn.transform(decode);
1934 }
1935 c = c->clone();
1936 c->set_req(1, load_klass);
1937 c = _gvn.transform(c);
1938 }
1939 }
1940 }
1941 return c;
1942 }
1943
1944 //------------------------------do_one_bytecode--------------------------------
2644
2645 case Bytecodes::_i2d:
2646 a = pop();
2647 b = _gvn.transform( new ConvI2DNode(a));
2648 push_pair(b);
2649 break;
2650
2651 case Bytecodes::_iinc: // Increment local
2652 i = iter().get_index(); // Get local index
2653 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2654 break;
2655
2656 // Exit points of synchronized methods must have an unlock node
2657 case Bytecodes::_return:
2658 return_current(nullptr);
2659 break;
2660
2661 case Bytecodes::_ireturn:
2662 case Bytecodes::_areturn:
2663 case Bytecodes::_freturn:
2664 return_current(pop());
2665 break;
2666 case Bytecodes::_lreturn:
2667 return_current(pop_pair());
2668 break;
2669 case Bytecodes::_dreturn:
2670 return_current(pop_pair());
2671 break;
2672
2673 case Bytecodes::_athrow:
2674    // Throwing a null exception oop results in a NullPointerException instead.
2675 null_check(peek());
2676 if (stopped()) return;
2677 // Hook the thrown exception directly to subsequent handlers.
2678 if (BailoutToInterpreterForThrows) {
2679 // Keep method interpreted from now on.
2680 uncommon_trap(Deoptimization::Reason_unhandled,
2681 Deoptimization::Action_make_not_compilable);
2682 return;
2683 }
2684 if (env()->jvmti_can_post_on_exceptions()) {
2685 // check if we must post exception events, take uncommon trap if so (with must_throw = false)
2686 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
2687 }
2688 // Here if either can_post_on_exceptions or should_post_on_exceptions is false
2701
2702 // See if we can get some profile data and hand it off to the next block
2703 Block *target_block = block()->successor_for_bci(target_bci);
2704 if (target_block->pred_count() != 1) break;
2705 ciMethodData* methodData = method()->method_data();
2706 if (!methodData->is_mature()) break;
2707 ciProfileData* data = methodData->bci_to_data(bci());
2708 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
2709 int taken = ((ciJumpData*)data)->taken();
2710 taken = method()->scale_count(taken);
2711 target_block->set_count(taken);
2712 break;
2713 }
2714
2715 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
2716 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2717 handle_if_null:
2718 // If this is a backwards branch in the bytecodes, add Safepoint
2719 maybe_add_safepoint(iter().get_dest());
2720 a = null();
2721 b = pop();
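      // Use speculative profiling info on the operand: if it is claimed to never be
      // null, emit a speculative null check (trap on null); if it is claimed to
      // always be null, emit a null assert. Either way, later code sees a sharper type.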
2722 if (!_gvn.type(b)->speculative_maybe_null() &&
2723 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2724 inc_sp(1);
2725 Node* null_ctl = top();
2726 b = null_check_oop(b, &null_ctl, true, true, true);
2727 assert(null_ctl->is_top(), "no null control here");
2728 dec_sp(1);
2729 } else if (_gvn.type(b)->speculative_always_null() &&
2730 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2731 inc_sp(1);
2732 b = null_assert(b);
2733 dec_sp(1);
2734 }
2735 c = _gvn.transform( new CmpPNode(b, a) );
2736 do_ifnull(btest, c);
2737 break;
2738
2739 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2740 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2741 handle_if_acmp:
2742 // If this is a backwards branch in the bytecodes, add Safepoint
2743 maybe_add_safepoint(iter().get_dest());
2744 a = pop();
2745 b = pop();
2746 c = _gvn.transform( new CmpPNode(b, a) );
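      // Try to sharpen the pointer compare using speculative klass information
      // (see optimize_cmp_with_klass() above).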
2747 c = optimize_cmp_with_klass(c);
2748 do_if(btest, c);
2749 break;
2750
2751 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2752 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2753 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2754 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2755 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2756 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2757 handle_ifxx:
2758 // If this is a backwards branch in the bytecodes, add Safepoint
2759 maybe_add_safepoint(iter().get_dest());
2760 a = _gvn.intcon(0);
2761 b = pop();
2762 c = _gvn.transform( new CmpINode(b, a) );
2763 do_if(btest, c);
2764 break;
2765
2766 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2767 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2768 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2783 break;
2784
2785 case Bytecodes::_lookupswitch:
2786 do_lookupswitch();
2787 break;
2788
2789 case Bytecodes::_invokestatic:
2790 case Bytecodes::_invokedynamic:
2791 case Bytecodes::_invokespecial:
2792 case Bytecodes::_invokevirtual:
2793 case Bytecodes::_invokeinterface:
2794 do_call();
2795 break;
2796 case Bytecodes::_checkcast:
2797 do_checkcast();
2798 break;
2799 case Bytecodes::_instanceof:
2800 do_instanceof();
2801 break;
2802 case Bytecodes::_anewarray:
2803 do_anewarray();
2804 break;
2805 case Bytecodes::_newarray:
2806 do_newarray((BasicType)iter().get_index());
2807 break;
2808 case Bytecodes::_multianewarray:
2809 do_multianewarray();
2810 break;
2811 case Bytecodes::_new:
2812 do_new();
2813 break;
2814
2815 case Bytecodes::_jsr:
2816 case Bytecodes::_jsr_w:
2817 do_jsr();
2818 break;
2819
2820 case Bytecodes::_ret:
2821 do_ret();
2822 break;
2823
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciInlineKlass.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "ci/ciSymbols.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "interpreter/linkResolver.hpp"
31 #include "jvm_io.h"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/divnode.hpp"
39 #include "opto/idealGraphPrinter.hpp"
40 #include "opto/idealKit.hpp"
41 #include "opto/inlinetypenode.hpp"
42 #include "opto/matcher.hpp"
43 #include "opto/memnode.hpp"
44 #include "opto/mulnode.hpp"
45 #include "opto/opaquenode.hpp"
46 #include "opto/parse.hpp"
47 #include "opto/runtime.hpp"
48 #include "opto/subtypenode.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/deoptimization.hpp"
51 #include "runtime/sharedRuntime.hpp"
52
53 #ifndef PRODUCT
54 extern uint explicit_null_checks_inserted,
55 explicit_null_checks_elided;
56 #endif
57
58 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
59 // Feed unused profile data to type speculation
60 if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
61 ciKlass* array_type = nullptr;
62 ciKlass* element_type = nullptr;
63 ProfilePtrKind element_ptr = ProfileMaybeNull;
64 bool flat_array = true;
65 bool null_free_array = true;
66 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
67 if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
68 ld = record_profile_for_speculation(ld, element_type, element_ptr);
69 }
70 }
71 return ld;
72 }
73
74
75 //---------------------------------array_load----------------------------------
76 void Parse::array_load(BasicType bt) {
77 const Type* elemtype = Type::TOP;
78 Node* adr = array_addressing(bt, 0, elemtype);
79 if (stopped()) return; // guaranteed null or range check
80
81 Node* array_index = pop();
82 Node* array = pop();
83
84 // Handle inline type arrays
85 const TypeOopPtr* element_ptr = elemtype->make_oopptr();
86 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
87
88 if (!array_type->is_not_flat()) {
89 // Cannot statically determine if array is a flat array, emit runtime check
90 assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
91 (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->maybe_flat_in_array()), "array can't be flat");
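    // Build an if/else diamond with IdealKit on the runtime flat-array check:
    // one branch loads from the non-flat layout, the other from the flat layout,
    // and 'res' merges the loaded value.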
92 IdealKit ideal(this);
93 IdealVariable res(ideal);
94 ideal.declarations_done();
95 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
96 // Non-flat array
97 sync_kit(ideal);
98 if (!array_type->is_flat()) {
99 assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
100 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
101 DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
102 if (needs_range_check(array_type->size(), array_index)) {
103 // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
104 // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
105 // possibly float above the range check at any point.
106 decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
107 }
108 Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
109 if (element_ptr->is_inlinetypeptr()) {
110 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
111 }
112 ideal.set(res, ld);
113 }
114 ideal.sync_kit(this);
115 } ideal.else_(); {
116 // Flat array
117 sync_kit(ideal);
118 if (!array_type->is_not_flat()) {
119 if (element_ptr->is_inlinetypeptr()) {
120 ciInlineKlass* vk = element_ptr->inline_klass();
121 Node* flat_array = cast_to_flat_array(array, vk);
122 Node* vt = InlineTypeNode::make_from_flat_array(this, vk, flat_array, array_index);
123 ideal.set(res, vt);
124 } else {
125 // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
126 // runtime call to correctly load the inline type element from the flat array.
127 Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
128 bool is_null_free = array_type->is_null_free() || !UseNullableValueFlattening;
129 if (is_null_free) {
130 inline_type = cast_not_null(inline_type);
131 }
132 ideal.set(res, inline_type);
133 }
134 }
135 ideal.sync_kit(this);
136 } ideal.end_if();
137 sync_kit(ideal);
138 Node* ld = _gvn.transform(ideal.value(res));
139 ld = record_profile_for_speculation_at_array_load(ld);
140 push_node(bt, ld);
141 return;
142 }
143
144 if (elemtype == TypeInt::BOOL) {
145 bt = T_BOOLEAN;
146 }
147 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
148 Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
149 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
150 ld = record_profile_for_speculation_at_array_load(ld);
151 // Loading an inline type from a non-flat array
152 if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
153 assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
154 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
155 }
156 push_node(bt, ld);
157 }
158
159 Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
160   // The membars below keep this access to an unknown flat array correctly
161   // ordered with other unknown and known flat array accesses.
162 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
163
164 Node* call = nullptr;
165 {
166 // Re-execute flat array load if runtime call triggers deoptimization
167 PreserveReexecuteState preexecs(this);
168 jvms()->set_bci(_bci);
169 jvms()->set_should_reexecute(true);
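    // aaload consumes two stack slots (array and index); restore them so the
    // interpreter can re-execute the bytecode if the call deoptimizes.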
170 inc_sp(2);
171 kill_dead_locals();
172 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
173 OptoRuntime::load_unknown_inline_Type(),
174 OptoRuntime::load_unknown_inline_Java(),
175 nullptr, TypeRawPtr::BOTTOM,
176 array, array_index);
177 }
178 make_slow_call_ex(call, env()->Throwable_klass(), false);
179 Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
180
181 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
182
183 // Keep track of the information that the inline type is in flat arrays
184 const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
185 return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
186 }
187
188 //--------------------------------array_store----------------------------------
189 void Parse::array_store(BasicType bt) {
190 const Type* elemtype = Type::TOP;
191 Node* adr = array_addressing(bt, type2size[bt], elemtype);
192 if (stopped()) return; // guaranteed null or range check
193 Node* stored_value_casted = nullptr;
194 if (bt == T_OBJECT) {
195 stored_value_casted = array_store_check(adr, elemtype);
196 if (stopped()) {
197 return;
198 }
199 }
200 Node* const stored_value = pop_node(bt); // Value to store
201 Node* const array_index = pop(); // Index in the array
202 Node* array = pop(); // The array itself
203
204 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
205 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
206
207 if (elemtype == TypeInt::BOOL) {
208 bt = T_BOOLEAN;
209 } else if (bt == T_OBJECT) {
210 elemtype = elemtype->make_oopptr();
211 const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
212 // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
213 // This is only legal for non-null stores because the array_store_check always passes for null, even
214 // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
215 bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
216 bool not_null_free = not_inline;
217 bool not_flat = not_inline || ( stored_value_casted_type->is_inlinetypeptr() &&
218 !stored_value_casted_type->inline_klass()->maybe_flat_in_array());
219 if (!array_type->is_not_null_free() && not_null_free) {
220 // Storing a non-inline type, mark array as not null-free.
221 array_type = array_type->cast_to_not_null_free();
222 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
223 replace_in_map(array, cast);
224 array = cast;
225 }
226 if (!array_type->is_not_flat() && not_flat) {
227 // Storing to a non-flat array, mark array as not flat.
228 array_type = array_type->cast_to_not_flat();
229 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
230 replace_in_map(array, cast);
231 array = cast;
232 }
233
234 if (array_type->is_null_free() && elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
235 // Array of null-free empty inline type, there is only 1 state for the elements
236 assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
237 return;
238 }
239
240 if (!array_type->is_not_flat()) {
241 // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
242 assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
243 (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
244 // TODO 8350865 Depending on the available layouts, we can avoid this check in below flat/not-flat branches. Also the safe_for_replace arg is now always true.
245 array = inline_array_null_guard(array, stored_value_casted, 3, true);
246 IdealKit ideal(this);
247 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
248 // Non-flat array
249 if (!array_type->is_flat()) {
250 sync_kit(ideal);
251 assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
252 inc_sp(3);
253 access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
254 dec_sp(3);
255 ideal.sync_kit(this);
256 }
257 } ideal.else_(); {
258 // Flat array
259 sync_kit(ideal);
260 if (!array_type->is_not_flat()) {
261 // Try to determine the inline klass type of the stored value
262 ciInlineKlass* vk = nullptr;
263 if (stored_value_casted_type->is_inlinetypeptr()) {
264 vk = stored_value_casted_type->inline_klass();
265 } else if (elemtype->is_inlinetypeptr()) {
266 vk = elemtype->inline_klass();
267 }
268
269 if (vk != nullptr) {
270 // Element type is known, cast and store to flat array layout.
271 Node* flat_array = cast_to_flat_array(array, vk);
272
273 // Re-execute flat array store if buffering triggers deoptimization
274 PreserveReexecuteState preexecs(this);
275 jvms()->set_should_reexecute(true);
276 inc_sp(3);
277
278 if (!stored_value_casted->is_InlineType()) {
279 assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
280 stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
281 }
282
283 stored_value_casted->as_InlineType()->store_flat_array(this, flat_array, array_index);
284 } else {
285 // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
286 store_to_unknown_flat_array(array, array_index, stored_value_casted);
287 }
288 }
289 ideal.sync_kit(this);
290 }
291 ideal.end_if();
292 sync_kit(ideal);
293 return;
294 } else if (!array_type->is_not_null_free()) {
295 // Array is not flat but may be null free
296 assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
297 array = inline_array_null_guard(array, stored_value_casted, 3, true);
298 }
299 }
300 inc_sp(3);
301 access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
302 dec_sp(3);
303 }
304
305 // Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
306 // array layout) or not exact (could have different flat array layouts at runtime).
307 void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
308   // The membars below keep this access to an unknown flat array correctly
309   // ordered with other unknown and known flat array accesses.
310 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
311
312 Node* call = nullptr;
313 {
314 // Re-execute flat array store if runtime call triggers deoptimization
315 PreserveReexecuteState preexecs(this);
316 jvms()->set_bci(_bci);
317 jvms()->set_should_reexecute(true);
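    // aastore consumes three stack slots (array, index and value); restore them
    // for re-execution if the call deoptimizes.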
318 inc_sp(3);
319 kill_dead_locals();
320 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
321 OptoRuntime::store_unknown_inline_Type(),
322 OptoRuntime::store_unknown_inline_Java(),
323 nullptr, TypeRawPtr::BOTTOM,
324 non_null_stored_value, array, idx);
325 }
326 make_slow_call_ex(call, env()->Throwable_klass(), false);
327
328 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
329 }
330
331 //------------------------------array_addressing-------------------------------
332 // Pull array and index from the stack. Compute pointer-to-element.
333 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
334 Node *idx = peek(0+vals); // Get from stack without popping
335 Node *ary = peek(1+vals); // in case of exception
336
337 // Null check the array base, with correct stack contents
338 ary = null_check(ary, T_ARRAY);
339 // Compile-time detect of null-exception?
340 if (stopped()) return top();
341
342 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
343 const TypeInt* sizetype = arytype->size();
344 elemtype = arytype->elem();
345
346 if (UseUniqueSubclasses) {
347 const Type* el = elemtype->make_ptr();
348 if (el && el->isa_instptr()) {
349 const TypeInstPtr* toop = el->is_instptr();
350 if (toop->instance_klass()->unique_concrete_subklass()) {
351 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
352 const Type* subklass = Type::get_const_type(toop->instance_klass());
353 elemtype = subklass->join_speculative(el);
354 }
355 }
356 }
357
358 if (!arytype->is_loaded()) {
359 // Only fails for some -Xcomp runs
360 // The class is unloaded. We have to run this bytecode in the interpreter.
361 ciKlass* klass = arytype->unloaded_klass();
362
363 uncommon_trap(Deoptimization::Reason_unloaded,
364 Deoptimization::Action_reinterpret,
365 klass, "!loaded array");
366 return top();
367 }
368
369 ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);
370
371 if (needs_range_check(sizetype, idx)) {
372 create_range_check(idx, ary, sizetype);
373 } else if (C->log() != nullptr) {
374 C->log()->elem("observe that='!need_range_check'");
375 }
376
377 // Check for always knowing you are throwing a range-check exception
378 if (stopped()) return top();
379
380 // Make array address computation control dependent to prevent it
381 // from floating above the range check during loop optimizations.
382 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
383 assert(ptr != top(), "top should go hand-in-hand with stopped");
384
385 return ptr;
386 }
387
388 // Check if we need a range check for an array access. This is the case if the index is negative or if it could be
389 // greater than or equal to the smallest possible array length (i.e. out of bounds).
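// For example, an index known to be in [0, 9] used on an array whose length is
// known to be at least 10 needs no range check.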
390 bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
391 const TypeInt* index_type = _gvn.type(index)->is_int();
392 return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
393 }
394
395 void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
396 Node* tst;
397 if (sizetype->_hi <= 0) {
398 // The greatest array bound is negative, so we can conclude that we're
399 // compiling unreachable code, but the unsigned compare trick used below
400 // only works with non-negative lengths. Instead, hack "tst" to be zero so
401 // the uncommon_trap path will always be taken.
402 tst = _gvn.intcon(0);
403 } else {
404 // Range is constant in array-oop, so we can use the original state of mem
405 Node* len = load_array_length(ary);
406
407 // Test length vs index (standard trick using unsigned compare)
408 Node* chk = _gvn.transform(new CmpUNode(idx, len) );
409 BoolTest::mask btest = BoolTest::lt;
410 tst = _gvn.transform(new BoolNode(chk, btest) );
411 }
412 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
413 _gvn.set_type(rc, rc->Value(&_gvn));
414 if (!tst->is_Con()) {
415 record_for_igvn(rc);
416 }
417 set_control(_gvn.transform(new IfTrueNode(rc)));
418 // Branch to failure if out of bounds
419 {
420 PreserveJVMState pjvms(this);
421 set_control(_gvn.transform(new IfFalseNode(rc)));
422 if (C->allow_range_check_smearing()) {
423 // Do not use builtin_throw, since range checks are sometimes
424 // made more stringent by an optimistic transformation.
425 // This creates "tentative" range checks at this point,
426 // which are not guaranteed to throw exceptions.
427 // See IfNode::Ideal, is_range_check, adjust_check.
428 uncommon_trap(Deoptimization::Reason_range_check,
429 Deoptimization::Action_make_not_entrant,
430 nullptr, "range_check");
431 } else {
432 // If we have already recompiled with the range-check-widening
433 // heroic optimization turned off, then we must really be throwing
434 // range check exceptions.
435 builtin_throw(Deoptimization::Reason_range_check);
436 }
437 }
438 }
439
440 // For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
441 // and null-freeness. We can either prepare the speculative type for later use or emit explicit speculative checks with
442 // traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
443 // non-null-free implies non-flat, which allows us to remove flatness checks). This makes the graph simpler.
444 Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
445 const Type*& element_type) {
446 if (!array_type->is_flat() && !array_type->is_not_flat()) {
447 // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
448 // we can rely on a fixed memory layout (i.e. either a flat layout or not).
449 array = cast_to_speculative_array_type(array, array_type, element_type);
450 } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
451 // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
452 // at this bci.
453 array = cast_to_profiled_array_type(array);
454 }
455
456 // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile data
457 // to see whether we have a non-null-free or non-flat array. Speculating on a non-null-free array doesn't help aaload but
458 // could be profitable for a subsequent aastore.
459 if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
460 array = speculate_non_null_free_array(array, array_type);
461 }
462 if (!array_type->is_flat() && !array_type->is_not_flat()) {
463 array = speculate_non_flat_array(array, array_type);
464 }
465 return array;
466 }
467
468 // Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
469 // wrong. On the fast path, we add a CheckCastPP to use the exact type.
470 Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
471 Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
472 ciKlass* speculative_array_type = array_type->speculative_type();
473 if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
474 // No speculative type, check profile data at this bci
475 speculative_array_type = nullptr;
476 reason = Deoptimization::Reason_class_check;
477 if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
478 ciKlass* profiled_element_type = nullptr;
479 ProfilePtrKind element_ptr = ProfileMaybeNull;
480 bool flat_array = true;
481 bool null_free_array = true;
482 method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
483 null_free_array);
484 }
485 }
486 if (speculative_array_type != nullptr) {
487 // Speculate that this array has the exact type reported by profile data
488 Node* casted_array = nullptr;
489 DEBUG_ONLY(Node* old_control = control();)
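    // type_check_receiver() returns the control path on which the exact type check
    // fails; on the passing path, 'casted_array' is the array cast to the
    // speculative type.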
490 Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
491 if (stopped()) {
492 // The check always fails and therefore profile information is incorrect. Don't use it.
493 assert(old_control == slow_ctl, "type check should have been removed");
494 set_control(slow_ctl);
495 } else if (!slow_ctl->is_top()) {
496 { PreserveJVMState pjvms(this);
497 set_control(slow_ctl);
498 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
499 }
500 replace_in_map(array, casted_array);
501 array_type = _gvn.type(casted_array)->is_aryptr();
502 element_type = array_type->elem();
503 return casted_array;
504 }
505 }
506 return array;
507 }
508
509 // Create a CheckCastPP when the speculative type can improve the current type.
510 Node* Parse::cast_to_profiled_array_type(Node* const array) {
511 ciKlass* array_type = nullptr;
512 ciKlass* element_type = nullptr;
513 ProfilePtrKind element_ptr = ProfileMaybeNull;
514 bool flat_array = true;
515 bool null_free_array = true;
516 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
517 if (array_type != nullptr) {
518 return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
519 }
520 return array;
521 }
522
523 // Speculate that the array is non-null-free. We emit a trap when this turns out to be
524 // wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
525 Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
526 bool null_free_array = true;
527 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
528 if (array_type->speculative() != nullptr &&
529 array_type->speculative()->is_aryptr()->is_not_null_free() &&
530 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
531 null_free_array = false;
532 reason = Deoptimization::Reason_speculate_class_check;
533 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
534 ciKlass* profiled_array_type = nullptr;
535 ciKlass* profiled_element_type = nullptr;
536 ProfilePtrKind element_ptr = ProfileMaybeNull;
537 bool flat_array = true;
538 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
539 null_free_array);
540 reason = Deoptimization::Reason_class_check;
541 }
542 if (!null_free_array) {
543 { // Deoptimize if null-free array
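      // BuildCutout makes the failing path of the test current within this scope
      // (so the trap below lands there) and restores the passing path when the
      // scope ends.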
544 BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
545 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
546 }
547 assert(!stopped(), "null-free array should have been caught earlier");
548 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
549 replace_in_map(array, casted_array);
550 array_type = _gvn.type(casted_array)->is_aryptr();
551 return casted_array;
552 }
553 return array;
554 }
555
556 // Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
557 // On the fast path, we add a CheckCastPP to use the non-flat type.
558 Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
559 bool flat_array = true;
560 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
561 if (array_type->speculative() != nullptr &&
562 array_type->speculative()->is_aryptr()->is_not_flat() &&
563 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
564 flat_array = false;
565 reason = Deoptimization::Reason_speculate_class_check;
566   } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
567 ciKlass* profiled_array_type = nullptr;
568 ciKlass* profiled_element_type = nullptr;
569 ProfilePtrKind element_ptr = ProfileMaybeNull;
570 bool null_free_array = true;
571 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
572 null_free_array);
573 reason = Deoptimization::Reason_class_check;
574 }
575 if (!flat_array) {
576 { // Deoptimize if flat array
577 BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
578 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
579 }
580 assert(!stopped(), "flat array should have been caught earlier");
581 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
582 replace_in_map(array, casted_array);
583 return casted_array;
584 }
585 return array;
586 }
587
588 // returns IfNode
589 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
590 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
591 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
592 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
593 return iff;
594 }
595
596
597 // Sentinel value for the target bci, used to mark branches that are never
598 // taken (according to profiling).
599 static const int never_reached = INT_MAX;
600
601 //------------------------------helper for tableswitch-------------------------
602 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
603 // True branch, use existing map info
604 { PreserveJVMState pjvms(this);
605 Node *iftrue = _gvn.transform( new IfTrueNode (iff) );
606 set_control( iftrue );
1803 // False branch
1804 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1805 set_control(iffalse);
1806
1807 if (stopped()) { // Path is dead?
1808 NOT_PRODUCT(explicit_null_checks_elided++);
1809 if (C->eliminate_boxing()) {
1810 // Mark the successor block as parsed
1811 next_block->next_path_num();
1812 }
1813 } else { // Path is live.
1814 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1815 }
1816
1817 if (do_stress_trap) {
1818 stress_trap(iff, counter, incr_store);
1819 }
1820 }
1821
1822 //------------------------------------do_if------------------------------------
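// can_trap:         forwarded to adjust_map_after_if(); presumably controls whether an
//                   unstable_if uncommon trap may be emitted for a never-taken path.
// new_path:         merge the taken branch into the target block via a new path
//                   (merge_new_path) instead of the regular merge.
// ctrl_taken:       if non-null, do not merge the taken branch; its control is returned
//                   here so the caller can wire it up itself.
// stress_count_mem: if non-null, returns the memory state of the stress counter store
//                   (only used with StressUnstableIfTraps).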
1823 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken, Node** stress_count_mem) {
1824 int target_bci = iter().get_dest();
1825
1826 Block* branch_block = successor_for_bci(target_bci);
1827 Block* next_block = successor_for_bci(iter().next_bci());
1828
1829 float cnt;
1830 float prob = branch_prediction(cnt, btest, target_bci, c);
1831 float untaken_prob = 1.0 - prob;
1832
1833 if (prob == PROB_UNKNOWN) {
1834 if (PrintOpto && Verbose) {
1835 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1836 }
1837 repush_if_args(); // to gather stats on loop
1838 uncommon_trap(Deoptimization::Reason_unreached,
1839 Deoptimization::Action_reinterpret,
1840 nullptr, "cold");
1841 if (C->eliminate_boxing()) {
1842 // Mark the successor blocks as parsed
1843 branch_block->next_path_num();
1844 next_block->next_path_num();
1845 }
1846 return;
1847 }
1848
1849 Node* counter = nullptr;
1850 Node* incr_store = nullptr;
1851 bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1852 if (do_stress_trap) {
1853 increment_trap_stress_counter(counter, incr_store);
1854 if (stress_count_mem != nullptr) {
1855 *stress_count_mem = incr_store;
1856 }
1857 }
1858
1859 // Sanity check the probability value
1860 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1861
1862 bool taken_if_true = true;
1863 // Convert BoolTest to canonical form:
1864 if (!BoolTest(btest).is_canonical()) {
1865 btest = BoolTest(btest).negate();
1866 taken_if_true = false;
1867 // prob is NOT updated here; it remains the probability of the taken
1868 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1869 }
1870 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1871
1872 Node* tst0 = new BoolNode(c, btest);
1873 Node* tst = _gvn.transform(tst0);
1874 BoolTest::mask taken_btest = BoolTest::illegal;
1875 BoolTest::mask untaken_btest = BoolTest::illegal;
1876
1897 }
1898
1899 // Generate real control flow
1900 float true_prob = (taken_if_true ? prob : untaken_prob);
1901 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1902 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1903 Node* taken_branch = new IfTrueNode(iff);
1904 Node* untaken_branch = new IfFalseNode(iff);
1905 if (!taken_if_true) { // Finish conversion to canonical form
1906 Node* tmp = taken_branch;
1907 taken_branch = untaken_branch;
1908 untaken_branch = tmp;
1909 }
1910
1911 // Branch is taken:
1912 { PreserveJVMState pjvms(this);
1913 taken_branch = _gvn.transform(taken_branch);
1914 set_control(taken_branch);
1915
1916 if (stopped()) {
1917 if (C->eliminate_boxing() && !new_path) {
1918 // Mark the successor block as parsed (if we haven't created a new path)
1919 branch_block->next_path_num();
1920 }
1921 } else {
1922 adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1923 if (!stopped()) {
1924 if (new_path) {
1925 // Merge by using a new path
1926 merge_new_path(target_bci);
1927 } else if (ctrl_taken != nullptr) {
1928 // Don't merge but save taken branch to be wired by caller
1929 *ctrl_taken = control();
1930 } else {
1931 merge(target_bci);
1932 }
1933 }
1934 }
1935 }
1936
1937 untaken_branch = _gvn.transform(untaken_branch);
1938 set_control(untaken_branch);
1939
1940 // Branch not taken.
1941 if (stopped() && ctrl_taken == nullptr) {
1942 if (C->eliminate_boxing()) {
1943 // Mark the successor block as parsed (if caller does not re-wire control flow)
1944 next_block->next_path_num();
1945 }
1946 } else {
1947 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1948 }
1949
1950 if (do_stress_trap) {
1951 stress_trap(iff, counter, incr_store);
1952 }
1953 }
1954
1955
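// Map the speculative nullness recorded in a type to the corresponding profiled
// pointer kind.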
1956 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1957 if (t->speculative() == nullptr) {
1958 return ProfileUnknownNull;
1959 }
1960 if (t->speculative_always_null()) {
1961 return ProfileAlwaysNull;
1962 }
1963 if (t->speculative_maybe_null()) {
1964 return ProfileMaybeNull;
1965 }
1966 return ProfileNeverNull;
1967 }
1968
1969 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1970 inc_sp(2);
1971 Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1972 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1973 speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1974 dec_sp(2);
1975 if (btest == BoolTest::ne) {
1976 {
1977 PreserveJVMState pjvms(this);
1978 replace_in_map(input, cast);
1979 int target_bci = iter().get_dest();
1980 merge(target_bci);
1981 }
1982 record_for_igvn(eq_region);
1983 set_control(_gvn.transform(eq_region));
1984 } else {
1985 replace_in_map(input, cast);
1986 }
1987 }
1988
1989 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
1990 inc_sp(2);
1991 null_ctl = top();
1992 Node* cast = null_check_oop(input, &null_ctl,
1993 input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
1994 false,
1995 speculative_ptr_kind(tinput) == ProfileNeverNull &&
1996 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
1997 dec_sp(2);
1998 return cast;
1999 }
2000
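// Profiling gives us a concrete, non-inline-type class for this acmp input. Null-check
// the input and emit a receiver type check that traps if the class differs. On the
// surviving paths the input is either null or an identity object, so the (already
// unequal) pointer comparison decides the result: acmpne branches to the target,
// acmpeq falls through.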
2001 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2002 Node* ne_region = new RegionNode(1);
2003 Node* null_ctl;
2004 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2005 ne_region->add_req(null_ctl);
2006
2007 Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
2008 {
2009 PreserveJVMState pjvms(this);
2010 inc_sp(2);
2011 set_control(slow_ctl);
2012 Deoptimization::DeoptReason reason;
2013 if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2014 reason = Deoptimization::Reason_speculate_class_check;
2015 } else {
2016 reason = Deoptimization::Reason_class_check;
2017 }
2018 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2019 }
2020 ne_region->add_req(control());
2021
2022 record_for_igvn(ne_region);
2023 set_control(_gvn.transform(ne_region));
2024 if (btest == BoolTest::ne) {
2025 {
2026 PreserveJVMState pjvms(this);
2027 if (null_ctl == top()) {
2028 replace_in_map(input, cast);
2029 }
2030 int target_bci = iter().get_dest();
2031 merge(target_bci);
2032 }
2033 record_for_igvn(eq_region);
2034 set_control(_gvn.transform(eq_region));
2035 } else {
2036 if (null_ctl == top()) {
2037 replace_in_map(input, cast);
2038 }
2039 set_control(_gvn.transform(ne_region));
2040 }
2041 }
2042
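// Like acmp_known_non_inline_type_input(), but profiling only tells us that this input
// is not an inline type, without a concrete class. Guard that assumption with an
// inline_type_test() cutout that traps if the input does turn out to be an inline object.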
2043 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2044 Node* ne_region = new RegionNode(1);
2045 Node* null_ctl;
2046 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2047 ne_region->add_req(null_ctl);
2048
2049 {
2050 BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2051 inc_sp(2);
2052 uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2053 }
2054
2055 ne_region->add_req(control());
2056
2057 record_for_igvn(ne_region);
2058 set_control(_gvn.transform(ne_region));
2059 if (btest == BoolTest::ne) {
2060 {
2061 PreserveJVMState pjvms(this);
2062 if (null_ctl == top()) {
2063 replace_in_map(input, cast);
2064 }
2065 int target_bci = iter().get_dest();
2066 merge(target_bci);
2067 }
2068 record_for_igvn(eq_region);
2069 set_control(_gvn.transform(eq_region));
2070 } else {
2071 if (null_ctl == top()) {
2072 replace_in_map(input, cast);
2073 }
2074 set_control(_gvn.transform(ne_region));
2075 }
2076 }
2077
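// Parse if_acmpeq/if_acmpne, e.g. the Java condition 'if (x == y)' on object references.
// Without Valhalla this is a plain pointer comparison. With inline (value) objects, two
// distinct buffers can still be substitutable and therefore compare equal, so after the
// pointer comparison we use profiling and speculation to cheaply rule out nulls and
// identity objects, and only if both operands may still be inline objects of the same
// class do we call ValueObjectMethods::isSubstitutable() to decide the comparison.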
2078 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2079 ciKlass* left_type = nullptr;
2080 ciKlass* right_type = nullptr;
2081 ProfilePtrKind left_ptr = ProfileUnknownNull;
2082 ProfilePtrKind right_ptr = ProfileUnknownNull;
2083 bool left_inline_type = true;
2084 bool right_inline_type = true;
2085
2086 // Leverage profiling at acmp
2087 if (UseACmpProfile) {
2088 method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2089 if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2090 left_type = nullptr;
2091 right_type = nullptr;
2092 left_inline_type = true;
2093 right_inline_type = true;
2094 }
2095 if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2096 left_ptr = ProfileUnknownNull;
2097 right_ptr = ProfileUnknownNull;
2098 }
2099 }
2100
2101 if (UseTypeSpeculation) {
2102 record_profile_for_speculation(left, left_type, left_ptr);
2103 record_profile_for_speculation(right, right_type, right_ptr);
2104 }
2105
2106 if (!Arguments::is_valhalla_enabled()) {
2107 Node* cmp = CmpP(left, right);
2108 cmp = optimize_cmp_with_klass(cmp);
2109 do_if(btest, cmp);
2110 return;
2111 }
2112
2113 // Check for equality before potentially allocating
2114 if (left == right) {
2115 do_if(btest, makecon(TypeInt::CC_EQ));
2116 return;
2117 }
2118
2119 // Allocate inline type operands and re-execute on deoptimization
2120 if (left->is_InlineType()) {
2121 PreserveReexecuteState preexecs(this);
2122 inc_sp(2);
2123 jvms()->set_should_reexecute(true);
2124 left = left->as_InlineType()->buffer(this);
2125 }
2126 if (right->is_InlineType()) {
2127 PreserveReexecuteState preexecs(this);
2128 inc_sp(2);
2129 jvms()->set_should_reexecute(true);
2130 right = right->as_InlineType()->buffer(this);
2131 }
2132
2133 // First, do a normal pointer comparison
2134 const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2135 const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2136 Node* cmp = CmpP(left, right);
2137 cmp = optimize_cmp_with_klass(cmp);
2138 if (tleft == nullptr || !tleft->can_be_inline_type() ||
2139 tright == nullptr || !tright->can_be_inline_type()) {
2140 // A plain pointer comparison is sufficient if one of the operands can't be an inline type
2141 do_if(btest, cmp);
2142 return;
2143 }
2144
2145 // Don't add traps to unstable if branches: additional checks are required to decide
2146 // whether the operands are substitutable, so we must not prune the branches of an
2147 // individual if based on the profiling of the acmp as a whole.
2148 // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2149 // assumes a 1-1 mapping between the ifs and the acmp branches, i.e. that hitting a
2150 // trap means the corresponding acmp branch is taken on re-execution.
2151 const bool can_trap = true;
2152
2153 Node* eq_region = nullptr;
2154 if (btest == BoolTest::eq) {
2155 do_if(btest, cmp, !can_trap, true);
2156 if (stopped()) {
2157 // Pointers are equal, operands must be equal
2158 return;
2159 }
2160 } else {
2161 assert(btest == BoolTest::ne, "only eq or ne");
2162 Node* is_not_equal = nullptr;
2163 eq_region = new RegionNode(3);
2164 {
2165 PreserveJVMState pjvms(this);
2166 // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2167 do_if(btest, cmp, !can_trap, false, &is_not_equal);
2168 if (!stopped()) {
2169 eq_region->init_req(1, control());
2170 }
2171 }
2172 if (is_not_equal == nullptr || is_not_equal->is_top()) {
2173 record_for_igvn(eq_region);
2174 set_control(_gvn.transform(eq_region));
2175 return;
2176 }
2177 set_control(is_not_equal);
2178 }
2179
2180 // Prefer speculative types if available
2181 if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2182 if (tleft->speculative_type() != nullptr) {
2183 left_type = tleft->speculative_type();
2184 }
2185 if (tright->speculative_type() != nullptr) {
2186 right_type = tright->speculative_type();
2187 }
2188 }
2189
2190 if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2191 ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2192 if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2193 left_ptr = speculative_left_ptr;
2194 } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2195 left_ptr = speculative_left_ptr;
2196 }
2197 }
2198 if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2199 ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2200 if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2201 right_ptr = speculative_right_ptr;
2202 } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2203 right_ptr = speculative_right_ptr;
2204 }
2205 }
2206
2207 if (left_ptr == ProfileAlwaysNull) {
2208 // Comparison with null. Assert the input is indeed null and we're done.
2209 acmp_always_null_input(left, tleft, btest, eq_region);
2210 return;
2211 }
2212 if (right_ptr == ProfileAlwaysNull) {
2213 // Comparison with null. Assert the input is indeed null and we're done.
2214 acmp_always_null_input(right, tright, btest, eq_region);
2215 return;
2216 }
2217 if (left_type != nullptr && !left_type->is_inlinetype()) {
2218 // Comparison with an object of known type
2219 acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2220 return;
2221 }
2222 if (right_type != nullptr && !right_type->is_inlinetype()) {
2223 // Comparison with an object of known type
2224 acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2225 return;
2226 }
2227 if (!left_inline_type) {
2228 // Comparison with an object known not to be an inline type
2229 acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2230 return;
2231 }
2232 if (!right_inline_type) {
2233 // Comparison with an object known not to be an inline type
2234 acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2235 return;
2236 }
2237
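// ne_region collects the control paths on which the operands are known to be not equal:
//   req(1): right operand is null (pointers already differ)
//   req(2): right operand is not an inline object (identity comparison already failed)
//   req(3): left operand is null
//   req(4): the operands have different classes
//   req(5): ValueObjectMethods::isSubstitutable() returned false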
2238 // Pointers are not equal, check if the right operand is non-null
2239 Node* ne_region = new RegionNode(6);
2240 Node* null_ctl = nullptr;
2241 Node* not_null_left = nullptr;
2242 Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2243 ne_region->init_req(1, null_ctl);
2244
2245 if (!stopped()) {
2246 // The right operand is non-null, check if it is an inline type
2247 Node* is_value = inline_type_test(not_null_right);
2248 IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2249 Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2250 ne_region->init_req(2, not_value);
2251 set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2252
2253 // The right operand is an inline type, check if the left operand is non-null
2254 not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2255 ne_region->init_req(3, null_ctl);
2256
2257 if (!stopped()) {
2258 // Check if both operands are of the same class.
2259 Node* kls_left = load_object_klass(not_null_left);
2260 Node* kls_right = load_object_klass(not_null_right);
2261 Node* kls_cmp = CmpP(kls_left, kls_right);
2262 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2263 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2264 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2265 set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2266 ne_region->init_req(4, kls_ne);
2267 }
2268 }
2269
2270 if (stopped()) {
2271 record_for_igvn(ne_region);
2272 set_control(_gvn.transform(ne_region));
2273 if (btest == BoolTest::ne) {
2274 {
2275 PreserveJVMState pjvms(this);
2276 int target_bci = iter().get_dest();
2277 merge(target_bci);
2278 }
2279 record_for_igvn(eq_region);
2280 set_control(_gvn.transform(eq_region));
2281 }
2282 return;
2283 }
2284
2285 // Both operands are value objects of the same class, so we need to perform a
2286 // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2287 Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2288 Node* mem = reset_memory();
2289 Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2290
2291 Node* eq_io_phi = nullptr;
2292 Node* eq_mem_phi = nullptr;
2293 if (eq_region != nullptr) {
2294 eq_io_phi = PhiNode::make(eq_region, i_o());
2295 eq_mem_phi = PhiNode::make(eq_region, mem);
2296 }
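// The isSubstitutable() call below produces its own I/O and memory state, while the fast
// paths merged above keep the state from before the call. Phis over i_o() and the reset
// memory keep the JVM state consistent at the ne_region/eq_region merge points.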
2297
2298 set_all_memory(mem);
2299
2300 kill_dead_locals();
2301 ciSymbol* subst_method_name = UseAltSubstitutabilityMethod ? ciSymbols::isSubstitutableAlt_name() : ciSymbols::isSubstitutable_name();
2302 ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(subst_method_name, ciSymbols::object_object_boolean_signature());
2303 CallStaticJavaNode* call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2304 call->set_override_symbolic_info(true);
2305 call->init_req(TypeFunc::Parms, not_null_left);
2306 call->init_req(TypeFunc::Parms+1, not_null_right);
2307 inc_sp(2);
2308 set_edges_for_java_call(call, false, false);
2309 Node* ret = set_results_for_java_call(call, false, true);
2310 dec_sp(2);
2311
2312 // Test the return value of ValueObjectMethods::isSubstitutable()
2313 // This is the last check, do_if can emit traps now.
2314 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2315 Node* ctl = C->top();
2316 Node* stress_count_mem = nullptr;
2317 if (btest == BoolTest::eq) {
2318 PreserveJVMState pjvms(this);
2319 do_if(btest, subst_cmp, can_trap, false, nullptr, &stress_count_mem);
2320 if (!stopped()) {
2321 ctl = control();
2322 }
2323 } else {
2324 assert(btest == BoolTest::ne, "only eq or ne");
2325 PreserveJVMState pjvms(this);
2326 do_if(btest, subst_cmp, can_trap, false, &ctl, &stress_count_mem);
2327 if (!stopped()) {
2328 eq_region->init_req(2, control());
2329 eq_io_phi->init_req(2, i_o());
2330 eq_mem_phi->init_req(2, reset_memory());
2331 }
2332 }
2333 if (stress_count_mem != nullptr) {
2334 set_memory(stress_count_mem, stress_count_mem->adr_type());
2335 }
2336 ne_region->init_req(5, ctl);
2337 ne_io_phi->init_req(5, i_o());
2338 ne_mem_phi->init_req(5, reset_memory());
2339
2340 record_for_igvn(ne_region);
2341 set_control(_gvn.transform(ne_region));
2342 set_i_o(_gvn.transform(ne_io_phi));
2343 set_all_memory(_gvn.transform(ne_mem_phi));
2344
2345 if (btest == BoolTest::ne) {
2346 {
2347 PreserveJVMState pjvms(this);
2348 int target_bci = iter().get_dest();
2349 merge(target_bci);
2350 }
2351
2352 record_for_igvn(eq_region);
2353 set_control(_gvn.transform(eq_region));
2354 set_i_o(_gvn.transform(eq_io_phi));
2355 set_all_memory(_gvn.transform(eq_mem_phi));
2356 }
2357 }
2358
2359 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2360 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2361 // then either takes the trap or executes the original, unstable if.
2362 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2363 // Search for an unstable if trap
2364 CallStaticJavaNode* trap = nullptr;
2365 assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2366 ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2367 if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2368 // No suitable trap found. Remove unused counter load and increment.
2369 C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2370 return;
2371 }
2372
2373 // Remove trap from optimization list since we add another path to the trap.
2374 bool success = C->remove_unstable_if_trap(trap, true);
2375 assert(success, "Trap already modified");
2376
2377 // Add a check before the original if that traps with a certain frequency and otherwise executes the original if
2378 int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2411 }
2412
2413 void Parse::maybe_add_predicate_after_if(Block* path) {
2414 if (path->is_SEL_head() && path->preds_parsed() == 0) {
2415 // Add Parse Predicates at the bci of the if that dominates the loop so traps
2416 // can be recorded on the if's profile data
2417 int bc_depth = repush_if_args();
2418 add_parse_predicates();
2419 dec_sp(bc_depth);
2420 path->set_has_predicates();
2421 }
2422 }
2423
2424
2425 //----------------------------adjust_map_after_if------------------------------
2426 // Adjust the JVM state to reflect the result of taking this path.
2427 // Basically, it means inspecting the CmpNode controlling this
2428 // branch, seeing how it constrains a tested value, and then
2429 // deciding if it's worth our while to encode this constraint
2430 // as graph nodes in the current abstract interpretation map.
2431 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2432 if (!c->is_Cmp()) {
2433 maybe_add_predicate_after_if(path);
2434 return;
2435 }
2436
2437 if (stopped() || btest == BoolTest::illegal) {
2438 return; // nothing to do
2439 }
2440
2441 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2442
2443 if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2444 repush_if_args();
2445 Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2446 Deoptimization::Action_reinterpret,
2447 nullptr,
2448 (is_fallthrough ? "taken always" : "taken never"));
2449
2450 if (call != nullptr) {
2451 C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2452 }
2453 return;
2454 }
2455
2456 Node* val = c->in(1);
2457 Node* con = c->in(2);
2458 const Type* tcon = _gvn.type(con);
2459 const Type* tval = _gvn.type(val);
2460 bool have_con = tcon->singleton();
2461 if (tval->singleton()) {
2462 if (!have_con) {
2463 // Swap, so constant is in con.
2591 Node* obj = nullptr;
2592 const TypeOopPtr* cast_type = nullptr;
2593 // Insert a cast node with a narrowed type after a successful type check.
2594 if (match_type_check(_gvn, btest, con, tcon, val, tval,
2595 &obj, &cast_type)) {
2596 assert(obj != nullptr && cast_type != nullptr, "missing type check info");
2597 const Type* obj_type = _gvn.type(obj);
2598 const TypeOopPtr* tboth = obj_type->join_speculative(cast_type)->isa_oopptr();
2599 if (tboth != nullptr && tboth != obj_type && tboth->higher_equal(obj_type)) {
2600 int obj_in_map = map()->find_edge(obj);
2601 JVMState* jvms = this->jvms();
2602 if (obj_in_map >= 0 &&
2603 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2604 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2605 const Type* tcc = ccast->as_Type()->type();
2606 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2607 // Delay transform() call to allow recovery of pre-cast value
2608 // at the control merge.
2609 _gvn.set_type_bottom(ccast);
2610 record_for_igvn(ccast);
2611 if (tboth->is_inlinetypeptr()) {
2612 ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2613 }
2614 // Here's the payoff.
2615 replace_in_map(obj, ccast);
2616 }
2617 }
2618 }
2619
2620 int val_in_map = map()->find_edge(val);
2621 if (val_in_map < 0) return; // replace_in_map would be useless
2622 {
2623 JVMState* jvms = this->jvms();
2624 if (!(jvms->is_loc(val_in_map) ||
2625 jvms->is_stk(val_in_map)))
2626 return; // again, it would be useless
2627 }
2628
2629 // Check for a comparison to a constant, and "know" that the compared
2630 // value is constrained on this path.
2631 assert(tcon->singleton(), "");
2632 ConstraintCastNode* ccast = nullptr;
2633 Node* cast = nullptr;
2697 if (c->Opcode() == Op_CmpP &&
2698 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2699 c->in(2)->is_Con()) {
2700 Node* load_klass = nullptr;
2701 Node* decode = nullptr;
2702 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2703 decode = c->in(1);
2704 load_klass = c->in(1)->in(1);
2705 } else {
2706 load_klass = c->in(1);
2707 }
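// If we have a speculative type for the object whose klass is being loaded, cast the
// object to that type and rebuild the klass load (and DecodeNKlass, if any) on top of
// the cast so that the CmpP against the constant klass can constant fold.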
2708 if (load_klass->in(2)->is_AddP()) {
2709 Node* addp = load_klass->in(2);
2710 Node* obj = addp->in(AddPNode::Address);
2711 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2712 if (obj_type->speculative_type_not_null() != nullptr) {
2713 ciKlass* k = obj_type->speculative_type();
2714 inc_sp(2);
2715 obj = maybe_cast_profiled_obj(obj, k);
2716 dec_sp(2);
2717 if (obj->is_InlineType()) {
2718 assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2719 obj = obj->as_InlineType()->get_oop();
2720 }
2721 // Make the CmpP use the casted obj
2722 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2723 load_klass = load_klass->clone();
2724 load_klass->set_req(2, addp);
2725 load_klass = _gvn.transform(load_klass);
2726 if (decode != nullptr) {
2727 decode = decode->clone();
2728 decode->set_req(1, load_klass);
2729 load_klass = _gvn.transform(decode);
2730 }
2731 c = c->clone();
2732 c->set_req(1, load_klass);
2733 c = _gvn.transform(c);
2734 }
2735 }
2736 }
2737 return c;
2738 }
2739
2740 //------------------------------do_one_bytecode--------------------------------
3440
3441 case Bytecodes::_i2d:
3442 a = pop();
3443 b = _gvn.transform( new ConvI2DNode(a));
3444 push_pair(b);
3445 break;
3446
3447 case Bytecodes::_iinc: // Increment local
3448 i = iter().get_index(); // Get local index
3449 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3450 break;
3451
3452 // Exit points of synchronized methods must have an unlock node
3453 case Bytecodes::_return:
3454 return_current(nullptr);
3455 break;
3456
3457 case Bytecodes::_ireturn:
3458 case Bytecodes::_areturn:
3459 case Bytecodes::_freturn:
3460 return_current(cast_to_non_larval(pop()));
3461 break;
3462 case Bytecodes::_lreturn:
3463 case Bytecodes::_dreturn:
3464 return_current(pop_pair());
3465 break;
3466
3467 case Bytecodes::_athrow:
3468 // A null exception oop throws a NullPointerException
3469 null_check(peek());
3470 if (stopped()) return;
3471 // Hook the thrown exception directly to subsequent handlers.
3472 if (BailoutToInterpreterForThrows) {
3473 // Keep method interpreted from now on.
3474 uncommon_trap(Deoptimization::Reason_unhandled,
3475 Deoptimization::Action_make_not_compilable);
3476 return;
3477 }
3478 if (env()->jvmti_can_post_on_exceptions()) {
3479 // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3480 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3481 }
3482 // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3495
3496 // See if we can get some profile data and hand it off to the next block
3497 Block *target_block = block()->successor_for_bci(target_bci);
3498 if (target_block->pred_count() != 1) break;
3499 ciMethodData* methodData = method()->method_data();
3500 if (!methodData->is_mature()) break;
3501 ciProfileData* data = methodData->bci_to_data(bci());
3502 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3503 int taken = ((ciJumpData*)data)->taken();
3504 taken = method()->scale_count(taken);
3505 target_block->set_count(taken);
3506 break;
3507 }
3508
3509 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
3510 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3511 handle_if_null:
3512 // If this is a backwards branch in the bytecodes, add Safepoint
3513 maybe_add_safepoint(iter().get_dest());
3514 a = null();
3515 b = cast_to_non_larval(pop());
3516 if (b->is_InlineType()) {
3517 // Null checking a scalarized but nullable inline type. Check the null marker
3518 // input instead of the oop input to avoid keeping buffer allocations alive
3519 c = _gvn.transform(new CmpINode(b->as_InlineType()->get_null_marker(), zerocon(T_INT)));
3520 } else {
3521 if (!_gvn.type(b)->speculative_maybe_null() &&
3522 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3523 inc_sp(1);
3524 Node* null_ctl = top();
3525 b = null_check_oop(b, &null_ctl, true, true, true);
3526 assert(null_ctl->is_top(), "no null control here");
3527 dec_sp(1);
3528 } else if (_gvn.type(b)->speculative_always_null() &&
3529 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3530 inc_sp(1);
3531 b = null_assert(b);
3532 dec_sp(1);
3533 }
3534 c = _gvn.transform( new CmpPNode(b, a) );
3535 }
3536 do_ifnull(btest, c);
3537 break;
3538
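// if_acmpXX pops value2 into 'a' and then value1 into 'b'; do_acmp(btest, b, a) thus
// compares the operands in their original (source) order.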
3539 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3540 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3541 handle_if_acmp:
3542 // If this is a backwards branch in the bytecodes, add Safepoint
3543 maybe_add_safepoint(iter().get_dest());
3544 a = cast_to_non_larval(pop());
3545 b = cast_to_non_larval(pop());
3546 do_acmp(btest, b, a);
3547 break;
3548
3549 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3550 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3551 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3552 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3553 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3554 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3555 handle_ifxx:
3556 // If this is a backwards branch in the bytecodes, add Safepoint
3557 maybe_add_safepoint(iter().get_dest());
3558 a = _gvn.intcon(0);
3559 b = pop();
3560 c = _gvn.transform( new CmpINode(b, a) );
3561 do_if(btest, c);
3562 break;
3563
3564 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3565 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3566 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3581 break;
3582
3583 case Bytecodes::_lookupswitch:
3584 do_lookupswitch();
3585 break;
3586
3587 case Bytecodes::_invokestatic:
3588 case Bytecodes::_invokedynamic:
3589 case Bytecodes::_invokespecial:
3590 case Bytecodes::_invokevirtual:
3591 case Bytecodes::_invokeinterface:
3592 do_call();
3593 break;
3594 case Bytecodes::_checkcast:
3595 do_checkcast();
3596 break;
3597 case Bytecodes::_instanceof:
3598 do_instanceof();
3599 break;
3600 case Bytecodes::_anewarray:
3601 do_newarray();
3602 break;
3603 case Bytecodes::_newarray:
3604 do_newarray((BasicType)iter().get_index());
3605 break;
3606 case Bytecodes::_multianewarray:
3607 do_multianewarray();
3608 break;
3609 case Bytecodes::_new:
3610 do_new();
3611 break;
3612
3613 case Bytecodes::_jsr:
3614 case Bytecodes::_jsr_w:
3615 do_jsr();
3616 break;
3617
3618 case Bytecodes::_ret:
3619 do_ret();
3620 break;
3621
|