 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciMethodData.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm_io.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern uint explicit_null_checks_inserted,
            explicit_null_checks_elided;
#endif

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  bool big_val = bt == T_DOUBLE || bt == T_LONG;
  Node* adr = array_addressing(bt, 0, elemtype);
  if (stopped())  return;     // guaranteed null or range check

  pop();                      // index (already used)
  Node* array = pop();        // the array itself

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  }
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  if (big_val) {
    push_pair(ld);
  } else {
    push(ld);
  }
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  bool big_val = bt == T_DOUBLE || bt == T_LONG;
  Node* adr = array_addressing(bt, big_val ? 2 : 1, elemtype);
  if (stopped())  return;     // guaranteed null or range check
  if (bt == T_OBJECT) {
    array_store_check();
    if (stopped()) {
      return;
    }
  }
  Node* val;                  // Oop to store
  if (big_val) {
    val = pop_pair();
  } else {
    val = pop();
  }
  pop();                      // index (already used)
  Node* array = pop();        // the array itself

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  }
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
}
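
// A minimal sketch of the operand-stack layouts the two callers above hand
// to array_addressing() (illustrative only, per the JVM bytecode spec): the
// 'vals' argument is the number of stack slots still sitting on top of the
// index when the address is computed.
//
//   iaload:   ..., arrayref, index                  -> vals == 0
//   iastore:  ..., arrayref, index, value           -> vals == 1
//   lastore:  ..., arrayref, index, value(2 slots)  -> vals == 2
//
// so peek(0 + vals) always yields the index and peek(1 + vals) the array.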


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack. Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
  Node *idx = peek(0+vals);   // Get from stack without popping
  Node *ary = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detection of a null exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  elemtype = arytype->elem();

  if (UseUniqueSubclasses) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->instance_klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != nullptr)  C->log()->elem("observe that='!need_range_check'");
  }
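
  // A concrete instance of the elision above (hypothetical types, for
  // illustration): for code such as
  //
  //   int[] a = new int[10];   // sizetype = int:[10,10]
  //   ... a[3] ...             // idxtype  = int:[3,3]
  //
  // idxtype->_hi (3) < sizetype->_lo (10) and idxtype->_lo (3) >= 0, so the
  // index is provably in bounds and no range check needs to be emitted.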

  if (!arytype->is_loaded()) {
    // Only fails for some -Xcomp runs
    // The class is unloaded. We have to run this bytecode in the interpreter.
    ciKlass* klass = arytype->unloaded_klass();

    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  klass, "!loaded array");
    return top();
  }

  // Do the range check
  if (need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths. Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
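
    // The unsigned-compare trick above, as a stand-alone sketch (names are
    // hypothetical): for len >= 0, a single unsigned comparison subsumes both
    // the idx >= 0 and idx < len checks, because a negative idx reinterpreted
    // as unsigned becomes a value larger than any valid array length:
    //
    //   static bool index_in_bounds(jint idx, jint len) {
    //     assert(len >= 0, "trick requires a non-negative length");
    //     return (juint)idx < (juint)len;  // false for idx < 0 or idx >= len
    //   }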
    RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    _gvn.set_type(rc, rc->Value(&_gvn));
    if (!tst->is_Con()) {
      record_for_igvn(rc);
    }
    set_control(_gvn.transform(new IfTrueNode(rc)));
    // Branch to failure if out of bounds
    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(new IfFalseNode(rc)));
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      nullptr, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());
  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}
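
// Shape of the subgraph emitted by jump_if_fork_int (a sketch, not a literal
// node dump): the comparison feeds a BoolNode carrying the test mask, which
// conditions the IfNode; callers then hang the projections off the result.
//
//    a   b
//     \ /
//    CmpI
//      |
//    Bool(mask)
//      |
//    If(prob, cnt)  -->  IfTrue / IfFalse (created by the caller)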


// sentinel value for the target bci to mark never-taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
  // False branch
  Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    NOT_PRODUCT(explicit_null_checks_elided++);
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {                      // Path is live.
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
  }

  if (do_stress_trap) {
    stress_trap(iff, counter, incr_store);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  nullptr, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  Node* counter = nullptr;
  Node* incr_store = nullptr;
  bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
  if (do_stress_trap) {
    increment_trap_stress_counter(counter, incr_store);
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");
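
  // Canonicalization example (illustrative): for bytecode 'if_icmpgt L',
  // btest arrives as BoolTest::gt, which is not canonical. It is negated to
  // BoolTest::le and taken_if_true becomes false: the IfTrue projection now
  // models the fall-through condition, while the jump to L hangs off the
  // IfFalse projection. prob still refers to the branch to L, i.e. the
  // taken path, exactly as the comment above states.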

  Node* tst0 = new BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  }

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new IfTrueNode(iff);
  Node* untaken_branch = new IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {
      adjust_map_after_if(taken_btest, c, prob, branch_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped()) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {
    adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
  }

  if (do_stress_trap) {
    stress_trap(iff, counter, incr_store);
  }
}

// Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
// Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
// then either takes the trap or executes the original, unstable if.
void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
  // Search for an unstable if trap
  CallStaticJavaNode* trap = nullptr;
  assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
  ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
  if (trap == nullptr || !trap->jvms()->should_reexecute()) {
    // No suitable trap found. Remove unused counter load and increment.
    C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
    return;
  }

  // Remove trap from optimization list since we add another path to the trap.
  bool success = C->remove_unstable_if_trap(trap, true);
  assert(success, "Trap already modified");

  // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
  int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
}

void Parse::maybe_add_predicate_after_if(Block* path) {
  if (path->is_SEL_head() && path->preds_parsed() == 0) {
    // Add predicates at bci of if dominating the loop so traps can be
    // recorded on the if's profile data
    int bc_depth = repush_if_args();
    add_parse_predicates();
    dec_sp(bc_depth);
    path->set_has_predicates();
  }
}


//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
  if (!c->is_Cmp()) {
    maybe_add_predicate_after_if(path);
    return;
  }

  if (stopped() || btest == BoolTest::illegal) {
    return;                             // nothing to do
  }

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (path_is_suitable_for_uncommon_trap(prob)) {
    repush_if_args();
    Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
                               Deoptimization::Action_reinterpret,
                               nullptr,
                               (is_fallthrough ? "taken always" : "taken never"));

    if (call != nullptr) {
      C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
    }
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants? Then leave well enough alone.
      have_con = false;
    }
  }
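
  // Commute example (illustrative): for a comparison written as '7 < x' the
  // constant arrives in in(1). The swap above turns it into the equivalent
  // 'x > 7' (val = x, con = 7, btest commuted from lt to gt), so the
  // constraint logic below only ever has to reason about 'val <op> con'.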
  if (!have_con) {                      // remaining adjustments need a con
  Node* obj = nullptr;
  const TypeOopPtr* cast_type = nullptr;
  // Insert a cast node with a narrowed type after a successful type check.
  if (match_type_check(_gvn, btest, con, tcon, val, tval,
                       &obj, &cast_type)) {
    assert(obj != nullptr && cast_type != nullptr, "missing type check info");
    const Type* obj_type = _gvn.type(obj);
    const TypeOopPtr* tboth = obj_type->join_speculative(cast_type)->isa_oopptr();
    if (tboth != nullptr && tboth != obj_type && tboth->higher_equal(obj_type)) {
      int obj_in_map = map()->find_edge(obj);
      JVMState* jvms = this->jvms();
      if (obj_in_map >= 0 &&
          (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
        TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
        const Type* tcc = ccast->as_Type()->type();
        assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
        // Delay transform() call to allow recovery of pre-cast value
        // at the control merge.
        _gvn.set_type_bottom(ccast);
        record_for_igvn(ccast);
        // Here's the payoff.
        replace_in_map(obj, ccast);
      }
    }
  }

  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = nullptr;
  Node* cast = nullptr;
  if (c->Opcode() == Op_CmpP &&
      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
      c->in(2)->is_Con()) {
    Node* load_klass = nullptr;
    Node* decode = nullptr;
    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
      decode = c->in(1);
      load_klass = c->in(1)->in(1);
    } else {
      load_klass = c->in(1);
    }
    if (load_klass->in(2)->is_AddP()) {
      Node* addp = load_klass->in(2);
      Node* obj = addp->in(AddPNode::Address);
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      if (obj_type->speculative_type_not_null() != nullptr) {
        ciKlass* k = obj_type->speculative_type();
        inc_sp(2);
        obj = maybe_cast_profiled_obj(obj, k);
        dec_sp(2);
        // Make the CmpP use the casted obj
        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
        load_klass = load_klass->clone();
        load_klass->set_req(2, addp);
        load_klass = _gvn.transform(load_klass);
        if (decode != nullptr) {
          decode = decode->clone();
          decode->set_req(1, load_klass);
          load_klass = _gvn.transform(decode);
        }
        c = c->clone();
        c->set_req(1, load_klass);
        c = _gvn.transform(c);
      }
    }
  }
  return c;
}
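
// Example of the pattern handled above (a sketch; 'Foo' is hypothetical):
// a guard such as 'obj.getClass() == Foo.class' parses to
// CmpP(LoadKlass(from obj's header), Foo_klass_constant). If profiling says
// obj is speculatively a Foo, maybe_cast_profiled_obj() pins that type on
// obj, and the rebuilt LoadKlass/CmpP chain can then fold to a constant, at
// the price of a speculative trap if the profile turns out to be wrong.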

//------------------------------do_one_bytecode--------------------------------
    b = _gvn.transform( new ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;
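
    // For example (per the JVM spec): 'iinc 2, -1' decrements local #2 in
    // place; here that becomes set_local(2, AddI(intcon(-1), local(2))),
    // with no change to the expression stack.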

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(nullptr);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // null exception oop throws null pointer exception
    null_check(peek());
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    if (!_gvn.type(b)->speculative_maybe_null() &&
        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
      inc_sp(1);
      Node* null_ctl = top();
      b = null_check_oop(b, &null_ctl, true, true, true);
      assert(null_ctl->is_top(), "no null control here");
      dec_sp(1);
    } else if (_gvn.type(b)->speculative_always_null() &&
               !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
      inc_sp(1);
      b = null_assert(b);
      dec_sp(1);
    }
    c = _gvn.transform( new CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;

  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpPNode(b, a) );
    c = optimize_cmp_with_klass(c);
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInlineKlass.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm_io.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern uint explicit_null_checks_inserted,
            explicit_null_checks_elided;
#endif

Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  // Feed unused profile data to type speculation
  if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    ciKlass* array_type = nullptr;
    ciKlass* element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
    if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
      ld = record_profile_for_speculation(ld, element_type, element_ptr);
    }
  }
  return ld;
}


//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, 0, elemtype);
  if (stopped())  return;     // guaranteed null or range check

  Node* array_index = pop();
  Node* array = pop();

  // Handle inline type arrays
  const TypeOopPtr* element_ptr = elemtype->make_oopptr();
  const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();

  if (!array_type->is_not_flat()) {
    // Cannot statically determine if array is a flat array, emit runtime check
    assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
           (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->maybe_flat_in_array()), "array can't be flat");
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    ideal.if_then(flat_array_test(array, /* flat = */ false)); {
      // Non-flat array
      sync_kit(ideal);
      if (!array_type->is_flat()) {
        assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
        const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
        DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
        if (needs_range_check(array_type->size(), array_index)) {
          // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
          // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
          // possibly float above the range check at any point.
          decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
        }
        Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
        if (element_ptr->is_inlinetypeptr()) {
          ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
        }
        ideal.set(res, ld);
      }
      ideal.sync_kit(this);
    } ideal.else_(); {
      // Flat array
      sync_kit(ideal);
      if (!array_type->is_not_flat()) {
        if (element_ptr->is_inlinetypeptr()) {
          ciInlineKlass* vk = element_ptr->inline_klass();
          Node* flat_array = cast_to_flat_array(array, vk);
          Node* vt = InlineTypeNode::make_from_flat_array(this, vk, flat_array, array_index);
          ideal.set(res, vt);
        } else {
          // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
          // runtime call to correctly load the inline type element from the flat array.
          Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
          bool is_null_free = array_type->is_null_free() || !UseNullableValueFlattening;
          if (is_null_free) {
            inline_type = cast_not_null(inline_type);
          }
          ideal.set(res, inline_type);
        }
      }
      ideal.sync_kit(this);
    } ideal.end_if();
    sync_kit(ideal);
    Node* ld = _gvn.transform(ideal.value(res));
    ld = record_profile_for_speculation_at_array_load(ld);
    push_node(bt, ld);
    return;
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  }
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  ld = record_profile_for_speculation_at_array_load(ld);
  // Loading an inline type from a non-flat array
  if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
    assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
    ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
  }
  push_node(bt, ld);
}
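
// The IdealKit dance used above, reduced to its skeleton (a sketch, not the
// exact code): IdealKit builds the flat/non-flat diamond, while the paired
// sync_kit(ideal) / ideal.sync_kit(this) calls hand the JVM and memory state
// back and forth between the kit and this Parse so both sides keep a
// consistent view of the graph:
//
//   IdealKit ideal(this);
//   IdealVariable res(ideal);
//   ideal.declarations_done();
//   ideal.if_then(flat_array_test(array, /* flat = */ false)); {
//     sync_kit(ideal);         // continue emitting in GraphKit/Parse world
//     ...                      // non-flat load; ideal.set(res, ld)
//     ideal.sync_kit(this);    // hand the state back to IdealKit
//   } ideal.else_(); {
//     ...                      // flat load; ideal.set(res, vt)
//   } ideal.end_if();
//   sync_kit(ideal);
//   Node* result = _gvn.transform(ideal.value(res));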

Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
  // The membars below keep this access to an unknown flat array correctly
  // ordered with other unknown and known flat array accesses.
  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  Node* call = nullptr;
  {
    // Re-execute flat array load if runtime call triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_bci(_bci);
    jvms()->set_should_reexecute(true);
    inc_sp(2);
    kill_dead_locals();
    call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                             OptoRuntime::load_unknown_inline_Type(),
                             OptoRuntime::load_unknown_inline_Java(),
                             nullptr, TypeRawPtr::BOTTOM,
                             array, array_index);
  }
  make_slow_call_ex(call, env()->Throwable_klass(), false);
  Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));

  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  // Keep track of the information that the inline type is in flat arrays
  const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
  return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
}

//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, type2size[bt], elemtype);
  if (stopped())  return;     // guaranteed null or range check
  Node* stored_value_casted = nullptr;
  if (bt == T_OBJECT) {
    stored_value_casted = array_store_check(adr, elemtype);
    if (stopped()) {
      return;
    }
  }
  Node* const stored_value = pop_node(bt); // Value to store
  Node* const array_index = pop();         // Index in the array
  Node* array = pop();                     // The array itself

  const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = elemtype->make_oopptr();
    const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
    // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
    // This is only legal for non-null stores because the array_store_check always passes for null, even
    // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
    bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
    bool not_null_free = not_inline;
    bool not_flat = not_inline || (stored_value_casted_type->is_inlinetypeptr() &&
                                   !stored_value_casted_type->inline_klass()->maybe_flat_in_array());
    if (!array_type->is_not_null_free() && not_null_free) {
      // Storing a non-inline type, mark array as not null-free.
      array_type = array_type->cast_to_not_null_free();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
      replace_in_map(array, cast);
      array = cast;
    }
    if (!array_type->is_not_flat() && not_flat) {
      // Storing a value that cannot live in a flat array, mark array as not flat.
      array_type = array_type->cast_to_not_flat();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
      replace_in_map(array, cast);
      array = cast;
    }

    if (array_type->is_null_free() && elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
      // Array of null-free empty inline type, there is only 1 state for the elements
      assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
      return;
    }

    if (!array_type->is_not_flat()) {
      // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
      assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
             (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
      // TODO 8350865 Depending on the available layouts, we can avoid this check in below flat/not-flat branches. Also the safe_for_replace arg is now always true.
      array = inline_array_null_guard(array, stored_value_casted, 3, true);
      IdealKit ideal(this);
      ideal.if_then(flat_array_test(array, /* flat = */ false)); {
        // Non-flat array
        if (!array_type->is_flat()) {
          sync_kit(ideal);
          assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
          inc_sp(3);
          access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
          dec_sp(3);
          ideal.sync_kit(this);
        }
      } ideal.else_(); {
        // Flat array
        sync_kit(ideal);
        if (!array_type->is_not_flat()) {
          // Try to determine the inline klass type of the stored value
          ciInlineKlass* vk = nullptr;
          if (stored_value_casted_type->is_inlinetypeptr()) {
            vk = stored_value_casted_type->inline_klass();
          } else if (elemtype->is_inlinetypeptr()) {
            vk = elemtype->inline_klass();
          }

          if (vk != nullptr) {
            // Element type is known, cast and store to flat array layout.
            Node* flat_array = cast_to_flat_array(array, vk);

            // Re-execute flat array store if buffering triggers deoptimization
            PreserveReexecuteState preexecs(this);
            jvms()->set_should_reexecute(true);
            inc_sp(3);

            if (!stored_value_casted->is_InlineType()) {
              assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
              stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
            }

            stored_value_casted->as_InlineType()->store_flat_array(this, flat_array, array_index);
          } else {
            // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
            store_to_unknown_flat_array(array, array_index, stored_value_casted);
          }
        }
        ideal.sync_kit(this);
      }
      ideal.end_if();
      sync_kit(ideal);
      return;
    } else if (!array_type->is_not_null_free()) {
      // Array is not flat but may be null free
      assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
      array = inline_array_null_guard(array, stored_value_casted, 3, true);
    }
  }
  inc_sp(3);
  access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
  dec_sp(3);
}
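
// A concrete case for the narrowing casts above (illustrative only): in
//
//   Object[] a = ...;
//   a[i] = new Object();   // stored value: exact, non-null java.lang.Object
//
// the stored value can neither be null nor an inline type, so the array
// being stored to is marked not-null-free and not-flat up front, and none
// of the flat-array runtime paths above need to be emitted for this store.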

// Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
// array layout) or not exact (could have different flat array layouts at runtime).
void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
  // The membars below keep this access to an unknown flat array correctly
  // ordered with other unknown and known flat array accesses.
  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  Node* call = nullptr;
  {
    // Re-execute flat array store if runtime call triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_bci(_bci);
    jvms()->set_should_reexecute(true);
    inc_sp(3);
    kill_dead_locals();
    call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                             OptoRuntime::store_unknown_inline_Type(),
                             OptoRuntime::store_unknown_inline_Java(),
                             nullptr, TypeRawPtr::BOTTOM,
                             non_null_stored_value, array, idx);
  }
  make_slow_call_ex(call, env()->Throwable_klass(), false);

  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
}

//------------------------------array_addressing-------------------------------
// Pull array and index from the stack. Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
  Node *idx = peek(0+vals);   // Get from stack without popping
  Node *ary = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detection of a null exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  elemtype = arytype->elem();

  if (UseUniqueSubclasses) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->instance_klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  if (!arytype->is_loaded()) {
    // Only fails for some -Xcomp runs
    // The class is unloaded. We have to run this bytecode in the interpreter.
    ciKlass* klass = arytype->unloaded_klass();

    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  klass, "!loaded array");
    return top();
  }

  ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);

  if (needs_range_check(sizetype, idx)) {
    create_range_check(idx, ary, sizetype);
  } else if (C->log() != nullptr) {
    C->log()->elem("observe that='!need_range_check'");
  }

  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());
  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}

// Check if we need a range check for an array access. This is the case if the index is either negative or if it
// could be greater than or equal to the smallest possible array size (i.e. out-of-bounds).
bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
  const TypeInt* index_type = _gvn.type(index)->is_int();
  return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
}

void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
  Node* tst;
  if (sizetype->_hi <= 0) {
    // The greatest array bound is negative, so we can conclude that we're
    // compiling unreachable code, but the unsigned compare trick used below
    // only works with non-negative lengths. Instead, hack "tst" to be zero so
    // the uncommon_trap path will always be taken.
    tst = _gvn.intcon(0);
  } else {
    // Range is constant in array-oop, so we can use the original state of mem
    Node* len = load_array_length(ary);

    // Test length vs index (standard trick using unsigned compare)
    Node* chk = _gvn.transform(new CmpUNode(idx, len));
    BoolTest::mask btest = BoolTest::lt;
    tst = _gvn.transform(new BoolNode(chk, btest));
  }
  RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
  _gvn.set_type(rc, rc->Value(&_gvn));
  if (!tst->is_Con()) {
    record_for_igvn(rc);
  }
  set_control(_gvn.transform(new IfTrueNode(rc)));
  // Branch to failure if out of bounds
  {
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(new IfFalseNode(rc)));
    if (C->allow_range_check_smearing()) {
      // Do not use builtin_throw, since range checks are sometimes
      // made more stringent by an optimistic transformation.
      // This creates "tentative" range checks at this point,
      // which are not guaranteed to throw exceptions.
      // See IfNode::Ideal, is_range_check, adjust_check.
      uncommon_trap(Deoptimization::Reason_range_check,
                    Deoptimization::Action_make_not_entrant,
                    nullptr, "range_check");
    } else {
      // If we have already recompiled with the range-check-widening
      // heroic optimization turned off, then we must really be throwing
      // range check exceptions.
      builtin_throw(Deoptimization::Reason_range_check);
    }
  }
}

// For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
// and null-freeness. We can either prepare the speculative type for later uses or emit explicit speculative checks with
// traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
// non-null-free implies non-flat, which allows us to remove flatness checks). This makes the graph simpler.
Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
                                                         const Type*& element_type) {
  if (!array_type->is_flat() && !array_type->is_not_flat()) {
    // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
    // we can rely on a fixed memory layout (i.e. either a flat layout or not).
    array = cast_to_speculative_array_type(array, array_type, element_type);
  } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
    // at this bci.
    array = cast_to_profiled_array_type(array);
  }

  // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile
  // data for a non-null-free or non-flat array. Speculating on a non-null-free array doesn't help aaload but could
  // be profitable for a subsequent aastore.
  if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
    array = speculate_non_null_free_array(array, array_type);
  }
  if (!array_type->is_flat() && !array_type->is_not_flat()) {
    array = speculate_non_flat_array(array, array_type);
  }
  return array;
}

// Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
// wrong. On the fast path, we add a CheckCastPP to use the exact type.
Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
  Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
  ciKlass* speculative_array_type = array_type->speculative_type();
  if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
    // No speculative type, check profile data at this bci
    speculative_array_type = nullptr;
    reason = Deoptimization::Reason_class_check;
    if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
      ciKlass* profiled_element_type = nullptr;
      ProfilePtrKind element_ptr = ProfileMaybeNull;
      bool flat_array = true;
      bool null_free_array = true;
      method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
                                           null_free_array);
    }
  }
  if (speculative_array_type != nullptr) {
    // Speculate that this array has the exact type reported by profile data
    Node* casted_array = nullptr;
    DEBUG_ONLY(Node* old_control = control();)
    Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
    if (stopped()) {
      // The check always fails and therefore profile information is incorrect. Don't use it.
      assert(old_control == slow_ctl, "type check should have been removed");
      set_control(slow_ctl);
    } else if (!slow_ctl->is_top()) {
      { PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      replace_in_map(array, casted_array);
      array_type = _gvn.type(casted_array)->is_aryptr();
      element_type = array_type->elem();
      return casted_array;
    }
  }
  return array;
}

// Create a CheckCastPP when the speculative type can improve the current type.
Node* Parse::cast_to_profiled_array_type(Node* const array) {
  ciKlass* array_type = nullptr;
  ciKlass* element_type = nullptr;
  ProfilePtrKind element_ptr = ProfileMaybeNull;
  bool flat_array = true;
  bool null_free_array = true;
  method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
  if (array_type != nullptr) {
    return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
  }
  return array;
}

// Speculate that the array is non-null-free. We emit a trap when this turns out to be
// wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
  bool null_free_array = true;
  Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
  if (array_type->speculative() != nullptr &&
      array_type->speculative()->is_aryptr()->is_not_null_free() &&
      !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
    null_free_array = false;
    reason = Deoptimization::Reason_speculate_class_check;
  } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
    ciKlass* profiled_array_type = nullptr;
    ciKlass* profiled_element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
                                         null_free_array);
    reason = Deoptimization::Reason_class_check;
  }
  if (!null_free_array) {
    { // Deoptimize if null-free array
      BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
      uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
    }
    assert(!stopped(), "null-free array should have been caught earlier");
    Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
    replace_in_map(array, casted_array);
    array_type = _gvn.type(casted_array)->is_aryptr();
    return casted_array;
  }
  return array;
}
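
// The BuildCutout pattern used above, in skeleton form (a sketch): the
// constructor splits control on the given test, the code inside the scope
// is emitted on the failing (unlikely) side, and when the scope closes
// control resumes on the passing side:
//
//   { // scope of the cutout
//     BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
//     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
//   }
//   // fall through: array is now known not to be null-free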

// Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
// On the fast path, we add a CheckCastPP to use the non-flat type.
Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
  bool flat_array = true;
  Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
  if (array_type->speculative() != nullptr &&
      array_type->speculative()->is_aryptr()->is_not_flat() &&
      !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
    flat_array = false;
    reason = Deoptimization::Reason_speculate_class_check;
  } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
    ciKlass* profiled_array_type = nullptr;
    ciKlass* profiled_element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
                                         null_free_array);
    reason = Deoptimization::Reason_class_check;
  }
  if (!flat_array) {
    { // Deoptimize if flat array
      BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
      uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
    }
    assert(!stopped(), "flat array should have been caught earlier");
    Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
    replace_in_map(array, casted_array);
    return casted_array;
  }
  return array;
}

// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}


// sentinel value for the target bci to mark never-taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
  // False branch
  Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    NOT_PRODUCT(explicit_null_checks_elided++);
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {                      // Path is live.
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
  }

  if (do_stress_trap) {
    stress_trap(iff, counter, incr_store);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken, Node** stress_count_mem) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  nullptr, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  Node* counter = nullptr;
  Node* incr_store = nullptr;
  bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
  if (do_stress_trap) {
    increment_trap_stress_counter(counter, incr_store);
    if (stress_count_mem != nullptr) {
      *stress_count_mem = incr_store;
    }
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  }

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new IfTrueNode(iff);
  Node* untaken_branch = new IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing() && !new_path) {
        // Mark the successor block as parsed (if we haven't created a new path)
        branch_block->next_path_num();
      }
    } else {
      adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
      if (!stopped()) {
        if (new_path) {
          // Merge by using a new path
          merge_new_path(target_bci);
        } else if (ctrl_taken != nullptr) {
          // Don't merge but save taken branch to be wired by caller
          *ctrl_taken = control();
        } else {
          merge(target_bci);
        }
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped() && ctrl_taken == nullptr) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed (if caller does not re-wire control flow)
      next_block->next_path_num();
    }
  } else {
    adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
  }

  if (do_stress_trap) {
    stress_trap(iff, counter, incr_store);
  }
}
1975
1976
1977 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1978 if (t->speculative() == nullptr) {
1979 return ProfileUnknownNull;
1980 }
1981 if (t->speculative_always_null()) {
1982 return ProfileAlwaysNull;
1983 }
1984 if (t->speculative_maybe_null()) {
1985 return ProfileMaybeNull;
1986 }
1987 return ProfileNeverNull;
1988 }
1989
1990 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1991 inc_sp(2);
1992 Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1993 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1994 speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1995 dec_sp(2);
1996 if (btest == BoolTest::ne) {
1997 {
1998 PreserveJVMState pjvms(this);
1999 replace_in_map(input, cast);
2000 int target_bci = iter().get_dest();
2001 merge(target_bci);
2002 }
2003 record_for_igvn(eq_region);
2004 set_control(_gvn.transform(eq_region));
2005 } else {
2006 replace_in_map(input, cast);
2007 }
2008 }
2009
2010 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
2011 inc_sp(2);
2012 null_ctl = top();
2013 Node* cast = null_check_oop(input, &null_ctl,
2014 input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
2015 false,
2016 speculative_ptr_kind(tinput) == ProfileNeverNull &&
2017 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
2018 dec_sp(2);
2019 return cast;
2020 }
2021
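// type_check_receiver emits an exact klass check against input_type, updates
// *non_null_input with a more precisely typed cast on the fast path, and returns the
// slow-path control, which is turned into an uncommon trap here.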
2022 void Parse::acmp_type_check_or_trap(Node** non_null_input, ciKlass* input_type, Deoptimization::DeoptReason reason) {
2023 Node* slow_ctl = type_check_receiver(*non_null_input, input_type, 1.0, non_null_input);
2024 {
2025 PreserveJVMState pjvms(this);
2026 inc_sp(2);
2027 set_control(slow_ctl);
2028 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2029 }
2030 }
2031
2032 void Parse::acmp_type_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2033 Node* null_ctl;
2034 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2035
2036 if (input_type != nullptr) {
2037 Deoptimization::DeoptReason reason;
2038 if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2039 reason = Deoptimization::Reason_speculate_class_check;
2040 } else {
2041 reason = Deoptimization::Reason_class_check;
2042 }
2043 acmp_type_check_or_trap(&cast, input_type, reason);
2044 } else {
2045 // No specific type, check for inline type
2046 BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2047 inc_sp(2);
2048 uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2049 }
2050
2051 Node* ne_region = new RegionNode(2);
2052 ne_region->add_req(null_ctl);
2053 ne_region->add_req(control());
2054
2055 record_for_igvn(ne_region);
2056 set_control(_gvn.transform(ne_region));
2057 if (btest == BoolTest::ne) {
2058 {
2059 PreserveJVMState pjvms(this);
2060 if (null_ctl == top()) {
2061 replace_in_map(input, cast);
2062 }
2063 int target_bci = iter().get_dest();
2064 merge(target_bci);
2065 }
2066 record_for_igvn(eq_region);
2067 set_control(_gvn.transform(eq_region));
2068 } else {
2069 if (null_ctl == top()) {
2070 replace_in_map(input, cast);
2071 }
2072 set_control(_gvn.transform(ne_region));
2073 }
2074 }
2075
2076 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2077 ciKlass* left_type = nullptr;
2078 ciKlass* right_type = nullptr;
2079 ProfilePtrKind left_ptr = ProfileUnknownNull;
2080 ProfilePtrKind right_ptr = ProfileUnknownNull;
2081 bool left_inline_type = true;
2082 bool right_inline_type = true;
2083
2084 // Leverage profiling at acmp
2085 if (UseACmpProfile) {
2086 method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2087 if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2088 left_type = nullptr;
2089 right_type = nullptr;
2090 left_inline_type = true;
2091 right_inline_type = true;
2092 }
2093 if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2094 left_ptr = ProfileUnknownNull;
2095 right_ptr = ProfileUnknownNull;
2096 }
2097 }
2098
2099 if (UseTypeSpeculation) {
2100 record_profile_for_speculation(left, left_type, left_ptr);
2101 record_profile_for_speculation(right, right_type, right_ptr);
2102 }
2103
2104 if (!Arguments::is_valhalla_enabled()) {
2105 Node* cmp = CmpP(left, right);
2106 cmp = optimize_cmp_with_klass(cmp);
2107 do_if(btest, cmp);
2108 return;
2109 }
2110
2111 // Check for equality before potentially allocating
2112 if (left == right) {
2113 do_if(btest, makecon(TypeInt::CC_EQ));
2114 return;
2115 }
2116
2117 // Allocate inline type operands and re-execute on deoptimization
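// buffer() returns a heap-allocated ("buffered") copy of a scalarized value object so
// that the pointer comparison below has a real oop to work with; should_reexecute
// ensures that a deopt during this allocation re-runs the acmp bytecode.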
2118 if (left->is_InlineType()) {
2119 PreserveReexecuteState preexecs(this);
2120 inc_sp(2);
2121 jvms()->set_should_reexecute(true);
2122 left = left->as_InlineType()->buffer(this);
2123 }
2124 if (right->is_InlineType()) {
2125 PreserveReexecuteState preexecs(this);
2126 inc_sp(2);
2127 jvms()->set_should_reexecute(true);
2128 right = right->as_InlineType()->buffer(this);
2129 }
2130
2131 // First, do a normal pointer comparison
2132 const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2133 const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2134 Node* cmp = CmpP(left, right);
2135 record_for_igvn(cmp);
2136 cmp = optimize_cmp_with_klass(cmp);
2137 if (tleft == nullptr || !tleft->can_be_inline_type() ||
2138 tright == nullptr || !tright->can_be_inline_type()) {
2139 // A pointer comparison is sufficient if one of the operands cannot be an inline type
2140 do_if(btest, cmp);
2141 return;
2142 }
2143
2144 // Don't add traps to unstable if branches because additional checks are required to
2145 // decide if the operands are equal/substitutable and we therefore shouldn't prune
2146 // branches for one if based on the profiling of the acmp branches.
2147 // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2148 // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2149 // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2150 const bool can_trap = true;
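// Consequently, the intermediate pointer checks below pass !can_trap to do_if, while
// only the final isSubstitutable() test at the end passes can_trap and may emit traps.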
2151
2152 Node* eq_region = nullptr;
2153 if (btest == BoolTest::eq) {
2154 do_if(btest, cmp, !can_trap, true);
2155 if (stopped()) {
2156 // Pointers are equal, operands must be equal
2157 return;
2158 }
2159 } else {
2160 assert(btest == BoolTest::ne, "only eq or ne");
2161 Node* is_not_equal = nullptr;
2162 eq_region = new RegionNode(3);
2163 {
2164 PreserveJVMState pjvms(this);
2165 // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2166 do_if(btest, cmp, !can_trap, false, &is_not_equal);
2167 if (!stopped()) {
2168 eq_region->init_req(1, control());
2169 }
2170 }
2171 if (is_not_equal == nullptr || is_not_equal->is_top()) {
2172 record_for_igvn(eq_region);
2173 set_control(_gvn.transform(eq_region));
2174 return;
2175 }
2176 set_control(is_not_equal);
2177 }
2178
2179 // Prefer speculative types if available
2180 if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2181 if (tleft->speculative_type() != nullptr) {
2182 left_type = tleft->speculative_type();
2183 }
2184 if (tright->speculative_type() != nullptr) {
2185 right_type = tright->speculative_type();
2186 }
2187 }
2188
2189 if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2190 ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2191 if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2192 left_ptr = speculative_left_ptr;
2193 } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2194 left_ptr = speculative_left_ptr;
2195 }
2196 }
2197 if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2198 ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2199 if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2200 right_ptr = speculative_right_ptr;
2201 } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2202 right_ptr = speculative_right_ptr;
2203 }
2204 }
2205
2206 if (left_ptr == ProfileAlwaysNull) {
2207 // Comparison with null. Assert the input is indeed null and we're done.
2208 acmp_always_null_input(left, tleft, btest, eq_region);
2209 return;
2210 }
2211 if (right_ptr == ProfileAlwaysNull) {
2212 // Comparison with null. Assert the input is indeed null and we're done.
2213 acmp_always_null_input(right, tright, btest, eq_region);
2214 return;
2215 }
2216 if (left_type != nullptr && !left_type->is_inlinetype()) {
2217 // Comparison with an object of known type
2218 acmp_type_check(left, tleft, left_ptr, left_type, btest, eq_region);
2219 return;
2220 }
2221 if (right_type != nullptr && !right_type->is_inlinetype()) {
2222 // Comparison with an object of known type
2223 acmp_type_check(right, tright, right_ptr, right_type, btest, eq_region);
2224 return;
2225 }
2226 if (!left_inline_type) {
2227 // Comparison with an object known not to be an inline type
2228 acmp_type_check(left, tleft, left_ptr, nullptr, btest, eq_region);
2229 return;
2230 }
2231 if (!right_inline_type) {
2232 // Comparison with an object known not to be an inline type
2233 acmp_type_check(right, tright, right_ptr, nullptr, btest, eq_region);
2234 return;
2235 }
2236
2237 // Pointers are not equal, check if the right operand is non-null
2238 Node* ne_region = new RegionNode(6);
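// ne_region merges the five ways the operands can be proven not substitutable:
// (1) right is null, (2) right is not a value object (pointers already differ),
// (3) left is null, (4) the operand classes differ, (5) isSubstitutable() is false.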
2239 Node* null_ctl = nullptr;
2240 Node* not_null_left = nullptr;
2241 Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2242 ne_region->init_req(1, null_ctl);
2243
2244 if (!stopped()) {
2245 // The right operand is non-null, check if it is the speculative inline type if possible
2246 // (which later allows isSubstitutable to be intrinsified), or any inline type if no
2247 // speculation is available.
2248 if (right_type != nullptr && right_type->is_inlinetype()) {
2249 acmp_type_check_or_trap(&not_null_right, right_type, Deoptimization::Reason_speculate_class_check);
2250 } else {
2251 Node* is_value = inline_type_test(not_null_right);
2252 IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2253 Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2254 ne_region->init_req(2, not_value);
2255 set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2256 }
2257
2258 // The right operand is an inline type; check if the left operand is non-null
2259 not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2260 ne_region->init_req(3, null_ctl);
2261 if (!stopped()) {
2262 // Check if the lhs operand is of a specific speculative inline type (see above).
2263 // If it is not, there is no need to check that the lhs is a value object: the
2264 // klass comparison below enforces that it has the same class as the rhs, which is one.
2265 if (left_type != nullptr && left_type->is_inlinetype()) {
2266 acmp_type_check_or_trap(&not_null_left, left_type, Deoptimization::Reason_speculate_class_check);
2267 }
2268 if (!stopped()) {
2269 // Check if both operands are of the same class.
2270 Node* kls_left = load_object_klass(not_null_left);
2271 Node* kls_right = load_object_klass(not_null_right);
2272 Node* kls_cmp = CmpP(kls_left, kls_right);
2273 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2274 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2275 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2276 set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2277 ne_region->init_req(4, kls_ne);
2278 }
2279 }
2280 }
2281
2282 if (stopped()) {
2283 record_for_igvn(ne_region);
2284 set_control(_gvn.transform(ne_region));
2285 if (btest == BoolTest::ne) {
2286 {
2287 PreserveJVMState pjvms(this);
2288 int target_bci = iter().get_dest();
2289 merge(target_bci);
2290 }
2291 record_for_igvn(eq_region);
2292 set_control(_gvn.transform(eq_region));
2293 }
2294 return;
2295 }
2296
2297 // Both operands are value objects of the same class; we need to perform a
2298 // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2299 Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2300 Node* mem = reset_memory();
2301 Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2302
2303 Node* eq_io_phi = nullptr;
2304 Node* eq_mem_phi = nullptr;
2305 if (eq_region != nullptr) {
2306 eq_io_phi = PhiNode::make(eq_region, i_o());
2307 eq_mem_phi = PhiNode::make(eq_region, mem);
2308 }
2309
2310 set_all_memory(mem);
2311
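// Emit the runtime call. At the Java level the overall test roughly corresponds to
//   left.getClass() == right.getClass() && ValueObjectMethods.isSubstitutable(left, right)
// where the class comparison has already been emitted above; only the substitutability
// call remains (a sketch of the semantics, not the exact library code).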
2312 kill_dead_locals();
2313 ciSymbol* subst_method_name = ciSymbols::isSubstitutable_name();
2314 ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(subst_method_name, ciSymbols::object_object_boolean_signature());
2315 CallStaticJavaNode* call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2316 call->set_override_symbolic_info(true);
2317 call->init_req(TypeFunc::Parms, not_null_left);
2318 call->init_req(TypeFunc::Parms+1, not_null_right);
2319 inc_sp(2);
2320 set_edges_for_java_call(call, false, false);
2321 Node* ret = set_results_for_java_call(call, false, true);
2322 dec_sp(2);
2323
2324 // Test the return value of ValueObjectMethods::isSubstitutable()
2325 // This is the last check, do_if can emit traps now.
2326 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2327 Node* ctl = C->top();
2328 Node* stress_count_mem = nullptr;
2329 if (btest == BoolTest::eq) {
2330 PreserveJVMState pjvms(this);
2331 do_if(btest, subst_cmp, can_trap, false, nullptr, &stress_count_mem);
2332 if (!stopped()) {
2333 ctl = control();
2334 }
2335 } else {
2336 assert(btest == BoolTest::ne, "only eq or ne");
2337 PreserveJVMState pjvms(this);
2338 do_if(btest, subst_cmp, can_trap, false, &ctl, &stress_count_mem);
2339 if (!stopped()) {
2340 eq_region->init_req(2, control());
2341 eq_io_phi->init_req(2, i_o());
2342 eq_mem_phi->init_req(2, reset_memory());
2343 }
2344 }
2345 if (stress_count_mem != nullptr) {
2346 set_memory(stress_count_mem, stress_count_mem->adr_type());
2347 }
2348 ne_region->init_req(5, ctl);
2349 ne_io_phi->init_req(5, i_o());
2350 ne_mem_phi->init_req(5, reset_memory());
2351
2352 record_for_igvn(ne_region);
2353 set_control(_gvn.transform(ne_region));
2354 set_i_o(_gvn.transform(ne_io_phi));
2355 set_all_memory(_gvn.transform(ne_mem_phi));
2356
2357 if (btest == BoolTest::ne) {
2358 {
2359 PreserveJVMState pjvms(this);
2360 int target_bci = iter().get_dest();
2361 merge(target_bci);
2362 }
2363
2364 record_for_igvn(eq_region);
2365 set_control(_gvn.transform(eq_region));
2366 set_i_o(_gvn.transform(eq_io_phi));
2367 set_all_memory(_gvn.transform(eq_mem_phi));
2368 }
2369 }
2370
2371 // Force unstable-if traps to be taken randomly to expose intermittent bugs such as incorrect debug information.
2372 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2373 // then either takes the trap or executes the original, unstable if.
2374 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2375 // Search for an unstable if trap
2376 CallStaticJavaNode* trap = nullptr;
2377 assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2378 ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2379 if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2380 // No suitable trap found. Remove unused counter load and increment.
2381 C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2382 return;
2383 }
2384
2385 // Remove trap from optimization list since we add another path to the trap.
2386 bool success = C->remove_unstable_if_trap(trap, true);
2387 assert(success, "Trap already modified");
2388
2389 // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2390 int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2423 }
2424
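// Called on each branch of a freshly parsed if: when the path enters a not yet parsed
// single-entry loop (SEL) head, parse predicates are added so that loop-invariant
// checks can later be hoisted above the loop.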
2425 void Parse::maybe_add_predicate_after_if(Block* path) {
2426 if (path->is_SEL_head() && path->preds_parsed() == 0) {
2427 // Add predicates at the bci of the if that dominates the loop so that
2428 // traps can be recorded on that if's profile data
2429 int bc_depth = repush_if_args();
2430 add_parse_predicates();
2431 dec_sp(bc_depth);
2432 path->set_has_predicates();
2433 }
2434 }
2435
2436
2437 //----------------------------adjust_map_after_if------------------------------
2438 // Adjust the JVM state to reflect the result of taking this path.
2439 // Basically, it means inspecting the CmpNode controlling this
2440 // branch, seeing how it constrains a tested value, and then
2441 // deciding if it's worth our while to encode this constraint
2442 // as graph nodes in the current abstract interpretation map.
2443 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2444 if (!c->is_Cmp()) {
2445 maybe_add_predicate_after_if(path);
2446 return;
2447 }
2448
2449 if (stopped() || btest == BoolTest::illegal) {
2450 return; // nothing to do
2451 }
2452
2453 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2454
2455 if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2456 repush_if_args();
2457 Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2458 Deoptimization::Action_reinterpret,
2459 nullptr,
2460 (is_fallthrough ? "taken always" : "taken never"));
2461
2462 if (call != nullptr) {
2463 C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2464 }
2465 return;
2466 }
2467
2468 if (c->is_FlatArrayCheck()) {
2469 maybe_add_predicate_after_if(path);
2470 return;
2471 }
2472
2473 Node* val = c->in(1);
2474 Node* con = c->in(2);
2475 const Type* tcon = _gvn.type(con);
2476 const Type* tval = _gvn.type(val);
2477 bool have_con = tcon->singleton();
2478 if (tval->singleton()) {
2479 if (!have_con) {
2480 // Swap, so constant is in con.
2481 con = val;
2482 tcon = tval;
2483 val = c->in(2);
2484 tval = _gvn.type(val);
2485 btest = BoolTest(btest).commute();
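// e.g. "5 > x" (CmpI(5, x) with BoolTest::gt) is commuted to "x < 5", so the
// constant always ends up in con.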
2486 have_con = true;
2487 } else {
2488 // Do we have two constants? Then leave well enough alone.
2489 have_con = false;
2490 }
2491 }
2492 if (!have_con) { // remaining adjustments need a con
2608 Node* obj = nullptr;
2609 const TypeOopPtr* cast_type = nullptr;
2610 // Insert a cast node with a narrowed type after a successful type check.
2611 if (match_type_check(_gvn, btest, con, tcon, val, tval,
2612 &obj, &cast_type)) {
2613 assert(obj != nullptr && cast_type != nullptr, "missing type check info");
2614 const Type* obj_type = _gvn.type(obj);
2615 const TypeOopPtr* tboth = obj_type->join_speculative(cast_type)->isa_oopptr();
2616 if (tboth != nullptr && tboth != obj_type && tboth->higher_equal(obj_type)) {
2617 int obj_in_map = map()->find_edge(obj);
2618 JVMState* jvms = this->jvms();
2619 if (obj_in_map >= 0 &&
2620 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2621 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2622 const Type* tcc = ccast->as_Type()->type();
2623 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2624 // Delay transform() call to allow recovery of pre-cast value
2625 // at the control merge.
2626 _gvn.set_type_bottom(ccast);
2627 record_for_igvn(ccast);
2628 if (tboth->is_inlinetypeptr()) {
2629 ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2630 }
2631 // Here's the payoff.
2632 replace_in_map(obj, ccast);
2633 }
2634 }
2635 }
2636
2637 int val_in_map = map()->find_edge(val);
2638 if (val_in_map < 0) return; // replace_in_map would be useless
2639 {
2640 JVMState* jvms = this->jvms();
2641 if (!(jvms->is_loc(val_in_map) ||
2642 jvms->is_stk(val_in_map)))
2643 return; // again, it would be useless
2644 }
2645
2646 // Check for a comparison to a constant, and "know" that the compared
2647 // value is constrained on this path.
2648 assert(tcon->singleton(), "");
2649 ConstraintCastNode* ccast = nullptr;
2650 Node* cast = nullptr;
2714 if (c->Opcode() == Op_CmpP &&
2715 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2716 c->in(2)->is_Con()) {
2717 Node* load_klass = nullptr;
2718 Node* decode = nullptr;
2719 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2720 decode = c->in(1);
2721 load_klass = c->in(1)->in(1);
2722 } else {
2723 load_klass = c->in(1);
2724 }
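// If profiling recorded a speculative type for the object whose klass is loaded,
// cast the object to that type and rebuild the AddP/LoadKlass (and DecodeNKlass)
// chain on top of the cast; the klass compare may then constant-fold during IGVN.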
2725 if (load_klass->in(2)->is_AddP()) {
2726 Node* addp = load_klass->in(2);
2727 Node* obj = addp->in(AddPNode::Address);
2728 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2729 if (obj_type->speculative_type_not_null() != nullptr) {
2730 ciKlass* k = obj_type->speculative_type();
2731 inc_sp(2);
2732 obj = maybe_cast_profiled_obj(obj, k);
2733 dec_sp(2);
2734 if (obj->is_InlineType()) {
2735 assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2736 obj = obj->as_InlineType()->get_oop();
2737 }
2738 // Make the CmpP use the casted obj
2739 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2740 load_klass = load_klass->clone();
2741 load_klass->set_req(2, addp);
2742 load_klass = _gvn.transform(load_klass);
2743 if (decode != nullptr) {
2744 decode = decode->clone();
2745 decode->set_req(1, load_klass);
2746 load_klass = _gvn.transform(decode);
2747 }
2748 c = c->clone();
2749 c->set_req(1, load_klass);
2750 c = _gvn.transform(c);
2751 }
2752 }
2753 }
2754 return c;
2755 }
2756
2757 //------------------------------do_one_bytecode--------------------------------
3460 b = _gvn.transform( new ConvI2DNode(a));
3461 push_pair(b);
3462 break;
3463
3464 case Bytecodes::_iinc: // Increment local
3465 i = iter().get_index(); // Get local index
3466 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
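// e.g. "iinc 3, -1" parses to set_local(3, AddI(intcon(-1), local(3))).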
3467 break;
3468
3469 // Exit points of synchronized methods must have an unlock node
3470 case Bytecodes::_return:
3471 return_current(nullptr);
3472 break;
3473
3474 case Bytecodes::_ireturn:
3475 case Bytecodes::_areturn:
3476 case Bytecodes::_freturn:
3477 return_current(pop());
3478 break;
3479 case Bytecodes::_lreturn:
3480 case Bytecodes::_dreturn:
3481 return_current(pop_pair());
3482 break;
3483
3484 case Bytecodes::_athrow:
3485 // null exception oop throws null pointer exception
3486 null_check(peek());
3487 if (stopped()) return;
3488 // Hook the thrown exception directly to subsequent handlers.
3489 if (BailoutToInterpreterForThrows) {
3490 // Keep method interpreted from now on.
3491 uncommon_trap(Deoptimization::Reason_unhandled,
3492 Deoptimization::Action_make_not_compilable);
3493 return;
3494 }
3495 if (env()->jvmti_can_post_on_exceptions()) {
3496 // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3497 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3498 }
3499 // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3513 // See if we can get some profile data and hand it off to the next block
3514 Block *target_block = block()->successor_for_bci(target_bci);
3515 if (target_block->pred_count() != 1) break;
3516 ciMethodData* methodData = method()->method_data();
3517 if (!methodData->is_mature()) break;
3518 ciProfileData* data = methodData->bci_to_data(bci());
3519 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3520 int taken = ((ciJumpData*)data)->taken();
3521 taken = method()->scale_count(taken);
3522 target_block->set_count(taken);
3523 break;
3524 }
3525
3526 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
3527 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3528 handle_if_null:
3529 // If this is a backwards branch in the bytecodes, add Safepoint
3530 maybe_add_safepoint(iter().get_dest());
3531 a = null();
3532 b = pop();
3533 if (b->is_InlineType()) {
3534 // Null checking a scalarized but nullable inline type. Check the null marker
3535 // input instead of the oop input to avoid keeping buffer allocations alive
3536 c = _gvn.transform(new CmpINode(b->as_InlineType()->get_null_marker(), zerocon(T_INT)));
3537 } else {
3538 if (!_gvn.type(b)->speculative_maybe_null() &&
3539 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3540 inc_sp(1);
3541 Node* null_ctl = top();
3542 b = null_check_oop(b, &null_ctl, true, true, true);
3543 assert(null_ctl->is_top(), "no null control here");
3544 dec_sp(1);
3545 } else if (_gvn.type(b)->speculative_always_null() &&
3546 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3547 inc_sp(1);
3548 b = null_assert(b);
3549 dec_sp(1);
3550 }
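// If either speculative fast path applied, b is now cast to (not-)null and the
// CmpP below constant-folds, so the ifnull branch folds away during IGVN.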
3551 c = _gvn.transform( new CmpPNode(b, a) );
3552 }
3553 do_ifnull(btest, c);
3554 break;
3555
3556 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3557 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3558 handle_if_acmp:
3559 // If this is a backwards branch in the bytecodes, add Safepoint
3560 maybe_add_safepoint(iter().get_dest());
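// JVMS operand order: ..., value1, value2 => a is value2 (the right operand) and
// b is value1 (the left operand) in the call to do_acmp below.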
3561 a = pop();
3562 b = pop();
3563 do_acmp(btest, b, a);
3564 break;
3565
3566 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3567 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3568 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3569 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3570 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3571 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3572 handle_ifxx:
3573 // If this is a backwards branch in the bytecodes, add Safepoint
3574 maybe_add_safepoint(iter().get_dest());
3575 a = _gvn.intcon(0);
3576 b = pop();
3577 c = _gvn.transform( new CmpINode(b, a) );
3578 do_if(btest, c);
3579 break;
3580
3581 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3582 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3583 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3598 break;
3599
3600 case Bytecodes::_lookupswitch:
3601 do_lookupswitch();
3602 break;
3603
3604 case Bytecodes::_invokestatic:
3605 case Bytecodes::_invokedynamic:
3606 case Bytecodes::_invokespecial:
3607 case Bytecodes::_invokevirtual:
3608 case Bytecodes::_invokeinterface:
3609 do_call();
3610 break;
3611 case Bytecodes::_checkcast:
3612 do_checkcast();
3613 break;
3614 case Bytecodes::_instanceof:
3615 do_instanceof();
3616 break;
3617 case Bytecodes::_anewarray:
3618 do_newarray();
3619 break;
3620 case Bytecodes::_newarray:
3621 do_newarray((BasicType)iter().get_index());
3622 break;
3623 case Bytecodes::_multianewarray:
3624 do_multianewarray();
3625 break;
3626 case Bytecodes::_new:
3627 do_new();
3628 break;
3629
3630 case Bytecodes::_jsr:
3631 case Bytecodes::_jsr_w:
3632 do_jsr();
3633 break;
3634
3635 case Bytecodes::_ret:
3636 do_ret();
3637 break;
3638