7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "compiler/compileLog.hpp"
29 #include "interpreter/linkResolver.hpp"
30 #include "jvm_io.h"
31 #include "memory/resourceArea.hpp"
32 #include "memory/universe.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "opto/addnode.hpp"
35 #include "opto/castnode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/divnode.hpp"
38 #include "opto/idealGraphPrinter.hpp"
39 #include "opto/matcher.hpp"
40 #include "opto/memnode.hpp"
41 #include "opto/mulnode.hpp"
42 #include "opto/opaquenode.hpp"
43 #include "opto/parse.hpp"
44 #include "opto/runtime.hpp"
45 #include "runtime/deoptimization.hpp"
46 #include "runtime/sharedRuntime.hpp"
47
48 #ifndef PRODUCT
49 extern uint explicit_null_checks_inserted,
50 explicit_null_checks_elided;
51 #endif
52
53 //---------------------------------array_load----------------------------------
54 void Parse::array_load(BasicType bt) {
55 const Type* elemtype = Type::TOP;
56 bool big_val = bt == T_DOUBLE || bt == T_LONG;
57 Node* adr = array_addressing(bt, 0, elemtype);
58 if (stopped()) return; // guaranteed null or range check
59
60 pop(); // index (already used)
61 Node* array = pop(); // the array itself
62
63 if (elemtype == TypeInt::BOOL) {
64 bt = T_BOOLEAN;
65 }
66 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
67
68 Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
69 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
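// Longs and doubles occupy two JVM stack slots, so "big" values are pushed with push_pair() below.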
70 if (big_val) {
71 push_pair(ld);
72 } else {
73 push(ld);
74 }
75 }
76
77
78 //--------------------------------array_store----------------------------------
79 void Parse::array_store(BasicType bt) {
80 const Type* elemtype = Type::TOP;
81 bool big_val = bt == T_DOUBLE || bt == T_LONG;
82 Node* adr = array_addressing(bt, big_val ? 2 : 1, elemtype);
83 if (stopped()) return; // guaranteed null or range check
84 if (bt == T_OBJECT) {
85 array_store_check();
86 if (stopped()) {
87 return;
88 }
89 }
90 Node* val; // Oop to store
91 if (big_val) {
92 val = pop_pair();
93 } else {
94 val = pop();
95 }
96 pop(); // index (already used)
97 Node* array = pop(); // the array itself
98
99 if (elemtype == TypeInt::BOOL) {
100 bt = T_BOOLEAN;
101 }
102 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
103
104 access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
105 }
106
107
108 //------------------------------array_addressing-------------------------------
109 // Pull array and index from the stack. Compute pointer-to-element.
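// 'vals' is the number of operand-stack slots occupied by the value to be stored
// (0 for loads), so the index and array can be peeked at the right depth without popping.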
110 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
111 Node *idx = peek(0+vals); // Get from stack without popping
112 Node *ary = peek(1+vals); // in case of exception
113
114 // Null check the array base, with correct stack contents
115 ary = null_check(ary, T_ARRAY);
116   // Did we detect a null exception at compile time?
117 if (stopped()) return top();
118
119 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
120 const TypeInt* sizetype = arytype->size();
121 elemtype = arytype->elem();
122
123 if (UseUniqueSubclasses) {
124 const Type* el = elemtype->make_ptr();
185 if (C->allow_range_check_smearing()) {
186 // Do not use builtin_throw, since range checks are sometimes
187 // made more stringent by an optimistic transformation.
188 // This creates "tentative" range checks at this point,
189 // which are not guaranteed to throw exceptions.
190 // See IfNode::Ideal, is_range_check, adjust_check.
191 uncommon_trap(Deoptimization::Reason_range_check,
192 Deoptimization::Action_make_not_entrant,
193 nullptr, "range_check");
194 } else {
195 // If we have already recompiled with the range-check-widening
196 // heroic optimization turned off, then we must really be throwing
197 // range check exceptions.
198 builtin_throw(Deoptimization::Reason_range_check);
199 }
200 }
201 }
202   // Check whether we already know we are throwing a range-check exception
203 if (stopped()) return top();
204
205 // Make array address computation control dependent to prevent it
206 // from floating above the range check during loop optimizations.
207 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
208 assert(ptr != top(), "top should go hand-in-hand with stopped");
209
210 return ptr;
211 }
212
213
214 // returns IfNode
215 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
216 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
217 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
218 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
219 return iff;
220 }
221
222
223 // Sentinel value for the target bci to mark never-taken branches
224 // (according to profiling)
1440 }
1441 }
1442 }
1443
1444 // False branch
1445 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1446 set_control(iffalse);
1447
1448 if (stopped()) { // Path is dead?
1449 NOT_PRODUCT(explicit_null_checks_elided++);
1450 if (C->eliminate_boxing()) {
1451 // Mark the successor block as parsed
1452 next_block->next_path_num();
1453 }
1454 } else { // Path is live.
1455 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1456 }
1457 }
1458
1459 //------------------------------------do_if------------------------------------
1460 void Parse::do_if(BoolTest::mask btest, Node* c) {
1461 int target_bci = iter().get_dest();
1462
1463 Block* branch_block = successor_for_bci(target_bci);
1464 Block* next_block = successor_for_bci(iter().next_bci());
1465
1466 float cnt;
1467 float prob = branch_prediction(cnt, btest, target_bci, c);
1468 float untaken_prob = 1.0 - prob;
1469
1470 if (prob == PROB_UNKNOWN) {
1471 if (PrintOpto && Verbose) {
1472 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1473 }
1474 repush_if_args(); // to gather stats on loop
1475 uncommon_trap(Deoptimization::Reason_unreached,
1476 Deoptimization::Action_reinterpret,
1477 nullptr, "cold");
1478 if (C->eliminate_boxing()) {
1479 // Mark the successor blocks as parsed
1480 branch_block->next_path_num();
1524 }
1525
1526 // Generate real control flow
1527 float true_prob = (taken_if_true ? prob : untaken_prob);
1528 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1529 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1530 Node* taken_branch = new IfTrueNode(iff);
1531 Node* untaken_branch = new IfFalseNode(iff);
1532 if (!taken_if_true) { // Finish conversion to canonical form
1533 Node* tmp = taken_branch;
1534 taken_branch = untaken_branch;
1535 untaken_branch = tmp;
1536 }
1537
1538 // Branch is taken:
1539 { PreserveJVMState pjvms(this);
1540 taken_branch = _gvn.transform(taken_branch);
1541 set_control(taken_branch);
1542
1543 if (stopped()) {
1544 if (C->eliminate_boxing()) {
1545 // Mark the successor block as parsed
1546 branch_block->next_path_num();
1547 }
1548 } else {
1549 adjust_map_after_if(taken_btest, c, prob, branch_block);
1550 if (!stopped()) {
1551 merge(target_bci);
1552 }
1553 }
1554 }
1555
1556 untaken_branch = _gvn.transform(untaken_branch);
1557 set_control(untaken_branch);
1558
1559 // Branch not taken.
1560 if (stopped()) {
1561 if (C->eliminate_boxing()) {
1562 // Mark the successor block as parsed
1563 next_block->next_path_num();
1564 }
1565 } else {
1566 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1567 }
1568 }
1569
1570 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
1571 // Don't want to speculate on uncommon traps when running with -Xcomp
1572 if (!UseInterpreter) {
1573 return false;
1574 }
1575 return (seems_never_taken(prob) && seems_stable_comparison());
1576 }
1577
1578 void Parse::maybe_add_predicate_after_if(Block* path) {
1579 if (path->is_SEL_head() && path->preds_parsed() == 0) {
1580 // Add predicates at bci of if dominating the loop so traps can be
1581 // recorded on the if's profile data
1582 int bc_depth = repush_if_args();
1583 add_parse_predicates();
1584 dec_sp(bc_depth);
1585 path->set_has_predicates();
1586 }
1587 }
1588
1589
1685 if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
1686 // Found:
1687 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
1688 // or the narrowOop equivalent.
1689 const Type* obj_type = _gvn.type(obj);
1690 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
1691 if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
1692 tboth->higher_equal(obj_type)) {
1693 // obj has to be of the exact type Foo if the CmpP succeeds.
1694 int obj_in_map = map()->find_edge(obj);
1695 JVMState* jvms = this->jvms();
1696 if (obj_in_map >= 0 &&
1697 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
1698 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
1699 const Type* tcc = ccast->as_Type()->type();
1700 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
1701 // Delay transform() call to allow recovery of pre-cast value
1702 // at the control merge.
1703 _gvn.set_type_bottom(ccast);
1704 record_for_igvn(ccast);
1705 // Here's the payoff.
1706 replace_in_map(obj, ccast);
1707 }
1708 }
1709 }
1710 }
1711
1712 int val_in_map = map()->find_edge(val);
1713 if (val_in_map < 0) return; // replace_in_map would be useless
1714 {
1715 JVMState* jvms = this->jvms();
1716 if (!(jvms->is_loc(val_in_map) ||
1717 jvms->is_stk(val_in_map)))
1718 return; // again, it would be useless
1719 }
1720
1721 // Check for a comparison to a constant, and "know" that the compared
1722 // value is constrained on this path.
1723 assert(tcon->singleton(), "");
1724 ConstraintCastNode* ccast = nullptr;
1790 if (c->Opcode() == Op_CmpP &&
1791 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1792 c->in(2)->is_Con()) {
1793 Node* load_klass = nullptr;
1794 Node* decode = nullptr;
1795 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1796 decode = c->in(1);
1797 load_klass = c->in(1)->in(1);
1798 } else {
1799 load_klass = c->in(1);
1800 }
1801 if (load_klass->in(2)->is_AddP()) {
1802 Node* addp = load_klass->in(2);
1803 Node* obj = addp->in(AddPNode::Address);
1804 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1805 if (obj_type->speculative_type_not_null() != nullptr) {
1806 ciKlass* k = obj_type->speculative_type();
1807 inc_sp(2);
1808 obj = maybe_cast_profiled_obj(obj, k);
1809 dec_sp(2);
1810 // Make the CmpP use the casted obj
1811 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1812 load_klass = load_klass->clone();
1813 load_klass->set_req(2, addp);
1814 load_klass = _gvn.transform(load_klass);
1815 if (decode != nullptr) {
1816 decode = decode->clone();
1817 decode->set_req(1, load_klass);
1818 load_klass = _gvn.transform(decode);
1819 }
1820 c = c->clone();
1821 c->set_req(1, load_klass);
1822 c = _gvn.transform(c);
1823 }
1824 }
1825 }
1826 return c;
1827 }
1828
1829 //------------------------------do_one_bytecode--------------------------------
2636 // See if we can get some profile data and hand it off to the next block
2637 Block *target_block = block()->successor_for_bci(target_bci);
2638 if (target_block->pred_count() != 1) break;
2639 ciMethodData* methodData = method()->method_data();
2640 if (!methodData->is_mature()) break;
2641 ciProfileData* data = methodData->bci_to_data(bci());
2642 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
2643 int taken = ((ciJumpData*)data)->taken();
2644 taken = method()->scale_count(taken);
2645 target_block->set_count(taken);
2646 break;
2647 }
2648
2649 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
2650 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2651 handle_if_null:
2652 // If this is a backwards branch in the bytecodes, add Safepoint
2653 maybe_add_safepoint(iter().get_dest());
2654 a = null();
2655 b = pop();
2656 if (!_gvn.type(b)->speculative_maybe_null() &&
2657 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2658 inc_sp(1);
2659 Node* null_ctl = top();
2660 b = null_check_oop(b, &null_ctl, true, true, true);
2661 assert(null_ctl->is_top(), "no null control here");
2662 dec_sp(1);
2663 } else if (_gvn.type(b)->speculative_always_null() &&
2664 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2665 inc_sp(1);
2666 b = null_assert(b);
2667 dec_sp(1);
2668 }
2669 c = _gvn.transform( new CmpPNode(b, a) );
2670 do_ifnull(btest, c);
2671 break;
2672
2673 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2674 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2675 handle_if_acmp:
2676 // If this is a backwards branch in the bytecodes, add Safepoint
2677 maybe_add_safepoint(iter().get_dest());
2678 a = pop();
2679 b = pop();
2680 c = _gvn.transform( new CmpPNode(b, a) );
2681 c = optimize_cmp_with_klass(c);
2682 do_if(btest, c);
2683 break;
2684
2685 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2686 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2687 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2688 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2689 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2690 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2691 handle_ifxx:
2692 // If this is a backwards branch in the bytecodes, add Safepoint
2693 maybe_add_safepoint(iter().get_dest());
2694 a = _gvn.intcon(0);
2695 b = pop();
2696 c = _gvn.transform( new CmpINode(b, a) );
2697 do_if(btest, c);
2698 break;
2699
2700 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2701 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2702 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2717 break;
2718
2719 case Bytecodes::_lookupswitch:
2720 do_lookupswitch();
2721 break;
2722
2723 case Bytecodes::_invokestatic:
2724 case Bytecodes::_invokedynamic:
2725 case Bytecodes::_invokespecial:
2726 case Bytecodes::_invokevirtual:
2727 case Bytecodes::_invokeinterface:
2728 do_call();
2729 break;
2730 case Bytecodes::_checkcast:
2731 do_checkcast();
2732 break;
2733 case Bytecodes::_instanceof:
2734 do_instanceof();
2735 break;
2736 case Bytecodes::_anewarray:
2737 do_anewarray();
2738 break;
2739 case Bytecodes::_newarray:
2740 do_newarray((BasicType)iter().get_index());
2741 break;
2742 case Bytecodes::_multianewarray:
2743 do_multianewarray();
2744 break;
2745 case Bytecodes::_new:
2746 do_new();
2747 break;
2748
2749 case Bytecodes::_jsr:
2750 case Bytecodes::_jsr_w:
2751 do_jsr();
2752 break;
2753
2754 case Bytecodes::_ret:
2755 do_ret();
2756 break;
2757
2758
2759 case Bytecodes::_monitorenter:
2760 do_monitor_enter();
2761 break;
2762
2763 case Bytecodes::_monitorexit:
2764 do_monitor_exit();
2765 break;
2766
2767 case Bytecodes::_breakpoint:
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "ci/ciSymbols.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "interpreter/linkResolver.hpp"
31 #include "jvm_io.h"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/divnode.hpp"
39 #include "opto/idealGraphPrinter.hpp"
40 #include "opto/idealKit.hpp"
41 #include "opto/inlinetypenode.hpp"
42 #include "opto/matcher.hpp"
43 #include "opto/memnode.hpp"
44 #include "opto/mulnode.hpp"
45 #include "opto/opaquenode.hpp"
46 #include "opto/parse.hpp"
47 #include "opto/runtime.hpp"
48 #include "runtime/deoptimization.hpp"
49 #include "runtime/sharedRuntime.hpp"
50
51 #ifndef PRODUCT
52 extern uint explicit_null_checks_inserted,
53 explicit_null_checks_elided;
54 #endif
55
56 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
57 // Feed unused profile data to type speculation
58 if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
59 ciKlass* array_type = nullptr;
60 ciKlass* element_type = nullptr;
61 ProfilePtrKind element_ptr = ProfileMaybeNull;
62 bool flat_array = true;
63 bool null_free_array = true;
64 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
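      // Only record speculation if profiling supplied an element type or a definite nullness fact.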
65 if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
66 ld = record_profile_for_speculation(ld, element_type, element_ptr);
67 }
68 }
69 return ld;
70 }
71
72
73 //---------------------------------array_load----------------------------------
74 void Parse::array_load(BasicType bt) {
75 const Type* elemtype = Type::TOP;
76 Node* adr = array_addressing(bt, 0, elemtype);
77 if (stopped()) return; // guaranteed null or range check
78
79 Node* idx = pop();
80 Node* ary = pop();
81
82 // Handle inline type arrays
83 const TypeOopPtr* elemptr = elemtype->make_oopptr();
84 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
85 if (ary_t->is_flat()) {
86 // Load from flat inline type array
87 Node* vt = InlineTypeNode::make_from_flat(this, elemtype->inline_klass(), ary, adr);
88 push(vt);
89 return;
90 } else if (ary_t->is_null_free()) {
91 // Load from non-flat inline type array (elements can never be null)
92 bt = T_PRIMITIVE_OBJECT;
93 } else if (!ary_t->is_not_flat()) {
94     // Cannot statically determine whether the array is flat, so emit a runtime check
95 assert(UseFlatArray && is_reference_type(bt) && elemptr->can_be_inline_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() &&
96 (!elemptr->is_inlinetypeptr() || elemptr->inline_klass()->flat_array()), "array can't be flat");
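    // Emit a runtime flat-array check: the non-flat path performs a regular load; the flat path
    // either loads from the flat representation (when the element klass is known) or calls into the runtime.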
97 IdealKit ideal(this);
98 IdealVariable res(ideal);
99 ideal.declarations_done();
100 ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
101 // non-flat array
102 assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
103 sync_kit(ideal);
104 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
105 Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
106 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
107 if (elemptr->is_inlinetypeptr()) {
108 assert(elemptr->maybe_null(), "null free array should be handled above");
109 ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), false);
110 }
111 ideal.sync_kit(this);
112 ideal.set(res, ld);
113 } ideal.else_(); {
114 // flat array
115 sync_kit(ideal);
116 if (elemptr->is_inlinetypeptr()) {
117 // Element type is known, cast and load from flat representation
118 ciInlineKlass* vk = elemptr->inline_klass();
119 assert(vk->flat_array() && elemptr->maybe_null(), "never/always flat - should be optimized");
120 ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
121 const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
122 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
123 Node* casted_adr = array_element_address(cast, idx, T_PRIMITIVE_OBJECT, ary_t->size(), control());
124 // Re-execute flat array load if buffering triggers deoptimization
125 PreserveReexecuteState preexecs(this);
126 jvms()->set_should_reexecute(true);
127 inc_sp(2);
128 Node* vt = InlineTypeNode::make_from_flat(this, vk, cast, casted_adr)->buffer(this, false);
129 ideal.set(res, vt);
130 ideal.sync_kit(this);
131 } else {
132 // Element type is unknown, emit runtime call
133
134         // The membars below keep this access to an unknown flat array correctly
135         // ordered with other unknown and known flat array accesses.
136 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
137
138 Node* call = nullptr;
139 {
140 // Re-execute flat array load if runtime call triggers deoptimization
141 PreserveReexecuteState preexecs(this);
142 jvms()->set_bci(_bci);
143 jvms()->set_should_reexecute(true);
144 inc_sp(2);
145 kill_dead_locals();
146 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
147 OptoRuntime::load_unknown_inline_type(),
148 OptoRuntime::load_unknown_inline_Java(),
149 nullptr, TypeRawPtr::BOTTOM,
150 ary, idx);
151 }
152 make_slow_call_ex(call, env()->Throwable_klass(), false);
153 Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
154
155 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
156
157 // Keep track of the information that the inline type is in flat arrays
158 const Type* unknown_value = elemptr->is_instptr()->cast_to_flat_array();
159 buffer = _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
160
161 ideal.sync_kit(this);
162 ideal.set(res, buffer);
163 }
164 } ideal.end_if();
165 sync_kit(ideal);
166 Node* ld = _gvn.transform(ideal.value(res));
167 ld = record_profile_for_speculation_at_array_load(ld);
168 push_node(bt, ld);
169 return;
170 }
171
172 if (elemtype == TypeInt::BOOL) {
173 bt = T_BOOLEAN;
174 }
175 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
176 Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
177 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
178 ld = record_profile_for_speculation_at_array_load(ld);
179 // Loading an inline type from a non-flat array
180 if (elemptr != nullptr && elemptr->is_inlinetypeptr()) {
181 assert(!ary_t->is_null_free() || !elemptr->maybe_null(), "inline type array elements should never be null");
182 ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), !elemptr->maybe_null());
183 }
184 push_node(bt, ld);
185 }
186
187
188 //--------------------------------array_store----------------------------------
189 void Parse::array_store(BasicType bt) {
190 const Type* elemtype = Type::TOP;
191 Node* adr = array_addressing(bt, type2size[bt], elemtype);
192 if (stopped()) return; // guaranteed null or range check
193 Node* cast_val = nullptr;
194 if (bt == T_OBJECT) {
195 cast_val = array_store_check(adr, elemtype);
196 if (stopped()) return;
197 }
198 Node* val = pop_node(bt); // Value to store
199 Node* idx = pop(); // Index in the array
200 Node* ary = pop(); // The array itself
201
202 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
203 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
204
205 if (elemtype == TypeInt::BOOL) {
206 bt = T_BOOLEAN;
207 } else if (bt == T_OBJECT) {
208 elemtype = elemtype->make_oopptr();
209 const Type* tval = _gvn.type(cast_val);
210 // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
211 // This is only legal for non-null stores because the array_store_check always passes for null, even
212 // if the array is null-free. Null stores are handled in GraphKit::gen_inline_array_null_guard().
213 bool not_null_free = !tval->maybe_null() && !tval->is_oopptr()->can_be_inline_type();
214 bool not_flat = not_null_free || (tval->is_inlinetypeptr() && !tval->inline_klass()->flat_array());
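    // not_null_free: the stored value is non-null and cannot be an inline type, so the array cannot be null-free.
    // not_flat: additionally covers inline types whose klass does not allow flat arrays.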
215 if (!ary_t->is_not_null_free() && not_null_free) {
216 // Storing a non-inline type, mark array as not null-free (-> not flat).
217 ary_t = ary_t->cast_to_not_null_free();
218 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
219 replace_in_map(ary, cast);
220 ary = cast;
221 } else if (!ary_t->is_not_flat() && not_flat) {
222 // Storing to a non-flat array, mark array as not flat.
223 ary_t = ary_t->cast_to_not_flat();
224 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
225 replace_in_map(ary, cast);
226 ary = cast;
227 }
228
229 if (ary_t->is_flat()) {
230 // Store to flat inline type array
231 assert(!tval->maybe_null(), "should be guaranteed by array store check");
232 // Re-execute flat array store if buffering triggers deoptimization
233 PreserveReexecuteState preexecs(this);
234 inc_sp(3);
235 jvms()->set_should_reexecute(true);
236 cast_val->as_InlineType()->store_flat(this, ary, adr, nullptr, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
237 return;
238 } else if (ary_t->is_null_free()) {
239 // Store to non-flat inline type array (elements can never be null)
240 assert(!tval->maybe_null(), "should be guaranteed by array store check");
241 if (elemtype->inline_klass()->is_empty()) {
242 // Ignore empty inline stores, array is already initialized.
243 return;
244 }
245 } else if (!ary_t->is_not_flat() && (tval != TypePtr::NULL_PTR || StressReflectiveCode)) {
246     // Array might be flat, so emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
247 assert(UseFlatArray && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
248 !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be a flat array");
249 IdealKit ideal(this);
250 ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
251 // non-flat array
252 assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
253 sync_kit(ideal);
254 Node* cast_ary = inline_array_null_guard(ary, cast_val, 3);
255 inc_sp(3);
256 access_store_at(cast_ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
257 dec_sp(3);
258 ideal.sync_kit(this);
259 } ideal.else_(); {
260 sync_kit(ideal);
261 // flat array
262 Node* null_ctl = top();
263 Node* val = null_check_oop(cast_val, &null_ctl);
264 if (null_ctl != top()) {
265 PreserveJVMState pjvms(this);
266 inc_sp(3);
267 set_control(null_ctl);
268 uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
269 dec_sp(3);
270 }
271 // Try to determine the inline klass
272 ciInlineKlass* vk = nullptr;
273 if (tval->is_inlinetypeptr()) {
274 vk = tval->inline_klass();
275 } else if (elemtype->is_inlinetypeptr()) {
276 vk = elemtype->inline_klass();
277 }
278 Node* casted_ary = ary;
279 if (vk != nullptr && !stopped()) {
280 // Element type is known, cast and store to flat representation
281 assert(vk->flat_array() && elemtype->maybe_null(), "never/always flat - should be optimized");
282 ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
283 const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
284 casted_ary = _gvn.transform(new CheckCastPPNode(control(), casted_ary, arytype));
285 Node* casted_adr = array_element_address(casted_ary, idx, T_OBJECT, arytype->size(), control());
286 if (!val->is_InlineType()) {
287 assert(!gvn().type(val)->maybe_null(), "inline type array elements should never be null");
288 val = InlineTypeNode::make_from_oop(this, val, vk);
289 }
290 // Re-execute flat array store if buffering triggers deoptimization
291 PreserveReexecuteState preexecs(this);
292 inc_sp(3);
293 jvms()->set_should_reexecute(true);
294 val->as_InlineType()->store_flat(this, casted_ary, casted_adr, nullptr, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
295 } else if (!stopped()) {
296 // Element type is unknown, emit runtime call
297
298         // The membars below keep this access to an unknown flat array correctly
299         // ordered with other unknown and known flat array accesses.
300 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
301
302 make_runtime_call(RC_LEAF,
303 OptoRuntime::store_unknown_inline_type(),
304 CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_inline),
305 "store_unknown_inline", TypeRawPtr::BOTTOM,
306 val, casted_ary, idx);
307
308 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
309 }
310 ideal.sync_kit(this);
311 }
312 ideal.end_if();
313 sync_kit(ideal);
314 return;
315 } else if (!ary_t->is_not_null_free()) {
316 // Array is not flat but may be null free
317 assert(elemtype->is_oopptr()->can_be_inline_type() && !ary_t->klass_is_exact(), "array can't be null-free");
318 ary = inline_array_null_guard(ary, cast_val, 3, true);
319 }
320 }
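  // Temporarily restore the three popped operands on the expression stack so the JVM state
  // stays correct if the store path needs to trap and re-execute the bytecode.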
321 inc_sp(3);
322 access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
323 dec_sp(3);
324 }
325
326
327 //------------------------------array_addressing-------------------------------
328 // Pull array and index from the stack. Compute pointer-to-element.
329 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
330 Node *idx = peek(0+vals); // Get from stack without popping
331 Node *ary = peek(1+vals); // in case of exception
332
333 // Null check the array base, with correct stack contents
334 ary = null_check(ary, T_ARRAY);
336   // Did we detect a null exception at compile time?
336 if (stopped()) return top();
337
338 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
339 const TypeInt* sizetype = arytype->size();
340 elemtype = arytype->elem();
341
342 if (UseUniqueSubclasses) {
343 const Type* el = elemtype->make_ptr();
404 if (C->allow_range_check_smearing()) {
405 // Do not use builtin_throw, since range checks are sometimes
406 // made more stringent by an optimistic transformation.
407 // This creates "tentative" range checks at this point,
408 // which are not guaranteed to throw exceptions.
409 // See IfNode::Ideal, is_range_check, adjust_check.
410 uncommon_trap(Deoptimization::Reason_range_check,
411 Deoptimization::Action_make_not_entrant,
412 nullptr, "range_check");
413 } else {
414 // If we have already recompiled with the range-check-widening
415 // heroic optimization turned off, then we must really be throwing
416 // range check exceptions.
417 builtin_throw(Deoptimization::Reason_range_check);
418 }
419 }
420 }
421   // Check whether we already know we are throwing a range-check exception
422 if (stopped()) return top();
423
424 // This could be an access to an inline type array. We can't tell if it's
425 // flat or not. Knowing the exact type avoids runtime checks and leads to
426 // a much simpler graph shape. Check profile information.
427 if (!arytype->is_flat() && !arytype->is_not_flat()) {
428 // First check the speculative type
429 Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
430 ciKlass* array_type = arytype->speculative_type();
431 if (too_many_traps_or_recompiles(reason) || array_type == nullptr) {
432 // No speculative type, check profile data at this bci
433 array_type = nullptr;
434 reason = Deoptimization::Reason_class_check;
435 if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
436 ciKlass* element_type = nullptr;
437 ProfilePtrKind element_ptr = ProfileMaybeNull;
438 bool flat_array = true;
439 bool null_free_array = true;
440 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
441 }
442 }
443 if (array_type != nullptr) {
444 // Speculate that this array has the exact type reported by profile data
445 Node* better_ary = nullptr;
446 DEBUG_ONLY(Node* old_control = control();)
447 Node* slow_ctl = type_check_receiver(ary, array_type, 1.0, &better_ary);
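      // slow_ctl is the control path on which the array is not of the profiled type;
      // trap there so only the expected-type path remains to be compiled.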
448 if (stopped()) {
449 // The check always fails and therefore profile information is incorrect. Don't use it.
450 assert(old_control == slow_ctl, "type check should have been removed");
451 set_control(slow_ctl);
452 } else if (!slow_ctl->is_top()) {
453 { PreserveJVMState pjvms(this);
454 set_control(slow_ctl);
455 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
456 }
457 replace_in_map(ary, better_ary);
458 ary = better_ary;
459 arytype = _gvn.type(ary)->is_aryptr();
460 elemtype = arytype->elem();
461 }
462 }
463 } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
464 // No need to speculate: feed profile data at this bci for the
465 // array to type speculation
466 ciKlass* array_type = nullptr;
467 ciKlass* element_type = nullptr;
468 ProfilePtrKind element_ptr = ProfileMaybeNull;
469 bool flat_array = true;
470 bool null_free_array = true;
471 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
472 if (array_type != nullptr) {
473 ary = record_profile_for_speculation(ary, array_type, ProfileMaybeNull);
474 }
475 }
476
477   // We have no exact array type from profile data. Check profile data
478   // for a non-null-free or non-flat array. Non-null-free implies non-flat,
479   // so check that one first. Speculating on a non-null-free array
480   // doesn't help aaload but could be profitable for a
481   // subsequent aastore.
482 if (!arytype->is_null_free() && !arytype->is_not_null_free()) {
483 bool null_free_array = true;
484 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
485 if (arytype->speculative() != nullptr &&
486 arytype->speculative()->is_aryptr()->is_not_null_free() &&
487 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
488 null_free_array = false;
489 reason = Deoptimization::Reason_speculate_class_check;
490 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
491 ciKlass* array_type = nullptr;
492 ciKlass* element_type = nullptr;
493 ProfilePtrKind element_ptr = ProfileMaybeNull;
494 bool flat_array = true;
495 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
496 reason = Deoptimization::Reason_class_check;
497 }
498 if (!null_free_array) {
499 { // Deoptimize if null-free array
500 BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
501 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
502 }
503 assert(!stopped(), "null-free array should have been caught earlier");
504 Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free()));
505 replace_in_map(ary, better_ary);
506 ary = better_ary;
507 arytype = _gvn.type(ary)->is_aryptr();
508 }
509 }
510
511 if (!arytype->is_flat() && !arytype->is_not_flat()) {
512 bool flat_array = true;
513 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
514 if (arytype->speculative() != nullptr &&
515 arytype->speculative()->is_aryptr()->is_not_flat() &&
516 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
517 flat_array = false;
518 reason = Deoptimization::Reason_speculate_class_check;
519 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
520 ciKlass* array_type = nullptr;
521 ciKlass* element_type = nullptr;
522 ProfilePtrKind element_ptr = ProfileMaybeNull;
523 bool null_free_array = true;
524 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
525 reason = Deoptimization::Reason_class_check;
526 }
527 if (!flat_array) {
528 { // Deoptimize if flat array
529 BuildCutout unless(this, flat_array_test(ary, /* flat = */ false), PROB_MAX);
530 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
531 }
532 assert(!stopped(), "flat array should have been caught earlier");
533 Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_flat()));
534 replace_in_map(ary, better_ary);
535 ary = better_ary;
536 arytype = _gvn.type(ary)->is_aryptr();
537 }
538 }
539
540 // Make array address computation control dependent to prevent it
541 // from floating above the range check during loop optimizations.
542 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
543 assert(ptr != top(), "top should go hand-in-hand with stopped");
544
545 return ptr;
546 }
547
548
549 // returns IfNode
550 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
551 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
552 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
553 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
554 return iff;
555 }
556
557
558 // Sentinel value for the target bci to mark never-taken branches
559 // (according to profiling)
1775 }
1776 }
1777 }
1778
1779 // False branch
1780 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1781 set_control(iffalse);
1782
1783 if (stopped()) { // Path is dead?
1784 NOT_PRODUCT(explicit_null_checks_elided++);
1785 if (C->eliminate_boxing()) {
1786 // Mark the successor block as parsed
1787 next_block->next_path_num();
1788 }
1789 } else { // Path is live.
1790 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1791 }
1792 }
1793
1794 //------------------------------------do_if------------------------------------
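// 'new_path' merges the taken branch into the target block through a fresh path number;
// 'ctrl_taken' (if non-null) suppresses the merge and instead hands the taken control
// back to the caller. Both are used by do_acmp() below.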
1795 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
1796 int target_bci = iter().get_dest();
1797
1798 Block* branch_block = successor_for_bci(target_bci);
1799 Block* next_block = successor_for_bci(iter().next_bci());
1800
1801 float cnt;
1802 float prob = branch_prediction(cnt, btest, target_bci, c);
1803 float untaken_prob = 1.0 - prob;
1804
1805 if (prob == PROB_UNKNOWN) {
1806 if (PrintOpto && Verbose) {
1807 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1808 }
1809 repush_if_args(); // to gather stats on loop
1810 uncommon_trap(Deoptimization::Reason_unreached,
1811 Deoptimization::Action_reinterpret,
1812 nullptr, "cold");
1813 if (C->eliminate_boxing()) {
1814 // Mark the successor blocks as parsed
1815 branch_block->next_path_num();
1859 }
1860
1861 // Generate real control flow
1862 float true_prob = (taken_if_true ? prob : untaken_prob);
1863 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1864 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1865 Node* taken_branch = new IfTrueNode(iff);
1866 Node* untaken_branch = new IfFalseNode(iff);
1867 if (!taken_if_true) { // Finish conversion to canonical form
1868 Node* tmp = taken_branch;
1869 taken_branch = untaken_branch;
1870 untaken_branch = tmp;
1871 }
1872
1873 // Branch is taken:
1874 { PreserveJVMState pjvms(this);
1875 taken_branch = _gvn.transform(taken_branch);
1876 set_control(taken_branch);
1877
1878 if (stopped()) {
1879 if (C->eliminate_boxing() && !new_path) {
1880 // Mark the successor block as parsed (if we haven't created a new path)
1881 branch_block->next_path_num();
1882 }
1883 } else {
1884 adjust_map_after_if(taken_btest, c, prob, branch_block);
1885 if (!stopped()) {
1886 if (new_path) {
1887 // Merge by using a new path
1888 merge_new_path(target_bci);
1889 } else if (ctrl_taken != nullptr) {
1890 // Don't merge but save taken branch to be wired by caller
1891 *ctrl_taken = control();
1892 } else {
1893 merge(target_bci);
1894 }
1895 }
1896 }
1897 }
1898
1899 untaken_branch = _gvn.transform(untaken_branch);
1900 set_control(untaken_branch);
1901
1902 // Branch not taken.
1903 if (stopped() && ctrl_taken == nullptr) {
1904 if (C->eliminate_boxing()) {
1905 // Mark the successor block as parsed (if caller does not re-wire control flow)
1906 next_block->next_path_num();
1907 }
1908 } else {
1909 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1910 }
1911 }
1912
1913
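// Map the speculative part of an oop type to a ProfilePtrKind so the acmp helpers below
// can reuse profile-driven decisions about nullness.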
1914 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1915 if (t->speculative() == nullptr) {
1916 return ProfileUnknownNull;
1917 }
1918 if (t->speculative_always_null()) {
1919 return ProfileAlwaysNull;
1920 }
1921 if (t->speculative_maybe_null()) {
1922 return ProfileMaybeNull;
1923 }
1924 return ProfileNeverNull;
1925 }
1926
1927 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1928 inc_sp(2);
1929 Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1930 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1931 speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1932 dec_sp(2);
1933 if (btest == BoolTest::ne) {
1934 {
1935 PreserveJVMState pjvms(this);
1936 replace_in_map(input, cast);
1937 int target_bci = iter().get_dest();
1938 merge(target_bci);
1939 }
1940 record_for_igvn(eq_region);
1941 set_control(_gvn.transform(eq_region));
1942 } else {
1943 replace_in_map(input, cast);
1944 }
1945 }
1946
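// Null check an acmp input according to profiling: returns the non-null cast of 'input'
// and sets 'null_ctl' to the control path on which the input is null.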
1947 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
1948 inc_sp(2);
1949 null_ctl = top();
1950 Node* cast = null_check_oop(input, &null_ctl,
1951 input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
1952 false,
1953 speculative_ptr_kind(tinput) == ProfileNeverNull &&
1954 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
1955 dec_sp(2);
1956 assert(!stopped(), "null input should have been caught earlier");
1957 return cast;
1958 }
1959
1960 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
1961 Node* ne_region = new RegionNode(1);
1962 Node* null_ctl;
1963 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
1964 ne_region->add_req(null_ctl);
1965
1966 Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
1967 {
1968 PreserveJVMState pjvms(this);
1969 inc_sp(2);
1970 set_control(slow_ctl);
1971 Deoptimization::DeoptReason reason;
1972 if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
1973 reason = Deoptimization::Reason_speculate_class_check;
1974 } else {
1975 reason = Deoptimization::Reason_class_check;
1976 }
1977 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
1978 }
1979 ne_region->add_req(control());
1980
1981 record_for_igvn(ne_region);
1982 set_control(_gvn.transform(ne_region));
1983 if (btest == BoolTest::ne) {
1984 {
1985 PreserveJVMState pjvms(this);
1986 if (null_ctl == top()) {
1987 replace_in_map(input, cast);
1988 }
1989 int target_bci = iter().get_dest();
1990 merge(target_bci);
1991 }
1992 record_for_igvn(eq_region);
1993 set_control(_gvn.transform(eq_region));
1994 } else {
1995 if (null_ctl == top()) {
1996 replace_in_map(input, cast);
1997 }
1998 set_control(_gvn.transform(ne_region));
1999 }
2000 }
2001
2002 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2003 Node* ne_region = new RegionNode(1);
2004 Node* null_ctl;
2005 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2006 ne_region->add_req(null_ctl);
2007
2008 {
2009 BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2010 inc_sp(2);
2011 uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2012 }
2013
2014 ne_region->add_req(control());
2015
2016 record_for_igvn(ne_region);
2017 set_control(_gvn.transform(ne_region));
2018 if (btest == BoolTest::ne) {
2019 {
2020 PreserveJVMState pjvms(this);
2021 if (null_ctl == top()) {
2022 replace_in_map(input, cast);
2023 }
2024 int target_bci = iter().get_dest();
2025 merge(target_bci);
2026 }
2027 record_for_igvn(eq_region);
2028 set_control(_gvn.transform(eq_region));
2029 } else {
2030 if (null_ctl == top()) {
2031 replace_in_map(input, cast);
2032 }
2033 set_control(_gvn.transform(ne_region));
2034 }
2035 }
2036
2037 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2038 ciKlass* left_type = nullptr;
2039 ciKlass* right_type = nullptr;
2040 ProfilePtrKind left_ptr = ProfileUnknownNull;
2041 ProfilePtrKind right_ptr = ProfileUnknownNull;
2042 bool left_inline_type = true;
2043 bool right_inline_type = true;
2044
2045 // Leverage profiling at acmp
2046 if (UseACmpProfile) {
2047 method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2048 if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2049 left_type = nullptr;
2050 right_type = nullptr;
2051 left_inline_type = true;
2052 right_inline_type = true;
2053 }
2054 if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2055 left_ptr = ProfileUnknownNull;
2056 right_ptr = ProfileUnknownNull;
2057 }
2058 }
2059
2060 if (UseTypeSpeculation) {
2061 record_profile_for_speculation(left, left_type, left_ptr);
2062 record_profile_for_speculation(right, right_type, right_ptr);
2063 }
2064
2065 if (!EnableValhalla) {
2066 Node* cmp = CmpP(left, right);
2067 cmp = optimize_cmp_with_klass(cmp);
2068 do_if(btest, cmp);
2069 return;
2070 }
2071
2072 // Check for equality before potentially allocating
2073 if (left == right) {
2074 do_if(btest, makecon(TypeInt::CC_EQ));
2075 return;
2076 }
2077
2078 // Allocate inline type operands and re-execute on deoptimization
2079 if (left->is_InlineType()) {
2080 if (_gvn.type(right)->is_zero_type() ||
2081 (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2082 // Null checking a scalarized but nullable inline type. Check the IsInit
2083 // input instead of the oop input to avoid keeping buffer allocations alive.
2084 Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2085 do_if(btest, cmp);
2086 return;
2087 } else {
2088 PreserveReexecuteState preexecs(this);
2089 inc_sp(2);
2090 jvms()->set_should_reexecute(true);
2091 left = left->as_InlineType()->buffer(this)->get_oop();
2092 }
2093 }
2094 if (right->is_InlineType()) {
2095 PreserveReexecuteState preexecs(this);
2096 inc_sp(2);
2097 jvms()->set_should_reexecute(true);
2098 right = right->as_InlineType()->buffer(this)->get_oop();
2099 }
2100
2101 // First, do a normal pointer comparison
2102 const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2103 const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2104 Node* cmp = CmpP(left, right);
2105 cmp = optimize_cmp_with_klass(cmp);
2106 if (tleft == nullptr || !tleft->can_be_inline_type() ||
2107 tright == nullptr || !tright->can_be_inline_type()) {
2108     // This is sufficient if one of the operands can't be an inline type
2109 do_if(btest, cmp);
2110 return;
2111 }
2112 Node* eq_region = nullptr;
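  // For acmpne, every slow path on which the operands turn out to be equal must fall through
  // to the same successor; eq_region collects those control edges.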
2113 if (btest == BoolTest::eq) {
2114 do_if(btest, cmp, true);
2115 if (stopped()) {
2116 return;
2117 }
2118 } else {
2119 assert(btest == BoolTest::ne, "only eq or ne");
2120 Node* is_not_equal = nullptr;
2121 eq_region = new RegionNode(3);
2122 {
2123 PreserveJVMState pjvms(this);
2124 do_if(btest, cmp, false, &is_not_equal);
2125 if (!stopped()) {
2126 eq_region->init_req(1, control());
2127 }
2128 }
2129 if (is_not_equal == nullptr || is_not_equal->is_top()) {
2130 record_for_igvn(eq_region);
2131 set_control(_gvn.transform(eq_region));
2132 return;
2133 }
2134 set_control(is_not_equal);
2135 }
2136
2137 // Prefer speculative types if available
2138 if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2139 if (tleft->speculative_type() != nullptr) {
2140 left_type = tleft->speculative_type();
2141 }
2142 if (tright->speculative_type() != nullptr) {
2143 right_type = tright->speculative_type();
2144 }
2145 }
2146
2147 if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2148 ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2149 if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2150 left_ptr = speculative_left_ptr;
2151 } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2152 left_ptr = speculative_left_ptr;
2153 }
2154 }
2155 if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2156 ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2157 if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2158 right_ptr = speculative_right_ptr;
2159 } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2160 right_ptr = speculative_right_ptr;
2161 }
2162 }
2163
2164 if (left_ptr == ProfileAlwaysNull) {
2165 // Comparison with null. Assert the input is indeed null and we're done.
2166 acmp_always_null_input(left, tleft, btest, eq_region);
2167 return;
2168 }
2169 if (right_ptr == ProfileAlwaysNull) {
2170 // Comparison with null. Assert the input is indeed null and we're done.
2171 acmp_always_null_input(right, tright, btest, eq_region);
2172 return;
2173 }
2174 if (left_type != nullptr && !left_type->is_inlinetype()) {
2175 // Comparison with an object of known type
2176 acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2177 return;
2178 }
2179 if (right_type != nullptr && !right_type->is_inlinetype()) {
2180 // Comparison with an object of known type
2181 acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2182 return;
2183 }
2184 if (!left_inline_type) {
2185 // Comparison with an object known not to be an inline type
2186 acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2187 return;
2188 }
2189 if (!right_inline_type) {
2190 // Comparison with an object known not to be an inline type
2191 acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2192 return;
2193 }
2194
2195 // Pointers are not equal, check if first operand is non-null
2196 Node* ne_region = new RegionNode(6);
2197 Node* null_ctl;
2198 Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2199 ne_region->init_req(1, null_ctl);
2200
2201 // First operand is non-null, check if it is an inline type
2202 Node* is_value = inline_type_test(not_null_right);
2203 IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2204 Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2205 ne_region->init_req(2, not_value);
2206 set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2207
2208 // The first operand is an inline type, check if the second operand is non-null
2209 Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2210 ne_region->init_req(3, null_ctl);
2211
2212 // Check if both operands are of the same class.
2213 Node* kls_left = load_object_klass(not_null_left);
2214 Node* kls_right = load_object_klass(not_null_right);
2215 Node* kls_cmp = CmpP(kls_left, kls_right);
2216 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2217 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2218 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2219 set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2220 ne_region->init_req(4, kls_ne);
2221
2222 if (stopped()) {
2223 record_for_igvn(ne_region);
2224 set_control(_gvn.transform(ne_region));
2225 if (btest == BoolTest::ne) {
2226 {
2227 PreserveJVMState pjvms(this);
2228 int target_bci = iter().get_dest();
2229 merge(target_bci);
2230 }
2231 record_for_igvn(eq_region);
2232 set_control(_gvn.transform(eq_region));
2233 }
2234 return;
2235 }
2236
2237   // Both operands are value types of the same class, so we need to perform a
2238   // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2239 Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2240 Node* mem = reset_memory();
2241 Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2242
2243 Node* eq_io_phi = nullptr;
2244 Node* eq_mem_phi = nullptr;
2245 if (eq_region != nullptr) {
2246 eq_io_phi = PhiNode::make(eq_region, i_o());
2247 eq_mem_phi = PhiNode::make(eq_region, mem);
2248 }
2249
2250 set_all_memory(mem);
2251
2252 kill_dead_locals();
2253 ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2254 CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2255 call->set_override_symbolic_info(true);
2256 call->init_req(TypeFunc::Parms, not_null_left);
2257 call->init_req(TypeFunc::Parms+1, not_null_right);
2258 inc_sp(2);
2259 set_edges_for_java_call(call, false, false);
2260 Node* ret = set_results_for_java_call(call, false, true);
2261 dec_sp(2);
2262
2263 // Test the return value of ValueObjectMethods::isSubstitutable()
2264 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2265 Node* ctl = C->top();
2266 if (btest == BoolTest::eq) {
2267 PreserveJVMState pjvms(this);
2268 do_if(btest, subst_cmp);
2269 if (!stopped()) {
2270 ctl = control();
2271 }
2272 } else {
2273 assert(btest == BoolTest::ne, "only eq or ne");
2274 PreserveJVMState pjvms(this);
2275 do_if(btest, subst_cmp, false, &ctl);
2276 if (!stopped()) {
2277 eq_region->init_req(2, control());
2278 eq_io_phi->init_req(2, i_o());
2279 eq_mem_phi->init_req(2, reset_memory());
2280 }
2281 }
2282 ne_region->init_req(5, ctl);
2283 ne_io_phi->init_req(5, i_o());
2284 ne_mem_phi->init_req(5, reset_memory());
2285
2286 record_for_igvn(ne_region);
2287 set_control(_gvn.transform(ne_region));
2288 set_i_o(_gvn.transform(ne_io_phi));
2289 set_all_memory(_gvn.transform(ne_mem_phi));
2290
2291 if (btest == BoolTest::ne) {
2292 {
2293 PreserveJVMState pjvms(this);
2294 int target_bci = iter().get_dest();
2295 merge(target_bci);
2296 }
2297
2298 record_for_igvn(eq_region);
2299 set_control(_gvn.transform(eq_region));
2300 set_i_o(_gvn.transform(eq_io_phi));
2301 set_all_memory(_gvn.transform(eq_mem_phi));
2302 }
2303 }
2304
2305 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2306 // Don't want to speculate on uncommon traps when running with -Xcomp
2307 if (!UseInterpreter) {
2308 return false;
2309 }
2310 return (seems_never_taken(prob) && seems_stable_comparison());
2311 }
2312
2313 void Parse::maybe_add_predicate_after_if(Block* path) {
2314 if (path->is_SEL_head() && path->preds_parsed() == 0) {
2315 // Add predicates at bci of if dominating the loop so traps can be
2316 // recorded on the if's profile data
2317 int bc_depth = repush_if_args();
2318 add_parse_predicates();
2319 dec_sp(bc_depth);
2320 path->set_has_predicates();
2321 }
2322 }
2323
2324
2420 if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2421 // Found:
2422 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2423 // or the narrowOop equivalent.
2424 const Type* obj_type = _gvn.type(obj);
2425 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2426 if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
2427 tboth->higher_equal(obj_type)) {
2428 // obj has to be of the exact type Foo if the CmpP succeeds.
2429 int obj_in_map = map()->find_edge(obj);
2430 JVMState* jvms = this->jvms();
2431 if (obj_in_map >= 0 &&
2432 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2433 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2434 const Type* tcc = ccast->as_Type()->type();
2435 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2436 // Delay transform() call to allow recovery of pre-cast value
2437 // at the control merge.
2438 _gvn.set_type_bottom(ccast);
2439 record_for_igvn(ccast);
2440 if (tboth->is_inlinetypeptr()) {
2441 ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2442 }
2443 // Here's the payoff.
2444 replace_in_map(obj, ccast);
2445 }
2446 }
2447 }
2448 }
2449
2450 int val_in_map = map()->find_edge(val);
2451 if (val_in_map < 0) return; // replace_in_map would be useless
2452 {
2453 JVMState* jvms = this->jvms();
2454 if (!(jvms->is_loc(val_in_map) ||
2455 jvms->is_stk(val_in_map)))
2456 return; // again, it would be useless
2457 }
2458
2459 // Check for a comparison to a constant, and "know" that the compared
2460 // value is constrained on this path.
2461 assert(tcon->singleton(), "");
2462 ConstraintCastNode* ccast = nullptr;
2528 if (c->Opcode() == Op_CmpP &&
2529 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2530 c->in(2)->is_Con()) {
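    // Pattern: CmpP(LoadKlass(obj._klass), constant klass). If profiling supplies a speculative
    // type for obj, cast obj to it and rebuild the klass load from the cast so the compare can fold.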
2531 Node* load_klass = nullptr;
2532 Node* decode = nullptr;
2533 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2534 decode = c->in(1);
2535 load_klass = c->in(1)->in(1);
2536 } else {
2537 load_klass = c->in(1);
2538 }
2539 if (load_klass->in(2)->is_AddP()) {
2540 Node* addp = load_klass->in(2);
2541 Node* obj = addp->in(AddPNode::Address);
2542 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2543 if (obj_type->speculative_type_not_null() != nullptr) {
2544 ciKlass* k = obj_type->speculative_type();
2545 inc_sp(2);
2546 obj = maybe_cast_profiled_obj(obj, k);
2547 dec_sp(2);
2548 if (obj->is_InlineType()) {
2549 assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2550 obj = obj->as_InlineType()->get_oop();
2551 }
2552 // Make the CmpP use the casted obj
2553 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2554 load_klass = load_klass->clone();
2555 load_klass->set_req(2, addp);
2556 load_klass = _gvn.transform(load_klass);
2557 if (decode != nullptr) {
2558 decode = decode->clone();
2559 decode->set_req(1, load_klass);
2560 load_klass = _gvn.transform(decode);
2561 }
2562 c = c->clone();
2563 c->set_req(1, load_klass);
2564 c = _gvn.transform(c);
2565 }
2566 }
2567 }
2568 return c;
2569 }
2570
2571 //------------------------------do_one_bytecode--------------------------------
3378 // See if we can get some profile data and hand it off to the next block
3379 Block *target_block = block()->successor_for_bci(target_bci);
3380 if (target_block->pred_count() != 1) break;
3381 ciMethodData* methodData = method()->method_data();
3382 if (!methodData->is_mature()) break;
3383 ciProfileData* data = methodData->bci_to_data(bci());
3384 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3385 int taken = ((ciJumpData*)data)->taken();
3386 taken = method()->scale_count(taken);
3387 target_block->set_count(taken);
3388 break;
3389 }
3390
3391 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
3392 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3393 handle_if_null:
3394 // If this is a backwards branch in the bytecodes, add Safepoint
3395 maybe_add_safepoint(iter().get_dest());
3396 a = null();
3397 b = pop();
3398 if (b->is_InlineType()) {
3399 // Null checking a scalarized but nullable inline type. Check the IsInit
3400 // input instead of the oop input to avoid keeping buffer allocations alive
3401 c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3402 } else {
3403 if (!_gvn.type(b)->speculative_maybe_null() &&
3404 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
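          // Profiling says b is never null: install a speculative null check (which traps if
          // b is actually null) so the CmpP against null below can typically fold away.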
3405 inc_sp(1);
3406 Node* null_ctl = top();
3407 b = null_check_oop(b, &null_ctl, true, true, true);
3408 assert(null_ctl->is_top(), "no null control here");
3409 dec_sp(1);
3410 } else if (_gvn.type(b)->speculative_always_null() &&
3411 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3412 inc_sp(1);
3413 b = null_assert(b);
3414 dec_sp(1);
3415 }
3416 c = _gvn.transform( new CmpPNode(b, a) );
3417 }
3418 do_ifnull(btest, c);
3419 break;
3420
3421 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3422 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3423 handle_if_acmp:
3424 // If this is a backwards branch in the bytecodes, add Safepoint
3425 maybe_add_safepoint(iter().get_dest());
3426 a = pop();
3427 b = pop();
3428 do_acmp(btest, b, a);
3429 break;
3430
3431 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3432 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3433 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3434 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3435 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3436 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3437 handle_ifxx:
3438 // If this is a backwards branch in the bytecodes, add Safepoint
3439 maybe_add_safepoint(iter().get_dest());
3440 a = _gvn.intcon(0);
3441 b = pop();
3442 c = _gvn.transform( new CmpINode(b, a) );
3443 do_if(btest, c);
3444 break;
3445
3446 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3447 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3448 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3463 break;
3464
3465 case Bytecodes::_lookupswitch:
3466 do_lookupswitch();
3467 break;
3468
3469 case Bytecodes::_invokestatic:
3470 case Bytecodes::_invokedynamic:
3471 case Bytecodes::_invokespecial:
3472 case Bytecodes::_invokevirtual:
3473 case Bytecodes::_invokeinterface:
3474 do_call();
3475 break;
3476 case Bytecodes::_checkcast:
3477 do_checkcast();
3478 break;
3479 case Bytecodes::_instanceof:
3480 do_instanceof();
3481 break;
3482 case Bytecodes::_anewarray:
3483 do_newarray();
3484 break;
3485 case Bytecodes::_newarray:
3486 do_newarray((BasicType)iter().get_index());
3487 break;
3488 case Bytecodes::_multianewarray:
3489 do_multianewarray();
3490 break;
3491 case Bytecodes::_new:
3492 do_new();
3493 break;
3494 case Bytecodes::_aconst_init:
3495 do_aconst_init();
3496 break;
3497 case Bytecodes::_withfield:
3498 do_withfield();
3499 break;
3500
3501 case Bytecodes::_jsr:
3502 case Bytecodes::_jsr_w:
3503 do_jsr();
3504 break;
3505
3506 case Bytecodes::_ret:
3507 do_ret();
3508 break;
3509
3510
3511 case Bytecodes::_monitorenter:
3512 do_monitor_enter();
3513 break;
3514
3515 case Bytecodes::_monitorexit:
3516 do_monitor_exit();
3517 break;
3518
3519 case Bytecodes::_breakpoint: