1 /*
2 * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciInlineKlass.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "ci/ciSymbols.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "interpreter/linkResolver.hpp"
31 #include "jvm_io.h"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/divnode.hpp"
39 #include "opto/idealGraphPrinter.hpp"
40 #include "opto/idealKit.hpp"
41 #include "opto/inlinetypenode.hpp"
42 #include "opto/matcher.hpp"
43 #include "opto/memnode.hpp"
44 #include "opto/mulnode.hpp"
45 #include "opto/opaquenode.hpp"
46 #include "opto/parse.hpp"
47 #include "opto/runtime.hpp"
48 #include "opto/subtypenode.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/deoptimization.hpp"
51 #include "runtime/globals.hpp"
52 #include "runtime/sharedRuntime.hpp"
53
54 #ifndef PRODUCT
55 extern uint explicit_null_checks_inserted,
56 explicit_null_checks_elided;
57 #endif
58
59 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
60 // Feed unused profile data to type speculation
61 if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
62 ciKlass* array_type = nullptr;
63 ciKlass* element_type = nullptr;
64 ProfilePtrKind element_ptr = ProfileMaybeNull;
65 bool flat_array = true;
66 bool null_free_array = true;
67 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
68 if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
69 ld = record_profile_for_speculation(ld, element_type, element_ptr);
70 }
71 }
72 return ld;
73 }
74
75
76 //---------------------------------array_load----------------------------------
77 void Parse::array_load(BasicType bt) {
78 const Type* elemtype = Type::TOP;
79 Node* adr = array_addressing(bt, 0, elemtype);
80 if (stopped()) return; // guaranteed null or range check
81
82 Node* array_index = pop();
83 Node* array = pop();
84
85 // Handle inline type arrays
86 const TypeOopPtr* element_ptr = elemtype->make_oopptr();
87 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
88
89 if (!array_type->is_not_flat()) {
90 // Cannot statically determine if array is a flat array, emit runtime check
91 assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
92 (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->maybe_flat_in_array()), "array can't be flat");
93 IdealKit ideal(this);
94 IdealVariable res(ideal);
95 ideal.declarations_done();
96 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
97 // Non-flat array
98 sync_kit(ideal);
99 if (!array_type->is_flat()) {
100 assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
101 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
102 DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
103 if (needs_range_check(array_type->size(), array_index)) {
104 // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
105 // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
106 // possibly float above the range check at any point.
107 decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
108 }
109 Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
110 if (element_ptr->is_inlinetypeptr()) {
111 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
112 }
113 ideal.set(res, ld);
114 }
115 ideal.sync_kit(this);
116 } ideal.else_(); {
117 // Flat array
118 sync_kit(ideal);
119 if (!array_type->is_not_flat()) {
120 if (element_ptr->is_inlinetypeptr()) {
121 ciInlineKlass* vk = element_ptr->inline_klass();
122 Node* flat_array = cast_to_flat_array(array, vk);
123 Node* vt = InlineTypeNode::make_from_flat_array(this, vk, flat_array, array_index);
124 ideal.set(res, vt);
125 } else {
126 // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
127 // runtime call to correctly load the inline type element from the flat array.
128 Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
129 bool is_null_free = array_type->is_null_free() ||
130 (!UseNullableAtomicValueFlattening && !UseNullableNonAtomicValueFlattening);
131 if (is_null_free) {
132 inline_type = cast_not_null(inline_type);
133 }
134 ideal.set(res, inline_type);
135 }
136 }
137 ideal.sync_kit(this);
138 } ideal.end_if();
139 sync_kit(ideal);
140 Node* ld = _gvn.transform(ideal.value(res));
141 ld = record_profile_for_speculation_at_array_load(ld);
142 push_node(bt, ld);
143 return;
144 }
145
146 if (elemtype == TypeInt::BOOL) {
147 bt = T_BOOLEAN;
148 }
149 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
150 Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
151 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
152 ld = record_profile_for_speculation_at_array_load(ld);
153 // Loading an inline type from a non-flat array
154 if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
155 assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
156 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
157 }
158 push_node(bt, ld);
159 }
160
161 Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
162 // Below membars keep this access to an unknown flat array correctly
163 // ordered with other unknown and known flat array accesses.
164 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
165
166 Node* call = nullptr;
167 {
168 // Re-execute flat array load if runtime call triggers deoptimization
169 PreserveReexecuteState preexecs(this);
170 jvms()->set_bci(_bci);
171 jvms()->set_should_reexecute(true);
172 inc_sp(2);
173 kill_dead_locals();
174 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
175 OptoRuntime::load_unknown_inline_Type(),
176 OptoRuntime::load_unknown_inline_Java(),
177 nullptr, TypeRawPtr::BOTTOM,
178 array, array_index);
179 }
180 make_slow_call_ex(call, env()->Throwable_klass(), false);
181 Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
182
183 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
184
185 // Keep track of the information that the inline type is in flat arrays
186 const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
187 return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
188 }
189
190 //--------------------------------array_store----------------------------------
191 void Parse::array_store(BasicType bt) {
192 const Type* elemtype = Type::TOP;
193 Node* adr = array_addressing(bt, type2size[bt], elemtype);
194 if (stopped()) return; // guaranteed null or range check
195 Node* stored_value_casted = nullptr;
196 if (bt == T_OBJECT) {
197 stored_value_casted = array_store_check(adr, elemtype);
198 if (stopped()) {
199 return;
200 }
201 }
202 Node* const stored_value = pop_node(bt); // Value to store
203 Node* const array_index = pop(); // Index in the array
204 Node* array = pop(); // The array itself
205
206 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
207 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
208
209 if (elemtype == TypeInt::BOOL) {
210 bt = T_BOOLEAN;
211 } else if (bt == T_OBJECT) {
212 elemtype = elemtype->make_oopptr();
213 const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
214 // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
215 // This is only legal for non-null stores because the array_store_check always passes for null, even
216 // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
217 bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
218 bool not_null_free = not_inline;
219 bool not_flat = not_inline || ( stored_value_casted_type->is_inlinetypeptr() &&
220 !stored_value_casted_type->inline_klass()->maybe_flat_in_array());
221 if (!array_type->is_not_null_free() && not_null_free) {
222 // Storing a non-inline type, mark array as not null-free.
223 array_type = array_type->cast_to_not_null_free();
224 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
225 replace_in_map(array, cast);
226 array = cast;
227 }
228 if (!array_type->is_not_flat() && not_flat) {
229 // Storing to a non-flat array, mark array as not flat.
230 array_type = array_type->cast_to_not_flat();
231 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
232 replace_in_map(array, cast);
233 array = cast;
234 }
235
236 if (array_type->is_null_free() && elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
237 // Array of null-free empty inline type, there is only 1 state for the elements
238 assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
239 return;
240 }
241
242 if (!array_type->is_not_flat()) {
243 // Array might be a flat array, emit runtime checks (for null, a simple inline_array_null_guard is sufficient).
244 assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
245 (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
246 // TODO 8350865 Depending on the available layouts, we can avoid this check in below flat/not-flat branches. Also the safe_for_replace arg is now always true.
247 array = inline_array_null_guard(array, stored_value_casted, 3, true);
248 IdealKit ideal(this);
249 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
250 // Non-flat array
251 if (!array_type->is_flat()) {
252 sync_kit(ideal);
253 assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
254 inc_sp(3);
255 access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
256 dec_sp(3);
257 ideal.sync_kit(this);
258 }
259 } ideal.else_(); {
260 // Flat array
261 sync_kit(ideal);
262 if (!array_type->is_not_flat()) {
263 // Try to determine the inline klass type of the stored value
264 ciInlineKlass* vk = nullptr;
265 if (stored_value_casted_type->is_inlinetypeptr()) {
266 vk = stored_value_casted_type->inline_klass();
267 } else if (elemtype->is_inlinetypeptr()) {
268 vk = elemtype->inline_klass();
269 }
270
271 if (vk != nullptr) {
272 // Element type is known, cast and store to flat array layout.
273 Node* flat_array = cast_to_flat_array(array, vk);
274
275 // Re-execute flat array store if buffering triggers deoptimization
276 PreserveReexecuteState preexecs(this);
277 jvms()->set_should_reexecute(true);
278 inc_sp(3);
279
280 if (!stored_value_casted->is_InlineType()) {
281 assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
282 stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
283 }
284
285 stored_value_casted->as_InlineType()->store_flat_array(this, flat_array, array_index);
286 } else {
287 // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
288 store_to_unknown_flat_array(array, array_index, stored_value_casted);
289 }
290 }
291 ideal.sync_kit(this);
292 }
293 ideal.end_if();
294 sync_kit(ideal);
295 return;
296 } else if (!array_type->is_not_null_free()) {
297 // Array is not flat but may be null free
298 assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
299 array = inline_array_null_guard(array, stored_value_casted, 3, true);
300 }
301 }
302 inc_sp(3);
303 access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
304 dec_sp(3);
305 }
306
307 // Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
308 // array layout) or not exact (could have different flat array layouts at runtime).
309 void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
310 // Below membars keep this access to an unknown flat array correctly
311 // ordered with other unknown and known flat array accesses.
312 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
313
314 Node* call = nullptr;
315 {
316 // Re-execute flat array store if runtime call triggers deoptimization
317 PreserveReexecuteState preexecs(this);
318 jvms()->set_bci(_bci);
319 jvms()->set_should_reexecute(true);
320 inc_sp(3);
321 kill_dead_locals();
322 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
323 OptoRuntime::store_unknown_inline_Type(),
324 OptoRuntime::store_unknown_inline_Java(),
325 nullptr, TypeRawPtr::BOTTOM,
326 non_null_stored_value, array, idx);
327 }
328 make_slow_call_ex(call, env()->Throwable_klass(), false);
329
330 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
331 }
332
333 //------------------------------array_addressing-------------------------------
334 // Pull array and index from the stack. Compute pointer-to-element.
335 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
336 Node *idx = peek(0+vals); // Get from stack without popping
337 Node *ary = peek(1+vals); // in case of exception
338
339 // Null check the array base, with correct stack contents
340 ary = null_check(ary, T_ARRAY);
341 // Compile-time detect of null-exception?
342 if (stopped()) return top();
343
344 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
345 const TypeInt* sizetype = arytype->size();
346 elemtype = arytype->elem();
347
348 if (UseUniqueSubclasses) {
349 const Type* el = elemtype->make_ptr();
350 if (el && el->isa_instptr()) {
351 const TypeInstPtr* toop = el->is_instptr();
352 if (toop->instance_klass()->unique_concrete_subklass()) {
353 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
354 const Type* subklass = Type::get_const_type(toop->instance_klass());
355 elemtype = subklass->join_speculative(el);
356 }
357 }
358 }
359
360 if (!arytype->is_loaded()) {
361 // Only fails for some -Xcomp runs
362 // The class is unloaded. We have to run this bytecode in the interpreter.
363 ciKlass* klass = arytype->unloaded_klass();
364
365 uncommon_trap(Deoptimization::Reason_unloaded,
366 Deoptimization::Action_reinterpret,
367 klass, "!loaded array");
368 return top();
369 }
370
371 ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);
372
373 if (needs_range_check(sizetype, idx)) {
374 create_range_check(idx, ary, sizetype);
375 } else if (C->log() != nullptr) {
376 C->log()->elem("observe that='!need_range_check'");
377 }
378
379 // Check for always knowing you are throwing a range-check exception
380 if (stopped()) return top();
381
382 // Make array address computation control dependent to prevent it
383 // from floating above the range check during loop optimizations.
384 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
385 assert(ptr != top(), "top should go hand-in-hand with stopped");
386
387 return ptr;
388 }
389
390 // Check if we need a range check for an array access. This is the case if the index is either negative or if it could
391 // be greater or equal the smallest possible array size (i.e. out-of-bounds).
392 bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
393 const TypeInt* index_type = _gvn.type(index)->is_int();
394 return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
395 }
396
397 void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
398 Node* tst;
399 if (sizetype->_hi <= 0) {
400 // The greatest array bound is negative, so we can conclude that we're
401 // compiling unreachable code, but the unsigned compare trick used below
402 // only works with non-negative lengths. Instead, hack "tst" to be zero so
403 // the uncommon_trap path will always be taken.
404 tst = _gvn.intcon(0);
405 } else {
406 // Range is constant in array-oop, so we can use the original state of mem
407 Node* len = load_array_length(ary);
408
409 // Test length vs index (standard trick using unsigned compare)
410 Node* chk = _gvn.transform(new CmpUNode(idx, len) );
411 BoolTest::mask btest = BoolTest::lt;
412 tst = _gvn.transform(new BoolNode(chk, btest) );
413 }
414 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
415 _gvn.set_type(rc, rc->Value(&_gvn));
416 if (!tst->is_Con()) {
417 record_for_igvn(rc);
418 }
419 set_control(_gvn.transform(new IfTrueNode(rc)));
420 // Branch to failure if out of bounds
421 {
422 PreserveJVMState pjvms(this);
423 set_control(_gvn.transform(new IfFalseNode(rc)));
424 if (C->allow_range_check_smearing()) {
425 // Do not use builtin_throw, since range checks are sometimes
426 // made more stringent by an optimistic transformation.
427 // This creates "tentative" range checks at this point,
428 // which are not guaranteed to throw exceptions.
429 // See IfNode::Ideal, is_range_check, adjust_check.
430 uncommon_trap(Deoptimization::Reason_range_check,
431 Deoptimization::Action_make_not_entrant,
432 nullptr, "range_check");
433 } else {
434 // If we have already recompiled with the range-check-widening
435 // heroic optimization turned off, then we must really be throwing
436 // range check exceptions.
437 builtin_throw(Deoptimization::Reason_range_check);
438 }
439 }
440 }
441
442 // For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
443 // and null-freeness. We can either prepare the speculative type for later uses or emit explicit speculative checks with
444 // traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
445 // non-null-free implies non-flat which allows us to remove flatness checks). This makes the graph simpler.
446 Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
447 const Type*& element_type) {
448 if (!array_type->is_flat() && !array_type->is_not_flat()) {
449 // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
450 // we can rely on a fixed memory layout (i.e. either a flat layout or not).
451 array = cast_to_speculative_array_type(array, array_type, element_type);
452 } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
453 // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
454 // at this bci.
455 array = cast_to_profiled_array_type(array);
456 }
457
458 // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile data
459 // whether we have a non-null-free or non-flat array. Speculating on a non-null-free array doesn't help aaload but could
460 // be profitable for a subsequent aastore.
461 if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
462 array = speculate_non_null_free_array(array, array_type);
463 }
464 if (!array_type->is_flat() && !array_type->is_not_flat()) {
465 array = speculate_non_flat_array(array, array_type);
466 }
467 return array;
468 }
469
470 // Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
471 // wrong. On the fast path, we add a CheckCastPP to use the exact type.
472 Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
473 Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
474 ciKlass* speculative_array_type = array_type->speculative_type();
475 if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
476 // No speculative type, check profile data at this bci
477 speculative_array_type = nullptr;
478 reason = Deoptimization::Reason_class_check;
479 if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
480 ciKlass* profiled_element_type = nullptr;
481 ProfilePtrKind element_ptr = ProfileMaybeNull;
482 bool flat_array = true;
483 bool null_free_array = true;
484 method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
485 null_free_array);
486 }
487 }
488 if (speculative_array_type != nullptr) {
489 // Speculate that this array has the exact type reported by profile data
490 Node* casted_array = nullptr;
491 DEBUG_ONLY(Node* old_control = control();)
492 Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
493 if (stopped()) {
494 // The check always fails and therefore profile information is incorrect. Don't use it.
495 assert(old_control == slow_ctl, "type check should have been removed");
496 set_control(slow_ctl);
497 } else if (!slow_ctl->is_top()) {
498 { PreserveJVMState pjvms(this);
499 set_control(slow_ctl);
500 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
501 }
502 replace_in_map(array, casted_array);
503 array_type = _gvn.type(casted_array)->is_aryptr();
504 element_type = array_type->elem();
505 return casted_array;
506 }
507 }
508 return array;
509 }
510
511 // Create a CheckCastPP when the speculative type can improve the current type.
512 Node* Parse::cast_to_profiled_array_type(Node* const array) {
513 ciKlass* array_type = nullptr;
514 ciKlass* element_type = nullptr;
515 ProfilePtrKind element_ptr = ProfileMaybeNull;
516 bool flat_array = true;
517 bool null_free_array = true;
518 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
519 if (array_type != nullptr) {
520 return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
521 }
522 return array;
523 }
524
525 // Speculate that the array is non-null-free. We emit a trap when this turns out to be
526 // wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
527 Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
528 bool null_free_array = true;
529 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
530 if (array_type->speculative() != nullptr &&
531 array_type->speculative()->is_aryptr()->is_not_null_free() &&
532 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
533 null_free_array = false;
534 reason = Deoptimization::Reason_speculate_class_check;
535 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
536 ciKlass* profiled_array_type = nullptr;
537 ciKlass* profiled_element_type = nullptr;
538 ProfilePtrKind element_ptr = ProfileMaybeNull;
539 bool flat_array = true;
540 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
541 null_free_array);
542 reason = Deoptimization::Reason_class_check;
543 }
544 if (!null_free_array) {
545 { // Deoptimize if null-free array
546 BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
547 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
548 }
549 assert(!stopped(), "null-free array should have been caught earlier");
550 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
551 replace_in_map(array, casted_array);
552 array_type = _gvn.type(casted_array)->is_aryptr();
553 return casted_array;
554 }
555 return array;
556 }
557
558 // Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
559 // On the fast path, we add a CheckCastPP to use the non-flat type.
560 Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
561 bool flat_array = true;
562 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
563 if (array_type->speculative() != nullptr &&
564 array_type->speculative()->is_aryptr()->is_not_flat() &&
565 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
566 flat_array = false;
567 reason = Deoptimization::Reason_speculate_class_check;
568 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
569 ciKlass* profiled_array_type = nullptr;
570 ciKlass* profiled_element_type = nullptr;
571 ProfilePtrKind element_ptr = ProfileMaybeNull;
572 bool null_free_array = true;
573 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
574 null_free_array);
575 reason = Deoptimization::Reason_class_check;
576 }
577 if (!flat_array) {
578 { // Deoptimize if flat array
579 BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
580 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
581 }
582 assert(!stopped(), "flat array should have been caught earlier");
583 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
584 replace_in_map(array, casted_array);
585 return casted_array;
586 }
587 return array;
588 }
589
590 // returns IfNode
591 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
592 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
593 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
594 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
595 return iff;
596 }
597
598
599 // sentinel value for the target bci to mark never taken branches
600 // (according to profiling)
601 static const int never_reached = INT_MAX;
602
603 //------------------------------helper for tableswitch-------------------------
604 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
605 // True branch, use existing map info
606 { PreserveJVMState pjvms(this);
607 Node *iftrue = _gvn.transform( new IfTrueNode (iff) );
608 set_control( iftrue );
609 if (unc) {
610 repush_if_args();
611 uncommon_trap(Deoptimization::Reason_unstable_if,
612 Deoptimization::Action_reinterpret,
613 nullptr,
614 "taken always");
615 } else {
616 assert(dest_bci_if_true != never_reached, "inconsistent dest");
617 merge_new_path(dest_bci_if_true);
618 }
619 }
620
621 // False branch
622 Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
623 set_control( iffalse );
624 }
625
626 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
627 // True branch, use existing map info
628 { PreserveJVMState pjvms(this);
629 Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
630 set_control( iffalse );
631 if (unc) {
632 repush_if_args();
633 uncommon_trap(Deoptimization::Reason_unstable_if,
634 Deoptimization::Action_reinterpret,
635 nullptr,
636 "taken never");
637 } else {
638 assert(dest_bci_if_true != never_reached, "inconsistent dest");
639 merge_new_path(dest_bci_if_true);
640 }
641 }
642
643 // False branch
644 Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
645 set_control( iftrue );
646 }
647
648 void Parse::jump_if_always_fork(int dest_bci, bool unc) {
649 // False branch, use existing map and control()
650 if (unc) {
651 repush_if_args();
652 uncommon_trap(Deoptimization::Reason_unstable_if,
653 Deoptimization::Action_reinterpret,
654 nullptr,
655 "taken never");
656 } else {
657 assert(dest_bci != never_reached, "inconsistent dest");
658 merge_new_path(dest_bci);
659 }
660 }
661
662
663 extern "C" {
664 static int jint_cmp(const void *i, const void *j) {
665 int a = *(jint *)i;
666 int b = *(jint *)j;
667 return a > b ? 1 : a < b ? -1 : 0;
668 }
669 }
670
671
672 class SwitchRange : public StackObj {
673 // a range of integers coupled with a bci destination
674 jint _lo; // inclusive lower limit
675 jint _hi; // inclusive upper limit
676 int _dest;
677 float _cnt; // how many times this range was hit according to profiling
678
679 public:
680 jint lo() const { return _lo; }
681 jint hi() const { return _hi; }
682 int dest() const { return _dest; }
683 bool is_singleton() const { return _lo == _hi; }
684 float cnt() const { return _cnt; }
685
686 void setRange(jint lo, jint hi, int dest, float cnt) {
687 assert(lo <= hi, "must be a non-empty range");
688 _lo = lo, _hi = hi; _dest = dest; _cnt = cnt;
689 assert(_cnt >= 0, "");
690 }
691 bool adjoinRange(jint lo, jint hi, int dest, float cnt, bool trim_ranges) {
692 assert(lo <= hi, "must be a non-empty range");
693 if (lo == _hi+1) {
694 // see merge_ranges() comment below
695 if (trim_ranges) {
696 if (cnt == 0) {
697 if (_cnt != 0) {
698 return false;
699 }
700 if (dest != _dest) {
701 _dest = never_reached;
702 }
703 } else {
704 if (_cnt == 0) {
705 return false;
706 }
707 if (dest != _dest) {
708 return false;
709 }
710 }
711 } else {
712 if (dest != _dest) {
713 return false;
714 }
715 }
716 _hi = hi;
717 _cnt += cnt;
718 return true;
719 }
720 return false;
721 }
722
723 void set (jint value, int dest, float cnt) {
724 setRange(value, value, dest, cnt);
725 }
726 bool adjoin(jint value, int dest, float cnt, bool trim_ranges) {
727 return adjoinRange(value, value, dest, cnt, trim_ranges);
728 }
729 bool adjoin(SwitchRange& other) {
730 return adjoinRange(other._lo, other._hi, other._dest, other._cnt, false);
731 }
732
733 void print() {
734 if (is_singleton())
735 tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
736 else if (lo() == min_jint)
737 tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
738 else if (hi() == max_jint)
739 tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
740 else
741 tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
742 }
743 };
744
745 // We try to minimize the number of ranges and the size of the taken
746 // ones using profiling data. When ranges are created,
747 // SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
748 // if both were never hit or both were hit to build longer unreached
749 // ranges. Here, we now merge adjoining ranges with the same
750 // destination and finally set destination of unreached ranges to the
751 // special value never_reached because it can help minimize the number
752 // of tests that are necessary.
753 //
754 // For instance:
755 // [0, 1] to target1 sometimes taken
756 // [1, 2] to target1 never taken
757 // [2, 3] to target2 never taken
758 // would lead to:
759 // [0, 1] to target1 sometimes taken
760 // [1, 3] never taken
761 //
762 // (first 2 ranges to target1 are not merged)
763 static void merge_ranges(SwitchRange* ranges, int& rp) {
764 if (rp == 0) {
765 return;
766 }
767 int shift = 0;
768 for (int j = 0; j < rp; j++) {
769 SwitchRange& r1 = ranges[j-shift];
770 SwitchRange& r2 = ranges[j+1];
771 if (r1.adjoin(r2)) {
772 shift++;
773 } else if (shift > 0) {
774 ranges[j+1-shift] = r2;
775 }
776 }
777 rp -= shift;
778 for (int j = 0; j <= rp; j++) {
779 SwitchRange& r = ranges[j];
780 if (r.cnt() == 0 && r.dest() != never_reached) {
781 r.setRange(r.lo(), r.hi(), never_reached, r.cnt());
782 }
783 }
784 }
785
786 //-------------------------------do_tableswitch--------------------------------
787 void Parse::do_tableswitch() {
788 // Get information about tableswitch
789 int default_dest = iter().get_dest_table(0);
790 jint lo_index = iter().get_int_table(1);
791 jint hi_index = iter().get_int_table(2);
792 int len = hi_index - lo_index + 1;
793
794 if (len < 1) {
795 // If this is a backward branch, add safepoint
796 maybe_add_safepoint(default_dest);
797 pop(); // the effect of the instruction execution on the operand stack
798 merge(default_dest);
799 return;
800 }
801
802 ciMethodData* methodData = method()->method_data();
803 ciMultiBranchData* profile = nullptr;
804 if (methodData->is_mature() && UseSwitchProfiling) {
805 ciProfileData* data = methodData->bci_to_data(bci());
806 if (data != nullptr && data->is_MultiBranchData()) {
807 profile = (ciMultiBranchData*)data;
808 }
809 }
810 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
811
812 // generate decision tree, using trichotomy when possible
813 int rnum = len+2;
814 bool makes_backward_branch = (default_dest <= bci());
815 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
816 int rp = -1;
817 if (lo_index != min_jint) {
818 float cnt = 1.0F;
819 if (profile != nullptr) {
820 cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
821 }
822 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
823 }
824 for (int j = 0; j < len; j++) {
825 jint match_int = lo_index+j;
826 int dest = iter().get_dest_table(j+3);
827 makes_backward_branch |= (dest <= bci());
828 float cnt = 1.0F;
829 if (profile != nullptr) {
830 cnt = (float)profile->count_at(j);
831 }
832 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
833 ranges[++rp].set(match_int, dest, cnt);
834 }
835 }
836 jint highest = lo_index+(len-1);
837 assert(ranges[rp].hi() == highest, "");
838 if (highest != max_jint) {
839 float cnt = 1.0F;
840 if (profile != nullptr) {
841 cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
842 }
843 if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
844 ranges[++rp].setRange(highest+1, max_jint, default_dest, cnt);
845 }
846 }
847 assert(rp < len+2, "not too many ranges");
848
849 if (trim_ranges) {
850 merge_ranges(ranges, rp);
851 }
852
853 // Safepoint in case if backward branch observed
854 if (makes_backward_branch) {
855 add_safepoint();
856 }
857
858 Node* lookup = pop(); // lookup value
859 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
860 }
861
862
863 //------------------------------do_lookupswitch--------------------------------
864 void Parse::do_lookupswitch() {
865 // Get information about lookupswitch
866 int default_dest = iter().get_dest_table(0);
867 jint len = iter().get_int_table(1);
868
869 if (len < 1) { // If this is a backward branch, add safepoint
870 maybe_add_safepoint(default_dest);
871 pop(); // the effect of the instruction execution on the operand stack
872 merge(default_dest);
873 return;
874 }
875
876 ciMethodData* methodData = method()->method_data();
877 ciMultiBranchData* profile = nullptr;
878 if (methodData->is_mature() && UseSwitchProfiling) {
879 ciProfileData* data = methodData->bci_to_data(bci());
880 if (data != nullptr && data->is_MultiBranchData()) {
881 profile = (ciMultiBranchData*)data;
882 }
883 }
884 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
885
886 // generate decision tree, using trichotomy when possible
887 jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
888 {
889 for (int j = 0; j < len; j++) {
890 table[3*j+0] = iter().get_int_table(2+2*j);
891 table[3*j+1] = iter().get_dest_table(2+2*j+1);
892 // Handle overflow when converting from uint to jint
893 table[3*j+2] = (profile == nullptr) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
894 }
895 qsort(table, len, 3*sizeof(table[0]), jint_cmp);
896 }
897
898 float default_cnt = 1.0F;
899 if (profile != nullptr) {
900 juint defaults = max_juint - len;
901 default_cnt = (float)profile->default_count()/(float)defaults;
902 }
903
904 int rnum = len*2+1;
905 bool makes_backward_branch = (default_dest <= bci());
906 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
907 int rp = -1;
908 for (int j = 0; j < len; j++) {
909 jint match_int = table[3*j+0];
910 jint dest = table[3*j+1];
911 jint cnt = table[3*j+2];
912 jint next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
913 makes_backward_branch |= (dest <= bci());
914 float c = default_cnt * ((float)match_int - (float)next_lo);
915 if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, c, trim_ranges))) {
916 assert(default_dest != never_reached, "sentinel value for dead destinations");
917 ranges[++rp].setRange(next_lo, match_int-1, default_dest, c);
918 }
919 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, (float)cnt, trim_ranges)) {
920 assert(dest != never_reached, "sentinel value for dead destinations");
921 ranges[++rp].set(match_int, dest, (float)cnt);
922 }
923 }
924 jint highest = table[3*(len-1)];
925 assert(ranges[rp].hi() == highest, "");
926 if (highest != max_jint &&
927 !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest), trim_ranges)) {
928 ranges[++rp].setRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest));
929 }
930 assert(rp < rnum, "not too many ranges");
931
932 if (trim_ranges) {
933 merge_ranges(ranges, rp);
934 }
935
936 // Safepoint in case backward branch observed
937 if (makes_backward_branch) {
938 add_safepoint();
939 }
940
941 Node *lookup = pop(); // lookup value
942 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
943 }
944
945 static float if_prob(float taken_cnt, float total_cnt) {
946 assert(taken_cnt <= total_cnt, "");
947 if (total_cnt == 0) {
948 return PROB_FAIR;
949 }
950 float p = taken_cnt / total_cnt;
951 return clamp(p, PROB_MIN, PROB_MAX);
952 }
953
954 static float if_cnt(float cnt) {
955 if (cnt == 0) {
956 return COUNT_UNKNOWN;
957 }
958 return cnt;
959 }
960
961 static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
962 float total_cnt = 0;
963 for (SwitchRange* sr = lo; sr <= hi; sr++) {
964 total_cnt += sr->cnt();
965 }
966 return total_cnt;
967 }
968
969 class SwitchRanges : public ResourceObj {
970 public:
971 SwitchRange* _lo;
972 SwitchRange* _hi;
973 SwitchRange* _mid;
974 float _cost;
975
976 enum {
977 Start,
978 LeftDone,
979 RightDone,
980 Done
981 } _state;
982
983 SwitchRanges(SwitchRange *lo, SwitchRange *hi)
984 : _lo(lo), _hi(hi), _mid(nullptr),
985 _cost(0), _state(Start) {
986 }
987
988 SwitchRanges()
989 : _lo(nullptr), _hi(nullptr), _mid(nullptr),
990 _cost(0), _state(Start) {}
991 };
992
993 // Estimate cost of performing a binary search on lo..hi
994 static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
995 GrowableArray<SwitchRanges> tree;
996 SwitchRanges root(lo, hi);
997 tree.push(root);
998
999 float cost = 0;
1000 do {
1001 SwitchRanges& r = *tree.adr_at(tree.length()-1);
1002 if (r._hi != r._lo) {
1003 if (r._mid == nullptr) {
1004 float r_cnt = sum_of_cnts(r._lo, r._hi);
1005
1006 if (r_cnt == 0) {
1007 tree.pop();
1008 cost = 0;
1009 continue;
1010 }
1011
1012 SwitchRange* mid = nullptr;
1013 mid = r._lo;
1014 for (float cnt = 0; ; ) {
1015 assert(mid <= r._hi, "out of bounds");
1016 cnt += mid->cnt();
1017 if (cnt > r_cnt / 2) {
1018 break;
1019 }
1020 mid++;
1021 }
1022 assert(mid <= r._hi, "out of bounds");
1023 r._mid = mid;
1024 r._cost = r_cnt / total_cnt;
1025 }
1026 r._cost += cost;
1027 if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
1028 cost = 0;
1029 r._state = SwitchRanges::LeftDone;
1030 tree.push(SwitchRanges(r._lo, r._mid-1));
1031 } else if (r._state < SwitchRanges::RightDone) {
1032 cost = 0;
1033 r._state = SwitchRanges::RightDone;
1034 tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
1035 } else {
1036 tree.pop();
1037 cost = r._cost;
1038 }
1039 } else {
1040 tree.pop();
1041 cost = r._cost;
1042 }
1043 } while (tree.length() > 0);
1044
1045
1046 return cost;
1047 }
1048
1049 // It sometimes pays off to test most common ranges before the binary search
1050 void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
1051 uint nr = hi - lo + 1;
1052 float total_cnt = sum_of_cnts(lo, hi);
1053
1054 float min = compute_tree_cost(lo, hi, total_cnt);
1055 float extra = 1;
1056 float sub = 0;
1057
1058 SwitchRange* array1 = lo;
1059 SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);
1060
1061 SwitchRange* ranges = nullptr;
1062
1063 while (nr >= 2) {
1064 assert(lo == array1 || lo == array2, "one the 2 already allocated arrays");
1065 ranges = (lo == array1) ? array2 : array1;
1066
1067 // Find highest frequency range
1068 SwitchRange* candidate = lo;
1069 for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
1070 if (sr->cnt() > candidate->cnt()) {
1071 candidate = sr;
1072 }
1073 }
1074 SwitchRange most_freq = *candidate;
1075 if (most_freq.cnt() == 0) {
1076 break;
1077 }
1078
1079 // Copy remaining ranges into another array
1080 int shift = 0;
1081 for (uint i = 0; i < nr; i++) {
1082 SwitchRange* sr = &lo[i];
1083 if (sr != candidate) {
1084 ranges[i-shift] = *sr;
1085 } else {
1086 shift++;
1087 if (i > 0 && i < nr-1) {
1088 SwitchRange prev = lo[i-1];
1089 prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.cnt());
1090 if (prev.adjoin(lo[i+1])) {
1091 shift++;
1092 i++;
1093 }
1094 ranges[i-shift] = prev;
1095 }
1096 }
1097 }
1098 nr -= shift;
1099
1100 // Evaluate cost of testing the most common range and performing a
1101 // binary search on the other ranges
1102 float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
1103 if (cost >= min) {
1104 break;
1105 }
1106 // swap arrays
1107 lo = &ranges[0];
1108 hi = &ranges[nr-1];
1109
1110 // It pays off: emit the test for the most common range
1111 assert(most_freq.cnt() > 0, "must be taken");
1112 Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
1113 Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(java_subtract(most_freq.hi(), most_freq.lo()))));
1114 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
1115 IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
1116 jump_if_true_fork(iff, most_freq.dest(), false);
1117
1118 sub += most_freq.cnt() / total_cnt;
1119 extra += 1 - sub;
1120 min = cost;
1121 }
1122 }
1123
1124 //----------------------------create_jump_tables-------------------------------
1125 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
1126 // Are jumptables enabled
1127 if (!UseJumpTables) return false;
1128
1129 // Are jumptables supported
1130 if (!Matcher::has_match_rule(Op_Jump)) return false;
1131
1132 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1133
1134 // Decide if a guard is needed to lop off big ranges at either (or
1135 // both) end(s) of the input set. We'll call this the default target
1136 // even though we can't be sure that it is the true "default".
1137
1138 bool needs_guard = false;
1139 int default_dest;
1140 int64_t total_outlier_size = 0;
1141 int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
1142 int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;
1143
1144 if (lo->dest() == hi->dest()) {
1145 total_outlier_size = hi_size + lo_size;
1146 default_dest = lo->dest();
1147 } else if (lo_size > hi_size) {
1148 total_outlier_size = lo_size;
1149 default_dest = lo->dest();
1150 } else {
1151 total_outlier_size = hi_size;
1152 default_dest = hi->dest();
1153 }
1154
1155 float total = sum_of_cnts(lo, hi);
1156 float cost = compute_tree_cost(lo, hi, total);
1157
1158 // If a guard test will eliminate very sparse end ranges, then
1159 // it is worth the cost of an extra jump.
1160 float trimmed_cnt = 0;
1161 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
1162 needs_guard = true;
1163 if (default_dest == lo->dest()) {
1164 trimmed_cnt += lo->cnt();
1165 lo++;
1166 }
1167 if (default_dest == hi->dest()) {
1168 trimmed_cnt += hi->cnt();
1169 hi--;
1170 }
1171 }
1172
1173 // Find the total number of cases and ranges
1174 int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
1175 int num_range = hi - lo + 1;
1176
1177 // Don't create table if: too large, too small, or too sparse.
1178 if (num_cases > MaxJumpTableSize)
1179 return false;
1180 if (UseSwitchProfiling) {
1181 // MinJumpTableSize is set so with a well balanced binary tree,
1182 // when the number of ranges is MinJumpTableSize, it's cheaper to
1183 // go through a JumpNode that a tree of IfNodes. Average cost of a
1184 // tree of IfNodes with MinJumpTableSize is
1185 // log2f(MinJumpTableSize) comparisons. So if the cost computed
1186 // from profile data is less than log2f(MinJumpTableSize) then
1187 // going with the binary search is cheaper.
1188 if (cost < log2f(MinJumpTableSize)) {
1189 return false;
1190 }
1191 } else {
1192 if (num_cases < MinJumpTableSize)
1193 return false;
1194 }
1195 if (num_cases > (MaxJumpTableSparseness * num_range))
1196 return false;
1197
1198 // Normalize table lookups to zero
1199 int lowval = lo->lo();
1200 key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );
1201
1202 // Generate a guard to protect against input keyvals that aren't
1203 // in the switch domain.
1204 if (needs_guard) {
1205 Node* size = _gvn.intcon(num_cases);
1206 Node* cmp = _gvn.transform(new CmpUNode(key_val, size));
1207 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
1208 IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
1209 jump_if_true_fork(iff, default_dest, trim_ranges && trimmed_cnt == 0);
1210
1211 total -= trimmed_cnt;
1212 }
1213
1214 // Create an ideal node JumpTable that has projections
1215 // of all possible ranges for a switch statement
1216 // The key_val input must be converted to a pointer offset and scaled.
1217 // Compare Parse::array_addressing above.
1218
1219 // Clean the 32-bit int into a real 64-bit offset.
1220 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
1221 // Make I2L conversion control dependent to prevent it from
1222 // floating above the range check during loop optimizations.
1223 // Do not use a narrow int type here to prevent the data path from dying
1224 // while the control path is not removed. This can happen if the type of key_val
1225 // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast
1226 // would be replaced by TOP while C2 is not able to fold the corresponding range checks.
1227 // Set _carry_dependency for the cast to avoid being removed by IGVN.
1228 #ifdef _LP64
1229 key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */);
1230 #endif
1231
1232 // Shift the value by wordsize so we have an index into the table, rather
1233 // than a switch value
1234 Node *shiftWord = _gvn.MakeConX(wordSize);
1235 key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
1236
1237 // Create the JumpNode
1238 Arena* arena = C->comp_arena();
1239 float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
1240 int i = 0;
1241 if (total == 0) {
1242 for (SwitchRange* r = lo; r <= hi; r++) {
1243 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1244 probs[i] = 1.0F / num_cases;
1245 }
1246 }
1247 } else {
1248 for (SwitchRange* r = lo; r <= hi; r++) {
1249 float prob = r->cnt()/total;
1250 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1251 probs[i] = prob / (r->hi() - r->lo() + 1);
1252 }
1253 }
1254 }
1255
1256 ciMethodData* methodData = method()->method_data();
1257 ciMultiBranchData* profile = nullptr;
1258 if (methodData->is_mature()) {
1259 ciProfileData* data = methodData->bci_to_data(bci());
1260 if (data != nullptr && data->is_MultiBranchData()) {
1261 profile = (ciMultiBranchData*)data;
1262 }
1263 }
1264
1265 Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == nullptr ? COUNT_UNKNOWN : total));
1266
1267 // These are the switch destinations hanging off the jumpnode
1268 i = 0;
1269 for (SwitchRange* r = lo; r <= hi; r++) {
1270 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1271 Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
1272 {
1273 PreserveJVMState pjvms(this);
1274 set_control(input);
1275 jump_if_always_fork(r->dest(), trim_ranges && r->cnt() == 0);
1276 }
1277 }
1278 }
1279 assert(i == num_cases, "miscount of cases");
1280 stop_and_kill_map(); // no more uses for this JVMS
1281 return true;
1282 }
1283
1284 //----------------------------jump_switch_ranges-------------------------------
1285 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
1286 Block* switch_block = block();
1287 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1288
1289 if (switch_depth == 0) {
1290 // Do special processing for the top-level call.
1291 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
1292 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
1293
1294 // Decrement pred-numbers for the unique set of nodes.
1295 #ifdef ASSERT
1296 if (!trim_ranges) {
1297 // Ensure that the block's successors are a (duplicate-free) set.
1298 int successors_counted = 0; // block occurrences in [hi..lo]
1299 int unique_successors = switch_block->num_successors();
1300 for (int i = 0; i < unique_successors; i++) {
1301 Block* target = switch_block->successor_at(i);
1302
1303 // Check that the set of successors is the same in both places.
1304 int successors_found = 0;
1305 for (SwitchRange* p = lo; p <= hi; p++) {
1306 if (p->dest() == target->start()) successors_found++;
1307 }
1308 assert(successors_found > 0, "successor must be known");
1309 successors_counted += successors_found;
1310 }
1311 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
1312 }
1313 #endif
1314
1315 // Maybe prune the inputs, based on the type of key_val.
1316 jint min_val = min_jint;
1317 jint max_val = max_jint;
1318 const TypeInt* ti = key_val->bottom_type()->isa_int();
1319 if (ti != nullptr) {
1320 min_val = ti->_lo;
1321 max_val = ti->_hi;
1322 assert(min_val <= max_val, "invalid int type");
1323 }
1324 while (lo->hi() < min_val) {
1325 lo++;
1326 }
1327 if (lo->lo() < min_val) {
1328 lo->setRange(min_val, lo->hi(), lo->dest(), lo->cnt());
1329 }
1330 while (hi->lo() > max_val) {
1331 hi--;
1332 }
1333 if (hi->hi() > max_val) {
1334 hi->setRange(hi->lo(), max_val, hi->dest(), hi->cnt());
1335 }
1336
1337 linear_search_switch_ranges(key_val, lo, hi);
1338 }
1339
1340 #ifndef PRODUCT
1341 if (switch_depth == 0) {
1342 _max_switch_depth = 0;
1343 _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1;
1344 }
1345 SwitchRange* orig_lo = lo;
1346 SwitchRange* orig_hi = hi;
1347 #endif
1348
1349 // The lower-range processing is done iteratively to avoid O(N) stack depth
1350 // when the profiling-based pivot repeatedly selects mid==lo (JDK-8366138).
1351 // The upper-range processing remains recursive but is only reached for
1352 // balanced splits, bounding its depth to O(log N).
1353 // Termination: every iteration either exits or strictly decreases hi-lo:
1354 // lo == mid && mid < hi, increments lo
1355 // lo < mid <= hi, sets hi = mid - 1.
1356 for (int depth = switch_depth;; depth++) {
1357 #ifndef PRODUCT
1358 _max_switch_depth = MAX2(depth, _max_switch_depth);
1359 #endif
1360
1361 assert(lo <= hi, "must be a non-empty set of ranges");
1362 if (lo == hi) {
1363 jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1364 break;
1365 }
1366
1367 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
1368 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
1369
1370 if (create_jump_tables(key_val, lo, hi)) return;
1371
1372 SwitchRange* mid = nullptr;
1373 float total_cnt = sum_of_cnts(lo, hi);
1374
1375 int nr = hi - lo + 1;
1376 // With total_cnt==0 the profiling pivot degenerates to mid==lo
1377 // (0 >= 0/2), producing a linear chain of If nodes instead of a
1378 // balanced tree. A balanced tree is strictly better here: all paths
1379 // are cold, so a balanced split gives fewer comparisons at runtime
1380 // and avoids pathological memory usage in the optimizer.
1381 if (UseSwitchProfiling && total_cnt > 0) {
1382 // Don't keep the binary search tree balanced: pick up mid point
1383 // that split frequencies in half.
1384 float cnt = 0;
1385 for (SwitchRange* sr = lo; sr <= hi; sr++) {
1386 cnt += sr->cnt();
1387 if (cnt >= total_cnt / 2) {
1388 mid = sr;
1389 break;
1390 }
1391 }
1392 } else {
1393 mid = lo + nr/2;
1394
1395 // if there is an easy choice, pivot at a singleton:
1396 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
1397
1398 assert(lo < mid && mid <= hi, "good pivot choice");
1399 assert(nr != 2 || mid == hi, "should pick higher of 2");
1400 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
1401 }
1402 assert(mid != nullptr, "mid must be set");
1403
1404 Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());
1405
1406 if (mid->is_singleton()) {
1407 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
1408 jump_if_false_fork(iff_ne, mid->dest(), trim_ranges && mid->cnt() == 0);
1409
1410 // Special Case: If there are exactly three ranges, and the high
1411 // and low range each go to the same place, omit the "gt" test,
1412 // since it will not discriminate anything.
1413 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;
1414
1415 // if there is a higher range, test for it and process it:
1416 if (mid < hi && !eq_test_only) {
1417 // two comparisons of same values--should enable 1 test for 2 branches
1418 // Use BoolTest::lt instead of BoolTest::gt
1419 float cnt = sum_of_cnts(lo, mid-1);
1420 IfNode *iff_lt = jump_if_fork_int(key_val, test_val, BoolTest::lt, if_prob(cnt, total_cnt), if_cnt(cnt));
1421 Node *iftrue = _gvn.transform( new IfTrueNode(iff_lt) );
1422 Node *iffalse = _gvn.transform( new IfFalseNode(iff_lt) );
1423 { PreserveJVMState pjvms(this);
1424 set_control(iffalse);
1425 jump_switch_ranges(key_val, mid+1, hi, depth+1);
1426 }
1427 set_control(iftrue);
1428 }
1429
1430 } else {
1431 // mid is a range, not a singleton, so treat mid..hi as a unit
1432 float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
1433 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));
1434
1435 // if there is a higher range, test for it and process it:
1436 if (mid == hi) {
1437 jump_if_true_fork(iff_ge, mid->dest(), trim_ranges && cnt == 0);
1438 } else {
1439 Node *iftrue = _gvn.transform( new IfTrueNode(iff_ge) );
1440 Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
1441 { PreserveJVMState pjvms(this);
1442 set_control(iftrue);
1443 jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, depth+1);
1444 }
1445 set_control(iffalse);
1446 }
1447 }
1448
1449 // Process the lower range: iterate instead of recursing.
1450 if (mid == lo) {
1451 if (mid->is_singleton()) {
1452 lo++;
1453 } else {
1454 jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1455 break;
1456 }
1457 } else {
1458 hi = mid - 1;
1459 }
1460 }
1461
1462 // Decrease pred_count for each successor after all is done.
1463 if (switch_depth == 0) {
1464 int unique_successors = switch_block->num_successors();
1465 for (int i = 0; i < unique_successors; i++) {
1466 Block* target = switch_block->successor_at(i);
1467 // Throw away the pre-allocated path for each unique successor.
1468 target->next_path_num();
1469 }
1470 }
1471
1472 #ifndef PRODUCT
1473 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
1474 SwitchRange* r;
1475 int nsing = 0;
1476 for (r = orig_lo; r <= orig_hi; r++) {
1477 if( r->is_singleton() ) nsing++;
1478 }
1479 tty->print(">>> ");
1480 _method->print_short_name();
1481 tty->print_cr(" switch decision tree");
1482 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
1483 (int) (orig_hi-orig_lo+1), nsing, _max_switch_depth, _est_switch_depth);
1484 if (_max_switch_depth > _est_switch_depth) {
1485 tty->print_cr("******** BAD SWITCH DEPTH ********");
1486 }
1487 tty->print(" ");
1488 for (r = orig_lo; r <= orig_hi; r++) {
1489 r->print();
1490 }
1491 tty->cr();
1492 }
1493 #endif
1494 }
1495
1496 Node* Parse::floating_point_mod(Node* a, Node* b, BasicType type) {
1497 assert(type == BasicType::T_FLOAT || type == BasicType::T_DOUBLE, "only float and double are floating points");
1498 CallLeafPureNode* mod = type == BasicType::T_DOUBLE ? static_cast<CallLeafPureNode*>(new ModDNode(C, a, b)) : new ModFNode(C, a, b);
1499
1500 set_predefined_input_for_runtime_call(mod);
1501 mod = _gvn.transform(mod)->as_CallLeafPure();
1502 set_predefined_output_for_runtime_call(mod);
1503 Node* result = _gvn.transform(new ProjNode(mod, TypeFunc::Parms + 0));
1504 record_for_igvn(mod);
1505 return result;
1506 }
1507
1508 void Parse::l2f() {
1509 Node* f2 = pop();
1510 Node* f1 = pop();
1511 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
1512 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
1513 "l2f", nullptr, //no memory effects
1514 f1, f2);
1515 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1516
1517 push(res);
1518 }
1519
1520 // Handle jsr and jsr_w bytecode
1521 void Parse::do_jsr() {
1522 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
1523
1524 // Store information about current state, tagged with new _jsr_bci
1525 int return_bci = iter().next_bci();
1526 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
1527
1528 // The way we do things now, there is only one successor block
1529 // for the jsr, because the target code is cloned by ciTypeFlow.
1530 Block* target = successor_for_bci(jsr_bci);
1531
1532 // What got pushed?
1533 const Type* ret_addr = target->peek();
1534 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
1535
1536 // Effect on jsr on stack
1537 push(_gvn.makecon(ret_addr));
1538
1539 // Flow to the jsr.
1540 merge(jsr_bci);
1541 }
1542
1543 // Handle ret bytecode
1544 void Parse::do_ret() {
1545 // Find to whom we return.
1546 assert(block()->num_successors() == 1, "a ret can only go one place now");
1547 Block* target = block()->successor_at(0);
1548 assert(!target->is_ready(), "our arrival must be expected");
1549 int pnum = target->next_path_num();
1550 merge_common(target, pnum);
1551 }
1552
1553 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
1554 if (btest != BoolTest::eq && btest != BoolTest::ne) {
1555 // Only ::eq and ::ne are supported for profile injection.
1556 return false;
1557 }
1558 if (test->is_Cmp() &&
1559 test->in(1)->Opcode() == Op_ProfileBoolean) {
1560 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
1561 int false_cnt = profile->false_count();
1562 int true_cnt = profile->true_count();
1563
1564 // Counts matching depends on the actual test operation (::eq or ::ne).
1565 // No need to scale the counts because profile injection was designed
1566 // to feed exact counts into VM.
1567 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt;
1568 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;
1569
1570 profile->consume();
1571 return true;
1572 }
1573 return false;
1574 }
1575
1576 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
1577 // We also check that individual counters are positive first, otherwise the sum can become positive.
1578 // (check for saturation, integer overflow, and immature counts)
1579 static bool counters_are_meaningful(int counter1, int counter2, int min) {
1580 // check for saturation, including "uint" values too big to fit in "int"
1581 if (counter1 < 0 || counter2 < 0) {
1582 return false;
1583 }
1584 // check for integer overflow of the sum
1585 int64_t sum = (int64_t)counter1 + (int64_t)counter2;
1586 STATIC_ASSERT(sizeof(counter1) < sizeof(sum));
1587 if (sum > INT_MAX) {
1588 return false;
1589 }
1590 // check if mature
1591 return (counter1 + counter2) >= min;
1592 }
1593
1594 //--------------------------dynamic_branch_prediction--------------------------
1595 // Try to gather dynamic branch prediction behavior. Return a probability
1596 // of the branch being taken and set the "cnt" field. Returns a -1.0
1597 // if we need to use static prediction for some reason.
1598 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
1599 ResourceMark rm;
1600
1601 cnt = COUNT_UNKNOWN;
1602
1603 int taken = 0;
1604 int not_taken = 0;
1605
1606 bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
1607
1608 if (use_mdo) {
1609 // Use MethodData information if it is available
1610 // FIXME: free the ProfileData structure
1611 ciMethodData* methodData = method()->method_data();
1612 if (!methodData->is_mature()) return PROB_UNKNOWN;
1613 ciProfileData* data = methodData->bci_to_data(bci());
1614 if (data == nullptr) {
1615 return PROB_UNKNOWN;
1616 }
1617 if (!data->is_JumpData()) return PROB_UNKNOWN;
1618
1619 // get taken and not taken values
1620 // NOTE: saturated UINT_MAX values become negative,
1621 // as do counts above INT_MAX.
1622 taken = data->as_JumpData()->taken();
1623 not_taken = 0;
1624 if (data->is_BranchData()) {
1625 not_taken = data->as_BranchData()->not_taken();
1626 }
1627
1628 // scale the counts to be commensurate with invocation counts:
1629 // NOTE: overflow for positive values is clamped at INT_MAX
1630 taken = method()->scale_count(taken);
1631 not_taken = method()->scale_count(not_taken);
1632 }
1633 // At this point, saturation or overflow is indicated by INT_MAX
1634 // or a negative value.
1635
1636 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
1637 // We also check that individual counters are positive first, otherwise the sum can become positive.
1638 if (!counters_are_meaningful(taken, not_taken, 40)) {
1639 if (C->log() != nullptr) {
1640 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
1641 }
1642 return PROB_UNKNOWN;
1643 }
1644
1645 // Compute frequency that we arrive here
1646 float sum = taken + not_taken;
1647 // Adjust, if this block is a cloned private block but the
1648 // Jump counts are shared. Taken the private counts for
1649 // just this path instead of the shared counts.
1650 if( block()->count() > 0 )
1651 sum = block()->count();
1652 cnt = sum / FreqCountInvocations;
1653
1654 // Pin probability to sane limits
1655 float prob;
1656 if( !taken )
1657 prob = (0+PROB_MIN) / 2;
1658 else if( !not_taken )
1659 prob = (1+PROB_MAX) / 2;
1660 else { // Compute probability of true path
1661 prob = (float)taken / (float)(taken + not_taken);
1662 if (prob > PROB_MAX) prob = PROB_MAX;
1663 if (prob < PROB_MIN) prob = PROB_MIN;
1664 }
1665
1666 assert((cnt > 0.0f) && (prob > 0.0f),
1667 "Bad frequency assignment in if cnt=%g prob=%g taken=%d not_taken=%d", cnt, prob, taken, not_taken);
1668
1669 if (C->log() != nullptr) {
1670 const char* prob_str = nullptr;
1671 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
1672 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
1673 char prob_str_buf[30];
1674 if (prob_str == nullptr) {
1675 jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob);
1676 prob_str = prob_str_buf;
1677 }
1678 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
1679 iter().get_dest(), taken, not_taken, cnt, prob_str);
1680 }
1681 return prob;
1682 }
1683
1684 //-----------------------------branch_prediction-------------------------------
1685 float Parse::branch_prediction(float& cnt,
1686 BoolTest::mask btest,
1687 int target_bci,
1688 Node* test) {
1689 float prob = dynamic_branch_prediction(cnt, btest, test);
1690 // If prob is unknown, switch to static prediction
1691 if (prob != PROB_UNKNOWN) return prob;
1692
1693 prob = PROB_FAIR; // Set default value
1694 if (btest == BoolTest::eq) // Exactly equal test?
1695 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent
1696 else if (btest == BoolTest::ne)
1697 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent
1698
1699 // If this is a conditional test guarding a backwards branch,
1700 // assume its a loop-back edge. Make it a likely taken branch.
1701 if (target_bci < bci()) {
1702 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
1703 // Since it's an OSR, we probably have profile data, but since
1704 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
1705 // Let's make a special check here for completely zero counts.
1706 ciMethodData* methodData = method()->method_data();
1707 if (!methodData->is_empty()) {
1708 ciProfileData* data = methodData->bci_to_data(bci());
1709 // Only stop for truly zero counts, which mean an unknown part
1710 // of the OSR-ed method, and we want to deopt to gather more stats.
1711 // If you have ANY counts, then this loop is simply 'cold' relative
1712 // to the OSR loop.
1713 if (data == nullptr ||
1714 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
1715 // This is the only way to return PROB_UNKNOWN:
1716 return PROB_UNKNOWN;
1717 }
1718 }
1719 }
1720 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
1721 }
1722
1723 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
1724 return prob;
1725 }
1726
1727 // The magic constants are chosen so as to match the output of
1728 // branch_prediction() when the profile reports a zero taken count.
1729 // It is important to distinguish zero counts unambiguously, because
1730 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
1731 // very small but nonzero probabilities, which if confused with zero
1732 // counts would keep the program recompiling indefinitely.
1733 bool Parse::seems_never_taken(float prob) const {
1734 return prob < PROB_MIN;
1735 }
1736
1737 //-------------------------------repush_if_args--------------------------------
1738 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
1739 inline int Parse::repush_if_args() {
1740 if (PrintOpto && WizardMode) {
1741 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
1742 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
1743 method()->print_name(); tty->cr();
1744 }
1745 int bc_depth = - Bytecodes::depth(iter().cur_bc());
1746 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
1747 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
1748 assert(argument(0) != nullptr, "must exist");
1749 assert(bc_depth == 1 || argument(1) != nullptr, "two must exist");
1750 inc_sp(bc_depth);
1751 return bc_depth;
1752 }
1753
1754 // Used by StressUnstableIfTraps
1755 static volatile int _trap_stress_counter = 0;
1756
1757 void Parse::increment_trap_stress_counter(Node*& counter, Node*& incr_store) {
1758 Node* counter_addr = makecon(TypeRawPtr::make((address)&_trap_stress_counter));
1759 counter = make_load(control(), counter_addr, TypeInt::INT, T_INT, MemNode::unordered);
1760 counter = _gvn.transform(new AddINode(counter, intcon(1)));
1761 incr_store = store_to_memory(control(), counter_addr, counter, T_INT, MemNode::unordered);
1762 }
1763
1764 //----------------------------------do_ifnull----------------------------------
1765 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
1766 int target_bci = iter().get_dest();
1767
1768 Node* counter = nullptr;
1769 Node* incr_store = nullptr;
1770 bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1771 if (do_stress_trap) {
1772 increment_trap_stress_counter(counter, incr_store);
1773 }
1774
1775 Block* branch_block = successor_for_bci(target_bci);
1776 Block* next_block = successor_for_bci(iter().next_bci());
1777
1778 float cnt;
1779 float prob = branch_prediction(cnt, btest, target_bci, c);
1780 if (prob == PROB_UNKNOWN) {
1781 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
1782 if (PrintOpto && Verbose) {
1783 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1784 }
1785 repush_if_args(); // to gather stats on loop
1786 uncommon_trap(Deoptimization::Reason_unreached,
1787 Deoptimization::Action_reinterpret,
1788 nullptr, "cold");
1789 if (C->eliminate_boxing()) {
1790 // Mark the successor blocks as parsed
1791 branch_block->next_path_num();
1792 next_block->next_path_num();
1793 }
1794 return;
1795 }
1796
1797 NOT_PRODUCT(explicit_null_checks_inserted++);
1798
1799 // Generate real control flow
1800 Node *tst = _gvn.transform( new BoolNode( c, btest ) );
1801
1802 // Sanity check the probability value
1803 assert(prob > 0.0f,"Bad probability in Parser");
1804 // Need xform to put node in hash table
1805 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1806 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1807 // True branch
1808 { PreserveJVMState pjvms(this);
1809 Node* iftrue = _gvn.transform( new IfTrueNode (iff) );
1810 set_control(iftrue);
1811
1812 if (stopped()) { // Path is dead?
1813 NOT_PRODUCT(explicit_null_checks_elided++);
1814 if (C->eliminate_boxing()) {
1815 // Mark the successor block as parsed
1816 branch_block->next_path_num();
1817 }
1818 } else { // Path is live.
1819 adjust_map_after_if(btest, c, prob, branch_block);
1820 if (!stopped()) {
1821 merge(target_bci);
1822 }
1823 }
1824 }
1825
1826 // False branch
1827 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1828 set_control(iffalse);
1829
1830 if (stopped()) { // Path is dead?
1831 NOT_PRODUCT(explicit_null_checks_elided++);
1832 if (C->eliminate_boxing()) {
1833 // Mark the successor block as parsed
1834 next_block->next_path_num();
1835 }
1836 } else { // Path is live.
1837 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1838 }
1839
1840 if (do_stress_trap) {
1841 stress_trap(iff, counter, incr_store);
1842 }
1843 }
1844
1845 //------------------------------------do_if------------------------------------
1846 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken, Node** stress_count_mem) {
1847 int target_bci = iter().get_dest();
1848
1849 Block* branch_block = successor_for_bci(target_bci);
1850 Block* next_block = successor_for_bci(iter().next_bci());
1851
1852 float cnt;
1853 float prob = branch_prediction(cnt, btest, target_bci, c);
1854 float untaken_prob = 1.0 - prob;
1855
1856 if (prob == PROB_UNKNOWN) {
1857 if (PrintOpto && Verbose) {
1858 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1859 }
1860 repush_if_args(); // to gather stats on loop
1861 uncommon_trap(Deoptimization::Reason_unreached,
1862 Deoptimization::Action_reinterpret,
1863 nullptr, "cold");
1864 if (C->eliminate_boxing()) {
1865 // Mark the successor blocks as parsed
1866 branch_block->next_path_num();
1867 next_block->next_path_num();
1868 }
1869 return;
1870 }
1871
1872 Node* counter = nullptr;
1873 Node* incr_store = nullptr;
1874 bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1875 if (do_stress_trap) {
1876 increment_trap_stress_counter(counter, incr_store);
1877 if (stress_count_mem != nullptr) {
1878 *stress_count_mem = incr_store;
1879 }
1880 }
1881
1882 // Sanity check the probability value
1883 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1884
1885 bool taken_if_true = true;
1886 // Convert BoolTest to canonical form:
1887 if (!BoolTest(btest).is_canonical()) {
1888 btest = BoolTest(btest).negate();
1889 taken_if_true = false;
1890 // prob is NOT updated here; it remains the probability of the taken
1891 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1892 }
1893 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1894
1895 Node* tst0 = new BoolNode(c, btest);
1896 Node* tst = _gvn.transform(tst0);
1897 BoolTest::mask taken_btest = BoolTest::illegal;
1898 BoolTest::mask untaken_btest = BoolTest::illegal;
1899
1900 if (tst->is_Bool()) {
1901 // Refresh c from the transformed bool node, since it may be
1902 // simpler than the original c. Also re-canonicalize btest.
1903 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p null)).
1904 // That can arise from statements like: if (x instanceof C) ...
1905 if (tst != tst0) {
1906 // Canonicalize one more time since transform can change it.
1907 btest = tst->as_Bool()->_test._test;
1908 if (!BoolTest(btest).is_canonical()) {
1909 // Reverse edges one more time...
1910 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1911 btest = tst->as_Bool()->_test._test;
1912 assert(BoolTest(btest).is_canonical(), "sanity");
1913 taken_if_true = !taken_if_true;
1914 }
1915 c = tst->in(1);
1916 }
1917 BoolTest::mask neg_btest = BoolTest(btest).negate();
1918 taken_btest = taken_if_true ? btest : neg_btest;
1919 untaken_btest = taken_if_true ? neg_btest : btest;
1920 }
1921
1922 // Generate real control flow
1923 float true_prob = (taken_if_true ? prob : untaken_prob);
1924 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1925 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1926 Node* taken_branch = new IfTrueNode(iff);
1927 Node* untaken_branch = new IfFalseNode(iff);
1928 if (!taken_if_true) { // Finish conversion to canonical form
1929 Node* tmp = taken_branch;
1930 taken_branch = untaken_branch;
1931 untaken_branch = tmp;
1932 }
1933
1934 // Branch is taken:
1935 { PreserveJVMState pjvms(this);
1936 taken_branch = _gvn.transform(taken_branch);
1937 set_control(taken_branch);
1938
1939 if (stopped()) {
1940 if (C->eliminate_boxing() && !new_path) {
1941 // Mark the successor block as parsed (if we haven't created a new path)
1942 branch_block->next_path_num();
1943 }
1944 } else {
1945 adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1946 if (!stopped()) {
1947 if (new_path) {
1948 // Merge by using a new path
1949 merge_new_path(target_bci);
1950 } else if (ctrl_taken != nullptr) {
1951 // Don't merge but save taken branch to be wired by caller
1952 *ctrl_taken = control();
1953 } else {
1954 merge(target_bci);
1955 }
1956 }
1957 }
1958 }
1959
1960 untaken_branch = _gvn.transform(untaken_branch);
1961 set_control(untaken_branch);
1962
1963 // Branch not taken.
1964 if (stopped() && ctrl_taken == nullptr) {
1965 if (C->eliminate_boxing()) {
1966 // Mark the successor block as parsed (if caller does not re-wire control flow)
1967 next_block->next_path_num();
1968 }
1969 } else {
1970 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1971 }
1972
1973 if (do_stress_trap) {
1974 stress_trap(iff, counter, incr_store);
1975 }
1976 }
1977
1978
1979 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1980 if (t->speculative() == nullptr) {
1981 return ProfileUnknownNull;
1982 }
1983 if (t->speculative_always_null()) {
1984 return ProfileAlwaysNull;
1985 }
1986 if (t->speculative_maybe_null()) {
1987 return ProfileMaybeNull;
1988 }
1989 return ProfileNeverNull;
1990 }
1991
1992 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1993 if (btest == BoolTest::ne) {
1994 {
1995 PreserveJVMState pjvms(this);
1996 inc_sp(2);
1997 null_check_common(input, T_OBJECT, true, nullptr,
1998 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1999 speculative_ptr_kind(tinput) == ProfileAlwaysNull);
2000 dec_sp(2);
2001 int target_bci = iter().get_dest();
2002 merge(target_bci);
2003 }
2004 record_for_igvn(eq_region);
2005 set_control(_gvn.transform(eq_region));
2006 } else {
2007 inc_sp(2);
2008 null_check_common(input, T_OBJECT, true, nullptr,
2009 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
2010 speculative_ptr_kind(tinput) == ProfileAlwaysNull);
2011 dec_sp(2);
2012 }
2013 }
2014
2015 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
2016 inc_sp(2);
2017 null_ctl = top();
2018 Node* cast = null_check_oop(input, &null_ctl,
2019 input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
2020 false,
2021 speculative_ptr_kind(tinput) == ProfileNeverNull &&
2022 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
2023 dec_sp(2);
2024 return cast;
2025 }
2026
2027 void Parse::acmp_type_check_or_trap(Node** non_null_input, ciKlass* input_type, Deoptimization::DeoptReason reason) {
2028 Node* slow_ctl = type_check_receiver(*non_null_input, input_type, 1.0, non_null_input);
2029 {
2030 PreserveJVMState pjvms(this);
2031 inc_sp(2);
2032 set_control(slow_ctl);
2033 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2034 }
2035 }
2036
2037 void Parse::acmp_type_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2038 Node* null_ctl;
2039 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2040
2041 if (input_type != nullptr) {
2042 Deoptimization::DeoptReason reason;
2043 if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2044 reason = Deoptimization::Reason_speculate_class_check;
2045 } else {
2046 reason = Deoptimization::Reason_class_check;
2047 }
2048 acmp_type_check_or_trap(&cast, input_type, reason);
2049 } else {
2050 // No specific type, check for inline type
2051 BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2052 inc_sp(2);
2053 uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2054 }
2055
2056 Node* ne_region = new RegionNode(2);
2057 ne_region->add_req(null_ctl);
2058 ne_region->add_req(control());
2059
2060 record_for_igvn(ne_region);
2061 set_control(_gvn.transform(ne_region));
2062 if (btest == BoolTest::ne) {
2063 {
2064 PreserveJVMState pjvms(this);
2065 if (null_ctl == top()) {
2066 replace_in_map(input, cast);
2067 }
2068 int target_bci = iter().get_dest();
2069 merge(target_bci);
2070 }
2071 record_for_igvn(eq_region);
2072 set_control(_gvn.transform(eq_region));
2073 } else {
2074 if (null_ctl == top()) {
2075 replace_in_map(input, cast);
2076 }
2077 set_control(_gvn.transform(ne_region));
2078 }
2079 }
2080
2081 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2082 ciKlass* left_type = nullptr;
2083 ciKlass* right_type = nullptr;
2084 ProfilePtrKind left_ptr = ProfileUnknownNull;
2085 ProfilePtrKind right_ptr = ProfileUnknownNull;
2086 bool left_inline_type = true;
2087 bool right_inline_type = true;
2088
2089 // Leverage profiling at acmp
2090 if (UseACmpProfile) {
2091 method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2092 if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2093 left_type = nullptr;
2094 right_type = nullptr;
2095 left_inline_type = true;
2096 right_inline_type = true;
2097 }
2098 if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2099 left_ptr = ProfileUnknownNull;
2100 right_ptr = ProfileUnknownNull;
2101 }
2102 }
2103
2104 if (UseTypeSpeculation) {
2105 record_profile_for_speculation(left, left_type, left_ptr);
2106 record_profile_for_speculation(right, right_type, right_ptr);
2107 }
2108
2109 if (!Arguments::is_valhalla_enabled()) {
2110 Node* cmp = CmpP(left, right);
2111 cmp = optimize_cmp_with_klass(cmp);
2112 do_if(btest, cmp);
2113 return;
2114 }
2115
2116 // Check for equality before potentially allocating
2117 if (left == right) {
2118 do_if(btest, makecon(TypeInt::CC_EQ));
2119 return;
2120 }
2121
2122 // Allocate inline type operands and re-execute on deoptimization
2123 if (left->is_InlineType()) {
2124 PreserveReexecuteState preexecs(this);
2125 inc_sp(2);
2126 jvms()->set_should_reexecute(true);
2127 left = left->as_InlineType()->buffer(this);
2128 }
2129 if (right->is_InlineType()) {
2130 PreserveReexecuteState preexecs(this);
2131 inc_sp(2);
2132 jvms()->set_should_reexecute(true);
2133 right = right->as_InlineType()->buffer(this);
2134 }
2135
2136 // First, do a normal pointer comparison
2137 const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2138 const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2139 Node* cmp = CmpP(left, right);
2140 record_for_igvn(cmp);
2141 cmp = optimize_cmp_with_klass(cmp);
2142 if (tleft == nullptr || !tleft->can_be_inline_type() ||
2143 tright == nullptr || !tright->can_be_inline_type()) {
2144 // This is sufficient, if one of the operands can't be an inline type
2145 do_if(btest, cmp);
2146 return;
2147 }
2148
2149 // Don't add traps to unstable if branches because additional checks are required to
2150 // decide if the operands are equal/substitutable and we therefore shouldn't prune
2151 // branches for one if based on the profiling of the acmp branches.
2152 // Also, OptimizeUnstableIf would set an incorrect re-rexecution state because it
2153 // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2154 // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2155 const bool can_trap = true;
2156
2157 Node* eq_region = nullptr;
2158 if (btest == BoolTest::eq) {
2159 do_if(btest, cmp, !can_trap, true);
2160 if (stopped()) {
2161 // Pointers are equal, operands must be equal
2162 return;
2163 }
2164 } else {
2165 assert(btest == BoolTest::ne, "only eq or ne");
2166 Node* is_not_equal = nullptr;
2167 eq_region = new RegionNode(3);
2168 {
2169 PreserveJVMState pjvms(this);
2170 // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2171 do_if(btest, cmp, !can_trap, false, &is_not_equal);
2172 if (!stopped()) {
2173 eq_region->init_req(1, control());
2174 }
2175 }
2176 if (is_not_equal == nullptr || is_not_equal->is_top()) {
2177 record_for_igvn(eq_region);
2178 set_control(_gvn.transform(eq_region));
2179 return;
2180 }
2181 set_control(is_not_equal);
2182 }
2183
2184 // Prefer speculative types if available
2185 if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2186 if (tleft->speculative_type() != nullptr) {
2187 left_type = tleft->speculative_type();
2188 }
2189 if (tright->speculative_type() != nullptr) {
2190 right_type = tright->speculative_type();
2191 }
2192 }
2193
2194 if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2195 ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2196 if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2197 left_ptr = speculative_left_ptr;
2198 } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2199 left_ptr = speculative_left_ptr;
2200 }
2201 }
2202 if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2203 ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2204 if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2205 right_ptr = speculative_right_ptr;
2206 } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2207 right_ptr = speculative_right_ptr;
2208 }
2209 }
2210
2211 if (left_ptr == ProfileAlwaysNull) {
2212 // Comparison with null. Assert the input is indeed null and we're done.
2213 acmp_always_null_input(left, tleft, btest, eq_region);
2214 return;
2215 }
2216 if (right_ptr == ProfileAlwaysNull) {
2217 // Comparison with null. Assert the input is indeed null and we're done.
2218 acmp_always_null_input(right, tright, btest, eq_region);
2219 return;
2220 }
2221 if (left_type != nullptr && !left_type->is_inlinetype()) {
2222 // Comparison with an object of known type
2223 acmp_type_check(left, tleft, left_ptr, left_type, btest, eq_region);
2224 return;
2225 }
2226 if (right_type != nullptr && !right_type->is_inlinetype()) {
2227 // Comparison with an object of known type
2228 acmp_type_check(right, tright, right_ptr, right_type, btest, eq_region);
2229 return;
2230 }
2231 if (!left_inline_type) {
2232 // Comparison with an object known not to be an inline type
2233 acmp_type_check(left, tleft, left_ptr, nullptr, btest, eq_region);
2234 return;
2235 }
2236 if (!right_inline_type) {
2237 // Comparison with an object known not to be an inline type
2238 acmp_type_check(right, tright, right_ptr, nullptr, btest, eq_region);
2239 return;
2240 }
2241
2242 // Pointers are not equal, check if first operand is non-null
2243 Node* ne_region = new RegionNode(6);
2244 Node* null_ctl = nullptr;
2245 Node* not_null_left = nullptr;
2246 Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2247 ne_region->init_req(1, null_ctl);
2248
2249 if (!stopped()) {
2250 // First operand is non-null, check if it is the speculative inline type if possible
2251 // (which later allows isSubstitutable to be intrinsified), or any inline type if no
2252 // speculation is available.
2253 if (right_type != nullptr && right_type->is_inlinetype()) {
2254 acmp_type_check_or_trap(¬_null_right, right_type, Deoptimization::Reason_speculate_class_check);
2255 } else {
2256 Node* is_value = inline_type_test(not_null_right);
2257 IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2258 Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2259 ne_region->init_req(2, not_value);
2260 set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2261 }
2262
2263 // The first operand is an inline type, check if the second operand is non-null
2264 not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2265 ne_region->init_req(3, null_ctl);
2266 if (!stopped()) {
2267 // Check if lhs operand is of a specific speculative inline type (see above).
2268 // If not, we don't need to enforce that the lhs is a value object since we know
2269 // it already for the rhs, and must enforce that they have the same type.
2270 if (left_type != nullptr && left_type->is_inlinetype()) {
2271 acmp_type_check_or_trap(¬_null_left, left_type, Deoptimization::Reason_speculate_class_check);
2272 }
2273 if (!stopped()) {
2274 // Check if both operands are of the same class.
2275 Node* kls_left = load_object_klass(not_null_left);
2276 Node* kls_right = load_object_klass(not_null_right);
2277 Node* kls_cmp = CmpP(kls_left, kls_right);
2278 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2279 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2280 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2281 set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2282 ne_region->init_req(4, kls_ne);
2283 }
2284 }
2285 }
2286
2287 if (stopped()) {
2288 record_for_igvn(ne_region);
2289 set_control(_gvn.transform(ne_region));
2290 if (btest == BoolTest::ne) {
2291 {
2292 PreserveJVMState pjvms(this);
2293 int target_bci = iter().get_dest();
2294 merge(target_bci);
2295 }
2296 record_for_igvn(eq_region);
2297 set_control(_gvn.transform(eq_region));
2298 }
2299 return;
2300 }
2301
2302 // Both operands are values types of the same class, we need to perform a
2303 // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2304 Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2305 Node* mem = reset_memory();
2306 Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2307
2308 Node* eq_io_phi = nullptr;
2309 Node* eq_mem_phi = nullptr;
2310 if (eq_region != nullptr) {
2311 eq_io_phi = PhiNode::make(eq_region, i_o());
2312 eq_mem_phi = PhiNode::make(eq_region, mem);
2313 }
2314
2315 set_all_memory(mem);
2316
2317 kill_dead_locals();
2318 ciSymbol* subst_method_name = ciSymbols::isSubstitutable_name();
2319 ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(subst_method_name, ciSymbols::object_object_boolean_signature());
2320 CallStaticJavaNode* call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2321 call->set_override_symbolic_info(true);
2322 call->init_req(TypeFunc::Parms, not_null_left);
2323 call->init_req(TypeFunc::Parms+1, not_null_right);
2324 inc_sp(2);
2325 set_edges_for_java_call(call, false, false);
2326 Node* ret = set_results_for_java_call(call, false, true);
2327 dec_sp(2);
2328
2329 // Test the return value of ValueObjectMethods::isSubstitutable()
2330 // This is the last check, do_if can emit traps now.
2331 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2332 Node* ctl = C->top();
2333 Node* stress_count_mem = nullptr;
2334 if (btest == BoolTest::eq) {
2335 PreserveJVMState pjvms(this);
2336 do_if(btest, subst_cmp, can_trap, false, nullptr, &stress_count_mem);
2337 if (!stopped()) {
2338 ctl = control();
2339 }
2340 } else {
2341 assert(btest == BoolTest::ne, "only eq or ne");
2342 PreserveJVMState pjvms(this);
2343 do_if(btest, subst_cmp, can_trap, false, &ctl, &stress_count_mem);
2344 if (!stopped()) {
2345 eq_region->init_req(2, control());
2346 eq_io_phi->init_req(2, i_o());
2347 eq_mem_phi->init_req(2, reset_memory());
2348 }
2349 }
2350 if (stress_count_mem != nullptr) {
2351 set_memory(stress_count_mem, stress_count_mem->adr_type());
2352 }
2353 ne_region->init_req(5, ctl);
2354 ne_io_phi->init_req(5, i_o());
2355 ne_mem_phi->init_req(5, reset_memory());
2356
2357 record_for_igvn(ne_region);
2358 set_control(_gvn.transform(ne_region));
2359 set_i_o(_gvn.transform(ne_io_phi));
2360 set_all_memory(_gvn.transform(ne_mem_phi));
2361
2362 if (btest == BoolTest::ne) {
2363 {
2364 PreserveJVMState pjvms(this);
2365 int target_bci = iter().get_dest();
2366 merge(target_bci);
2367 }
2368
2369 record_for_igvn(eq_region);
2370 set_control(_gvn.transform(eq_region));
2371 set_i_o(_gvn.transform(eq_io_phi));
2372 set_all_memory(_gvn.transform(eq_mem_phi));
2373 }
2374 }
2375
2376 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2377 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2378 // then either takes the trap or executes the original, unstable if.
2379 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2380 // Search for an unstable if trap
2381 CallStaticJavaNode* trap = nullptr;
2382 assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2383 ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2384 if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2385 // No suitable trap found. Remove unused counter load and increment.
2386 C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2387 return;
2388 }
2389
2390 // Remove trap from optimization list since we add another path to the trap.
2391 bool success = C->remove_unstable_if_trap(trap, true);
2392 assert(success, "Trap already modified");
2393
2394 // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2395 int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2396 Node* mask = intcon(right_n_bits(freq_log));
2397 counter = _gvn.transform(new AndINode(counter, mask));
2398 Node* cmp = _gvn.transform(new CmpINode(counter, intcon(0)));
2399 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::mask::eq));
2400 IfNode* iff = _gvn.transform(new IfNode(orig_iff->in(0), bol, orig_iff->_prob, orig_iff->_fcnt))->as_If();
2401 Node* if_true = _gvn.transform(new IfTrueNode(iff));
2402 Node* if_false = _gvn.transform(new IfFalseNode(iff));
2403 assert(!if_true->is_top() && !if_false->is_top(), "trap always / never taken");
2404
2405 // Trap
2406 assert(trap_proj->outcnt() == 1, "some other nodes are dependent on the trap projection");
2407
2408 Node* trap_region = new RegionNode(3);
2409 trap_region->set_req(1, trap_proj);
2410 trap_region->set_req(2, if_true);
2411 trap->set_req(0, _gvn.transform(trap_region));
2412
2413 // Don't trap, execute original if
2414 orig_iff->set_req(0, if_false);
2415 }
2416
2417 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2418 // Randomly skip emitting an uncommon trap
2419 if (StressUnstableIfTraps && ((C->random() % 2) == 0)) {
2420 return false;
2421 }
2422 // Don't want to speculate on uncommon traps when running with -Xcomp
2423 if (!UseInterpreter) {
2424 return false;
2425 }
2426 return seems_never_taken(prob) &&
2427 !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
2428 }
2429
2430 void Parse::maybe_add_predicate_after_if(Block* path) {
2431 if (path->is_SEL_head() && path->preds_parsed() == 0) {
2432 // Add predicates at bci of if dominating the loop so traps can be
2433 // recorded on the if's profile data
2434 int bc_depth = repush_if_args();
2435 add_parse_predicates();
2436 dec_sp(bc_depth);
2437 path->set_has_predicates();
2438 }
2439 }
2440
2441
2442 //----------------------------adjust_map_after_if------------------------------
2443 // Adjust the JVM state to reflect the result of taking this path.
2444 // Basically, it means inspecting the CmpNode controlling this
2445 // branch, seeing how it constrains a tested value, and then
2446 // deciding if it's worth our while to encode this constraint
2447 // as graph nodes in the current abstract interpretation map.
2448 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2449 if (!c->is_Cmp()) {
2450 maybe_add_predicate_after_if(path);
2451 return;
2452 }
2453
2454 if (stopped() || btest == BoolTest::illegal) {
2455 return; // nothing to do
2456 }
2457
2458 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2459
2460 if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2461 repush_if_args();
2462 Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2463 Deoptimization::Action_reinterpret,
2464 nullptr,
2465 (is_fallthrough ? "taken always" : "taken never"));
2466
2467 if (call != nullptr) {
2468 C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2469 }
2470 return;
2471 }
2472
2473 if (c->is_FlatArrayCheck()) {
2474 maybe_add_predicate_after_if(path);
2475 return;
2476 }
2477
2478 Node* val = c->in(1);
2479 Node* con = c->in(2);
2480 const Type* tcon = _gvn.type(con);
2481 const Type* tval = _gvn.type(val);
2482 bool have_con = tcon->singleton();
2483 if (tval->singleton()) {
2484 if (!have_con) {
2485 // Swap, so constant is in con.
2486 con = val;
2487 tcon = tval;
2488 val = c->in(2);
2489 tval = _gvn.type(val);
2490 btest = BoolTest(btest).commute();
2491 have_con = true;
2492 } else {
2493 // Do we have two constants? Then leave well enough alone.
2494 have_con = false;
2495 }
2496 }
2497 if (!have_con) { // remaining adjustments need a con
2498 maybe_add_predicate_after_if(path);
2499 return;
2500 }
2501
2502 sharpen_type_after_if(btest, con, tcon, val, tval);
2503 maybe_add_predicate_after_if(path);
2504 }
2505
2506
2507 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
2508 Node* ldk;
2509 if (n->is_DecodeNKlass()) {
2510 if (n->in(1)->Opcode() != Op_LoadNKlass) {
2511 return nullptr;
2512 } else {
2513 ldk = n->in(1);
2514 }
2515 } else if (n->Opcode() != Op_LoadKlass) {
2516 return nullptr;
2517 } else {
2518 ldk = n;
2519 }
2520 assert(ldk != nullptr && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
2521
2522 Node* adr = ldk->in(MemNode::Address);
2523 intptr_t off = 0;
2524 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
2525 if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
2526 return nullptr;
2527 const TypePtr* tp = gvn->type(obj)->is_ptr();
2528 if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
2529 return nullptr;
2530
2531 return obj;
2532 }
2533
2534 // Matches exact and inexact type check IR shapes during parsing.
2535 // On successful match, returns type checked object node and its type after successful check
2536 // as out parameters.
2537 static bool match_type_check(PhaseGVN& gvn,
2538 BoolTest::mask btest,
2539 Node* con, const Type* tcon,
2540 Node* val, const Type* tval,
2541 Node** obj, const TypeOopPtr** cast_type) { // out-parameters
2542 // Look for opportunities to sharpen the type of a node whose klass is compared with a constant klass.
2543 // The constant klass being tested against can come from many bytecode instructions (implicitly or explicitly),
2544 // and also from profile data used by speculative casts.
2545 if (btest == BoolTest::eq && tcon->isa_klassptr()) {
2546 // Found:
2547 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2548 // or the narrowOop equivalent.
2549 (*obj) = extract_obj_from_klass_load(&gvn, val);
2550 // Some klass comparisons are not directly in the form
2551 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]),
2552 // e.g. Bool(CmpP(CastPP(LoadKlass(...)), ConP(klass)), [eq]).
2553 // These patterns with nullable klasses arise from example from
2554 // load_array_klass_from_mirror.
2555 if (*obj == nullptr) { return false; }
2556 (*cast_type) = tcon->isa_klassptr()->as_instance_type();
2557 return true; // found
2558 }
2559
2560 // Match an instanceof check.
2561 // During parsing its IR shape is not canonicalized yet.
2562 //
2563 // obj superklass
2564 // | |
2565 // SubTypeCheck
2566 // |
2567 // Bool [eq] / [ne]
2568 // |
2569 // If
2570 // / \
2571 // T F
2572 // \ /
2573 // Region
2574 // \ ConI ConI
2575 // \ | /
2576 // val -> Phi ConI <- con
2577 // \ /
2578 // CmpI
2579 // |
2580 // Bool [btest]
2581 // |
2582 //
2583 if (tval->isa_int() && val->is_Phi() && val->in(0)->as_Region()->is_diamond()) {
2584 RegionNode* diamond = val->in(0)->as_Region();
2585 IfNode* if1 = diamond->in(1)->in(0)->as_If();
2586 BoolNode* b1 = if1->in(1)->isa_Bool();
2587 if (b1 != nullptr && b1->in(1)->isa_SubTypeCheck()) {
2588 assert(b1->_test._test == BoolTest::eq ||
2589 b1->_test._test == BoolTest::ne, "%d", b1->_test._test);
2590
2591 ProjNode* success_proj = if1->proj_out(b1->_test._test == BoolTest::eq ? 1 : 0);
2592 int idx = diamond->find_edge(success_proj);
2593 assert(idx == 1 || idx == 2, "");
2594 Node* vcon = val->in(idx);
2595
2596 if ((btest == BoolTest::eq && vcon == con) || (btest == BoolTest::ne && vcon != con)) {
2597 assert(val->find_edge(con) > 0, "mismatch");
2598 SubTypeCheckNode* sub = b1->in(1)->as_SubTypeCheck();
2599 Node* obj_or_subklass = sub->in(SubTypeCheckNode::ObjOrSubKlass);
2600 Node* superklass = sub->in(SubTypeCheckNode::SuperKlass);
2601
2602 if (gvn.type(obj_or_subklass)->isa_oopptr()) {
2603 const TypeKlassPtr* klass_ptr_type = gvn.type(superklass)->is_klassptr();
2604 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
2605
2606 (*obj) = obj_or_subklass;
2607 (*cast_type) = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
2608 return true; // found
2609 }
2610 }
2611 }
2612 }
2613 return false; // not found
2614 }
2615
2616 void Parse::sharpen_type_after_if(BoolTest::mask btest,
2617 Node* con, const Type* tcon,
2618 Node* val, const Type* tval) {
2619 Node* obj = nullptr;
2620 const TypeOopPtr* cast_type = nullptr;
2621 // Insert a cast node with a narrowed type after a successful type check.
2622 if (match_type_check(_gvn, btest, con, tcon, val, tval,
2623 &obj, &cast_type)) {
2624 assert(obj != nullptr && cast_type != nullptr, "missing type check info");
2625 const Type* obj_type = _gvn.type(obj);
2626 const Type* tboth = obj_type->filter_speculative(cast_type);
2627 assert(tboth->higher_equal(obj_type) && tboth->higher_equal(cast_type), "sanity");
2628 if (tboth == Type::TOP && KillPathsReachableByDeadTypeNode) {
2629 // Let dead type node cleaning logic prune effectively dead path for us.
2630 // CheckCastPP::Value() == TOP and it will trigger the cleanup during GVN.
2631 // Don't materialize the cast when cleanup is disabled, because
2632 // it kills data and control leaving IR in broken state.
2633 tboth = cast_type;
2634 }
2635 if (tboth != Type::TOP && tboth != obj_type) {
2636 int obj_in_map = map()->find_edge(obj);
2637 if (obj_in_map >= 0 &&
2638 (jvms()->is_loc(obj_in_map) || jvms()->is_stk(obj_in_map))) {
2639 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2640 // Delay transform() call to allow recovery of pre-cast value at the control merge.
2641 _gvn.set_type_bottom(ccast);
2642 record_for_igvn(ccast);
2643 if (tboth->is_inlinetypeptr()) {
2644 ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->isa_oopptr()->exact_klass(true)->as_inline_klass());
2645 }
2646 // Here's the payoff.
2647 replace_in_map(obj, ccast);
2648 }
2649 }
2650 }
2651
2652 int val_in_map = map()->find_edge(val);
2653 if (val_in_map < 0) return; // replace_in_map would be useless
2654 {
2655 JVMState* jvms = this->jvms();
2656 if (!(jvms->is_loc(val_in_map) ||
2657 jvms->is_stk(val_in_map)))
2658 return; // again, it would be useless
2659 }
2660
2661 // Check for a comparison to a constant, and "know" that the compared
2662 // value is constrained on this path.
2663 assert(tcon->singleton(), "");
2664 ConstraintCastNode* ccast = nullptr;
2665 Node* cast = nullptr;
2666
2667 switch (btest) {
2668 case BoolTest::eq: // Constant test?
2669 {
2670 const Type* tboth = tcon->join_speculative(tval);
2671 if (tboth == tval) break; // Nothing to gain.
2672 if (tcon->isa_int()) {
2673 ccast = new CastIINode(control(), val, tboth);
2674 } else if (tcon == TypePtr::NULL_PTR) {
2675 // Cast to null, but keep the pointer identity temporarily live.
2676 ccast = new CastPPNode(control(), val, tboth);
2677 } else {
2678 const TypeF* tf = tcon->isa_float_constant();
2679 const TypeD* td = tcon->isa_double_constant();
2680 // Exclude tests vs float/double 0 as these could be
2681 // either +0 or -0. Just because you are equal to +0
2682 // doesn't mean you ARE +0!
2683 // Note, following code also replaces Long and Oop values.
2684 if ((!tf || tf->_f != 0.0) &&
2685 (!td || td->_d != 0.0))
2686 cast = con; // Replace non-constant val by con.
2687 }
2688 }
2689 break;
2690
2691 case BoolTest::ne:
2692 if (tcon == TypePtr::NULL_PTR) {
2693 cast = cast_not_null(val, false);
2694 }
2695 break;
2696
2697 default:
2698 // (At this point we could record int range types with CastII.)
2699 break;
2700 }
2701
2702 if (ccast != nullptr) {
2703 const Type* tcc = ccast->as_Type()->type();
2704 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
2705 // Delay transform() call to allow recovery of pre-cast value
2706 // at the control merge.
2707 _gvn.set_type_bottom(ccast);
2708 record_for_igvn(ccast);
2709 cast = ccast;
2710 }
2711
2712 if (cast != nullptr) { // Here's the payoff.
2713 replace_in_map(val, cast);
2714 }
2715 }
2716
2717 /**
2718 * Use speculative type to optimize CmpP node: if comparison is
2719 * against the low level class, cast the object to the speculative
2720 * type if any. CmpP should then go away.
2721 *
2722 * @param c expected CmpP node
2723 * @return result of CmpP on object casted to speculative type
2724 *
2725 */
2726 Node* Parse::optimize_cmp_with_klass(Node* c) {
2727 // If this is transformed by the _gvn to a comparison with the low
2728 // level klass then we may be able to use speculation
2729 if (c->Opcode() == Op_CmpP &&
2730 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2731 c->in(2)->is_Con()) {
2732 Node* load_klass = nullptr;
2733 Node* decode = nullptr;
2734 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2735 decode = c->in(1);
2736 load_klass = c->in(1)->in(1);
2737 } else {
2738 load_klass = c->in(1);
2739 }
2740 if (load_klass->in(2)->is_AddP()) {
2741 Node* addp = load_klass->in(2);
2742 Node* obj = addp->in(AddPNode::Address);
2743 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2744 if (obj_type->speculative_type_not_null() != nullptr) {
2745 ciKlass* k = obj_type->speculative_type();
2746 inc_sp(2);
2747 obj = maybe_cast_profiled_obj(obj, k);
2748 dec_sp(2);
2749 if (obj->is_InlineType()) {
2750 assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2751 obj = obj->as_InlineType()->get_oop();
2752 }
2753 // Make the CmpP use the casted obj
2754 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2755 load_klass = load_klass->clone();
2756 load_klass->set_req(2, addp);
2757 load_klass = _gvn.transform(load_klass);
2758 if (decode != nullptr) {
2759 decode = decode->clone();
2760 decode->set_req(1, load_klass);
2761 load_klass = _gvn.transform(decode);
2762 }
2763 c = c->clone();
2764 c->set_req(1, load_klass);
2765 c = _gvn.transform(c);
2766 }
2767 }
2768 }
2769 return c;
2770 }
2771
2772 //------------------------------do_one_bytecode--------------------------------
2773 // Parse this bytecode, and alter the Parsers JVM->Node mapping
2774 void Parse::do_one_bytecode() {
2775 Node *a, *b, *c, *d; // Handy temps
2776 BoolTest::mask btest;
2777 int i;
2778
2779 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
2780
2781 if (C->check_node_count(NodeLimitFudgeFactor * 5,
2782 "out of nodes parsing method")) {
2783 return;
2784 }
2785
2786 #ifdef ASSERT
2787 // for setting breakpoints
2788 if (TraceOptoParse) {
2789 tty->print(" @");
2790 dump_bci(bci());
2791 tty->print(" %s", Bytecodes::name(bc()));
2792 tty->cr();
2793 }
2794 #endif
2795
2796 switch (bc()) {
2797 case Bytecodes::_nop:
2798 // do nothing
2799 break;
2800 case Bytecodes::_lconst_0:
2801 push_pair(longcon(0));
2802 break;
2803
2804 case Bytecodes::_lconst_1:
2805 push_pair(longcon(1));
2806 break;
2807
2808 case Bytecodes::_fconst_0:
2809 push(zerocon(T_FLOAT));
2810 break;
2811
2812 case Bytecodes::_fconst_1:
2813 push(makecon(TypeF::ONE));
2814 break;
2815
2816 case Bytecodes::_fconst_2:
2817 push(makecon(TypeF::make(2.0f)));
2818 break;
2819
2820 case Bytecodes::_dconst_0:
2821 push_pair(zerocon(T_DOUBLE));
2822 break;
2823
2824 case Bytecodes::_dconst_1:
2825 push_pair(makecon(TypeD::ONE));
2826 break;
2827
2828 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
2829 case Bytecodes::_iconst_0: push(intcon( 0)); break;
2830 case Bytecodes::_iconst_1: push(intcon( 1)); break;
2831 case Bytecodes::_iconst_2: push(intcon( 2)); break;
2832 case Bytecodes::_iconst_3: push(intcon( 3)); break;
2833 case Bytecodes::_iconst_4: push(intcon( 4)); break;
2834 case Bytecodes::_iconst_5: push(intcon( 5)); break;
2835 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break;
2836 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break;
2837 case Bytecodes::_aconst_null: push(null()); break;
2838
2839 case Bytecodes::_ldc:
2840 case Bytecodes::_ldc_w:
2841 case Bytecodes::_ldc2_w: {
2842 // ciTypeFlow should trap if the ldc is in error state or if the constant is not loaded
2843 assert(!iter().is_in_error(), "ldc is in error state");
2844 ciConstant constant = iter().get_constant();
2845 assert(constant.is_loaded(), "constant is not loaded");
2846 const Type* con_type = Type::make_from_constant(constant);
2847 if (con_type != nullptr) {
2848 push_node(con_type->basic_type(), makecon(con_type));
2849 }
2850 break;
2851 }
2852
2853 case Bytecodes::_aload_0:
2854 push( local(0) );
2855 break;
2856 case Bytecodes::_aload_1:
2857 push( local(1) );
2858 break;
2859 case Bytecodes::_aload_2:
2860 push( local(2) );
2861 break;
2862 case Bytecodes::_aload_3:
2863 push( local(3) );
2864 break;
2865 case Bytecodes::_aload:
2866 push( local(iter().get_index()) );
2867 break;
2868
2869 case Bytecodes::_fload_0:
2870 case Bytecodes::_iload_0:
2871 push( local(0) );
2872 break;
2873 case Bytecodes::_fload_1:
2874 case Bytecodes::_iload_1:
2875 push( local(1) );
2876 break;
2877 case Bytecodes::_fload_2:
2878 case Bytecodes::_iload_2:
2879 push( local(2) );
2880 break;
2881 case Bytecodes::_fload_3:
2882 case Bytecodes::_iload_3:
2883 push( local(3) );
2884 break;
2885 case Bytecodes::_fload:
2886 case Bytecodes::_iload:
2887 push( local(iter().get_index()) );
2888 break;
2889 case Bytecodes::_lload_0:
2890 push_pair_local( 0 );
2891 break;
2892 case Bytecodes::_lload_1:
2893 push_pair_local( 1 );
2894 break;
2895 case Bytecodes::_lload_2:
2896 push_pair_local( 2 );
2897 break;
2898 case Bytecodes::_lload_3:
2899 push_pair_local( 3 );
2900 break;
2901 case Bytecodes::_lload:
2902 push_pair_local( iter().get_index() );
2903 break;
2904
2905 case Bytecodes::_dload_0:
2906 push_pair_local(0);
2907 break;
2908 case Bytecodes::_dload_1:
2909 push_pair_local(1);
2910 break;
2911 case Bytecodes::_dload_2:
2912 push_pair_local(2);
2913 break;
2914 case Bytecodes::_dload_3:
2915 push_pair_local(3);
2916 break;
2917 case Bytecodes::_dload:
2918 push_pair_local(iter().get_index());
2919 break;
2920 case Bytecodes::_fstore_0:
2921 case Bytecodes::_istore_0:
2922 case Bytecodes::_astore_0:
2923 set_local( 0, pop() );
2924 break;
2925 case Bytecodes::_fstore_1:
2926 case Bytecodes::_istore_1:
2927 case Bytecodes::_astore_1:
2928 set_local( 1, pop() );
2929 break;
2930 case Bytecodes::_fstore_2:
2931 case Bytecodes::_istore_2:
2932 case Bytecodes::_astore_2:
2933 set_local( 2, pop() );
2934 break;
2935 case Bytecodes::_fstore_3:
2936 case Bytecodes::_istore_3:
2937 case Bytecodes::_astore_3:
2938 set_local( 3, pop() );
2939 break;
2940 case Bytecodes::_fstore:
2941 case Bytecodes::_istore:
2942 case Bytecodes::_astore:
2943 set_local( iter().get_index(), pop() );
2944 break;
2945 // long stores
2946 case Bytecodes::_lstore_0:
2947 set_pair_local( 0, pop_pair() );
2948 break;
2949 case Bytecodes::_lstore_1:
2950 set_pair_local( 1, pop_pair() );
2951 break;
2952 case Bytecodes::_lstore_2:
2953 set_pair_local( 2, pop_pair() );
2954 break;
2955 case Bytecodes::_lstore_3:
2956 set_pair_local( 3, pop_pair() );
2957 break;
2958 case Bytecodes::_lstore:
2959 set_pair_local( iter().get_index(), pop_pair() );
2960 break;
2961
2962 // double stores
2963 case Bytecodes::_dstore_0:
2964 set_pair_local( 0, pop_pair() );
2965 break;
2966 case Bytecodes::_dstore_1:
2967 set_pair_local( 1, pop_pair() );
2968 break;
2969 case Bytecodes::_dstore_2:
2970 set_pair_local( 2, pop_pair() );
2971 break;
2972 case Bytecodes::_dstore_3:
2973 set_pair_local( 3, pop_pair() );
2974 break;
2975 case Bytecodes::_dstore:
2976 set_pair_local( iter().get_index(), pop_pair() );
2977 break;
2978
2979 case Bytecodes::_pop: dec_sp(1); break;
2980 case Bytecodes::_pop2: dec_sp(2); break;
2981 case Bytecodes::_swap:
2982 a = pop();
2983 b = pop();
2984 push(a);
2985 push(b);
2986 break;
2987 case Bytecodes::_dup:
2988 a = pop();
2989 push(a);
2990 push(a);
2991 break;
2992 case Bytecodes::_dup_x1:
2993 a = pop();
2994 b = pop();
2995 push( a );
2996 push( b );
2997 push( a );
2998 break;
2999 case Bytecodes::_dup_x2:
3000 a = pop();
3001 b = pop();
3002 c = pop();
3003 push( a );
3004 push( c );
3005 push( b );
3006 push( a );
3007 break;
3008 case Bytecodes::_dup2:
3009 a = pop();
3010 b = pop();
3011 push( b );
3012 push( a );
3013 push( b );
3014 push( a );
3015 break;
3016
3017 case Bytecodes::_dup2_x1:
3018 // before: .. c, b, a
3019 // after: .. b, a, c, b, a
3020 // not tested
3021 a = pop();
3022 b = pop();
3023 c = pop();
3024 push( b );
3025 push( a );
3026 push( c );
3027 push( b );
3028 push( a );
3029 break;
3030 case Bytecodes::_dup2_x2:
3031 // before: .. d, c, b, a
3032 // after: .. b, a, d, c, b, a
3033 // not tested
3034 a = pop();
3035 b = pop();
3036 c = pop();
3037 d = pop();
3038 push( b );
3039 push( a );
3040 push( d );
3041 push( c );
3042 push( b );
3043 push( a );
3044 break;
3045
3046 case Bytecodes::_arraylength: {
3047 // Must do null-check with value on expression stack
3048 Node *ary = null_check(peek(), T_ARRAY);
3049 // Compile-time detect of null-exception?
3050 if (stopped()) return;
3051 a = pop();
3052 push(load_array_length(a));
3053 break;
3054 }
3055
3056 case Bytecodes::_baload: array_load(T_BYTE); break;
3057 case Bytecodes::_caload: array_load(T_CHAR); break;
3058 case Bytecodes::_iaload: array_load(T_INT); break;
3059 case Bytecodes::_saload: array_load(T_SHORT); break;
3060 case Bytecodes::_faload: array_load(T_FLOAT); break;
3061 case Bytecodes::_aaload: array_load(T_OBJECT); break;
3062 case Bytecodes::_laload: array_load(T_LONG); break;
3063 case Bytecodes::_daload: array_load(T_DOUBLE); break;
3064 case Bytecodes::_bastore: array_store(T_BYTE); break;
3065 case Bytecodes::_castore: array_store(T_CHAR); break;
3066 case Bytecodes::_iastore: array_store(T_INT); break;
3067 case Bytecodes::_sastore: array_store(T_SHORT); break;
3068 case Bytecodes::_fastore: array_store(T_FLOAT); break;
3069 case Bytecodes::_aastore: array_store(T_OBJECT); break;
3070 case Bytecodes::_lastore: array_store(T_LONG); break;
3071 case Bytecodes::_dastore: array_store(T_DOUBLE); break;
3072
3073 case Bytecodes::_getfield:
3074 do_getfield();
3075 break;
3076
3077 case Bytecodes::_getstatic:
3078 do_getstatic();
3079 break;
3080
3081 case Bytecodes::_putfield:
3082 do_putfield();
3083 break;
3084
3085 case Bytecodes::_putstatic:
3086 do_putstatic();
3087 break;
3088
3089 case Bytecodes::_irem:
3090 // Must keep both values on the expression-stack during null-check
3091 zero_check_int(peek());
3092 // Compile-time detect of null-exception?
3093 if (stopped()) return;
3094 b = pop();
3095 a = pop();
3096 push(_gvn.transform(new ModINode(control(), a, b)));
3097 break;
3098 case Bytecodes::_idiv:
3099 // Must keep both values on the expression-stack during null-check
3100 zero_check_int(peek());
3101 // Compile-time detect of null-exception?
3102 if (stopped()) return;
3103 b = pop();
3104 a = pop();
3105 push( _gvn.transform( new DivINode(control(),a,b) ) );
3106 break;
3107 case Bytecodes::_imul:
3108 b = pop(); a = pop();
3109 push( _gvn.transform( new MulINode(a,b) ) );
3110 break;
3111 case Bytecodes::_iadd:
3112 b = pop(); a = pop();
3113 push( _gvn.transform( new AddINode(a,b) ) );
3114 break;
3115 case Bytecodes::_ineg:
3116 a = pop();
3117 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
3118 break;
3119 case Bytecodes::_isub:
3120 b = pop(); a = pop();
3121 push( _gvn.transform( new SubINode(a,b) ) );
3122 break;
3123 case Bytecodes::_iand:
3124 b = pop(); a = pop();
3125 push( _gvn.transform( new AndINode(a,b) ) );
3126 break;
3127 case Bytecodes::_ior:
3128 b = pop(); a = pop();
3129 push( _gvn.transform( new OrINode(a,b) ) );
3130 break;
3131 case Bytecodes::_ixor:
3132 b = pop(); a = pop();
3133 push( _gvn.transform( new XorINode(a,b) ) );
3134 break;
3135 case Bytecodes::_ishl:
3136 b = pop(); a = pop();
3137 push( _gvn.transform( new LShiftINode(a,b) ) );
3138 break;
3139 case Bytecodes::_ishr:
3140 b = pop(); a = pop();
3141 push( _gvn.transform( new RShiftINode(a,b) ) );
3142 break;
3143 case Bytecodes::_iushr:
3144 b = pop(); a = pop();
3145 push( _gvn.transform( new URShiftINode(a,b) ) );
3146 break;
3147
3148 case Bytecodes::_fneg:
3149 a = pop();
3150 b = _gvn.transform(new NegFNode (a));
3151 push(b);
3152 break;
3153
3154 case Bytecodes::_fsub:
3155 b = pop();
3156 a = pop();
3157 c = _gvn.transform( new SubFNode(a,b) );
3158 push(c);
3159 break;
3160
3161 case Bytecodes::_fadd:
3162 b = pop();
3163 a = pop();
3164 c = _gvn.transform( new AddFNode(a,b) );
3165 push(c);
3166 break;
3167
3168 case Bytecodes::_fmul:
3169 b = pop();
3170 a = pop();
3171 c = _gvn.transform( new MulFNode(a,b) );
3172 push(c);
3173 break;
3174
3175 case Bytecodes::_fdiv:
3176 b = pop();
3177 a = pop();
3178 c = _gvn.transform( new DivFNode(nullptr,a,b) );
3179 push(c);
3180 break;
3181
3182 case Bytecodes::_frem:
3183 // Generate a ModF node.
3184 b = pop();
3185 a = pop();
3186 push(floating_point_mod(a, b, BasicType::T_FLOAT));
3187 break;
3188
3189 case Bytecodes::_fcmpl:
3190 b = pop();
3191 a = pop();
3192 c = _gvn.transform( new CmpF3Node( a, b));
3193 push(c);
3194 break;
3195 case Bytecodes::_fcmpg:
3196 b = pop();
3197 a = pop();
3198
3199 // Same as fcmpl but need to flip the unordered case. Swap the inputs,
3200 // which negates the result sign except for unordered. Flip the unordered
3201 // as well by using CmpF3 which implements unordered-lesser instead of
3202 // unordered-greater semantics. Finally, commute the result bits. Result
3203 // is same as using a CmpF3Greater except we did it with CmpF3 alone.
3204 c = _gvn.transform( new CmpF3Node( b, a));
3205 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3206 push(c);
3207 break;
3208
3209 case Bytecodes::_f2i:
3210 a = pop();
3211 push(_gvn.transform(new ConvF2INode(a)));
3212 break;
3213
3214 case Bytecodes::_d2i:
3215 a = pop_pair();
3216 b = _gvn.transform(new ConvD2INode(a));
3217 push( b );
3218 break;
3219
3220 case Bytecodes::_f2d:
3221 a = pop();
3222 b = _gvn.transform( new ConvF2DNode(a));
3223 push_pair( b );
3224 break;
3225
3226 case Bytecodes::_d2f:
3227 a = pop_pair();
3228 b = _gvn.transform( new ConvD2FNode(a));
3229 push( b );
3230 break;
3231
3232 case Bytecodes::_l2f:
3233 if (Matcher::convL2FSupported()) {
3234 a = pop_pair();
3235 b = _gvn.transform( new ConvL2FNode(a));
3236 push(b);
3237 } else {
3238 l2f();
3239 }
3240 break;
3241
3242 case Bytecodes::_l2d:
3243 a = pop_pair();
3244 b = _gvn.transform( new ConvL2DNode(a));
3245 push_pair(b);
3246 break;
3247
3248 case Bytecodes::_f2l:
3249 a = pop();
3250 b = _gvn.transform( new ConvF2LNode(a));
3251 push_pair(b);
3252 break;
3253
3254 case Bytecodes::_d2l:
3255 a = pop_pair();
3256 b = _gvn.transform( new ConvD2LNode(a));
3257 push_pair(b);
3258 break;
3259
3260 case Bytecodes::_dsub:
3261 b = pop_pair();
3262 a = pop_pair();
3263 c = _gvn.transform( new SubDNode(a,b) );
3264 push_pair(c);
3265 break;
3266
3267 case Bytecodes::_dadd:
3268 b = pop_pair();
3269 a = pop_pair();
3270 c = _gvn.transform( new AddDNode(a,b) );
3271 push_pair(c);
3272 break;
3273
3274 case Bytecodes::_dmul:
3275 b = pop_pair();
3276 a = pop_pair();
3277 c = _gvn.transform( new MulDNode(a,b) );
3278 push_pair(c);
3279 break;
3280
3281 case Bytecodes::_ddiv:
3282 b = pop_pair();
3283 a = pop_pair();
3284 c = _gvn.transform( new DivDNode(nullptr,a,b) );
3285 push_pair(c);
3286 break;
3287
3288 case Bytecodes::_dneg:
3289 a = pop_pair();
3290 b = _gvn.transform(new NegDNode (a));
3291 push_pair(b);
3292 break;
3293
3294 case Bytecodes::_drem:
3295 // Generate a ModD node.
3296 b = pop_pair();
3297 a = pop_pair();
3298 push_pair(floating_point_mod(a, b, BasicType::T_DOUBLE));
3299 break;
3300
3301 case Bytecodes::_dcmpl:
3302 b = pop_pair();
3303 a = pop_pair();
3304 c = _gvn.transform( new CmpD3Node( a, b));
3305 push(c);
3306 break;
3307
3308 case Bytecodes::_dcmpg:
3309 b = pop_pair();
3310 a = pop_pair();
3311 // Same as dcmpl but need to flip the unordered case.
3312 // Commute the inputs, which negates the result sign except for unordered.
3313 // Flip the unordered as well by using CmpD3 which implements
3314 // unordered-lesser instead of unordered-greater semantics.
3315 // Finally, negate the result bits. Result is same as using a
3316 // CmpD3Greater except we did it with CmpD3 alone.
3317 c = _gvn.transform( new CmpD3Node( b, a));
3318 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3319 push(c);
3320 break;
3321
3322
3323 // Note for longs -> lo word is on TOS, hi word is on TOS - 1
3324 case Bytecodes::_land:
3325 b = pop_pair();
3326 a = pop_pair();
3327 c = _gvn.transform( new AndLNode(a,b) );
3328 push_pair(c);
3329 break;
3330 case Bytecodes::_lor:
3331 b = pop_pair();
3332 a = pop_pair();
3333 c = _gvn.transform( new OrLNode(a,b) );
3334 push_pair(c);
3335 break;
3336 case Bytecodes::_lxor:
3337 b = pop_pair();
3338 a = pop_pair();
3339 c = _gvn.transform( new XorLNode(a,b) );
3340 push_pair(c);
3341 break;
3342
3343 case Bytecodes::_lshl:
3344 b = pop(); // the shift count
3345 a = pop_pair(); // value to be shifted
3346 c = _gvn.transform( new LShiftLNode(a,b) );
3347 push_pair(c);
3348 break;
3349 case Bytecodes::_lshr:
3350 b = pop(); // the shift count
3351 a = pop_pair(); // value to be shifted
3352 c = _gvn.transform( new RShiftLNode(a,b) );
3353 push_pair(c);
3354 break;
3355 case Bytecodes::_lushr:
3356 b = pop(); // the shift count
3357 a = pop_pair(); // value to be shifted
3358 c = _gvn.transform( new URShiftLNode(a,b) );
3359 push_pair(c);
3360 break;
3361 case Bytecodes::_lmul:
3362 b = pop_pair();
3363 a = pop_pair();
3364 c = _gvn.transform( new MulLNode(a,b) );
3365 push_pair(c);
3366 break;
3367
3368 case Bytecodes::_lrem:
3369 // Must keep both values on the expression-stack during null-check
3370 assert(peek(0) == top(), "long word order");
3371 zero_check_long(peek(1));
3372 // Compile-time detect of null-exception?
3373 if (stopped()) return;
3374 b = pop_pair();
3375 a = pop_pair();
3376 c = _gvn.transform( new ModLNode(control(),a,b) );
3377 push_pair(c);
3378 break;
3379
3380 case Bytecodes::_ldiv:
3381 // Must keep both values on the expression-stack during null-check
3382 assert(peek(0) == top(), "long word order");
3383 zero_check_long(peek(1));
3384 // Compile-time detect of null-exception?
3385 if (stopped()) return;
3386 b = pop_pair();
3387 a = pop_pair();
3388 c = _gvn.transform( new DivLNode(control(),a,b) );
3389 push_pair(c);
3390 break;
3391
3392 case Bytecodes::_ladd:
3393 b = pop_pair();
3394 a = pop_pair();
3395 c = _gvn.transform( new AddLNode(a,b) );
3396 push_pair(c);
3397 break;
3398 case Bytecodes::_lsub:
3399 b = pop_pair();
3400 a = pop_pair();
3401 c = _gvn.transform( new SubLNode(a,b) );
3402 push_pair(c);
3403 break;
3404 case Bytecodes::_lcmp:
3405 // Safepoints are now inserted _before_ branches. The long-compare
3406 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
3407 // slew of control flow. These are usually followed by a CmpI vs zero and
3408 // a branch; this pattern then optimizes to the obvious long-compare and
3409 // branch. However, if the branch is backwards there's a Safepoint
3410 // inserted. The inserted Safepoint captures the JVM state at the
3411 // pre-branch point, i.e. it captures the 3-way value. Thus if a
3412 // long-compare is used to control a loop the debug info will force
3413 // computation of the 3-way value, even though the generated code uses a
3414 // long-compare and branch. We try to rectify the situation by inserting
3415 // a SafePoint here and have it dominate and kill the safepoint added at a
3416 // following backwards branch. At this point the JVM state merely holds 2
3417 // longs but not the 3-way value.
3418 switch (iter().next_bc()) {
3419 case Bytecodes::_ifgt:
3420 case Bytecodes::_iflt:
3421 case Bytecodes::_ifge:
3422 case Bytecodes::_ifle:
3423 case Bytecodes::_ifne:
3424 case Bytecodes::_ifeq:
3425 // If this is a backwards branch in the bytecodes, add Safepoint
3426 maybe_add_safepoint(iter().next_get_dest());
3427 default:
3428 break;
3429 }
3430 b = pop_pair();
3431 a = pop_pair();
3432 c = _gvn.transform( new CmpL3Node( a, b ));
3433 push(c);
3434 break;
3435
3436 case Bytecodes::_lneg:
3437 a = pop_pair();
3438 b = _gvn.transform( new SubLNode(longcon(0),a));
3439 push_pair(b);
3440 break;
3441 case Bytecodes::_l2i:
3442 a = pop_pair();
3443 push( _gvn.transform( new ConvL2INode(a)));
3444 break;
3445 case Bytecodes::_i2l:
3446 a = pop();
3447 b = _gvn.transform( new ConvI2LNode(a));
3448 push_pair(b);
3449 break;
3450 case Bytecodes::_i2b:
3451 // Sign extend
3452 a = pop();
3453 a = Compile::narrow_value(T_BYTE, a, nullptr, &_gvn, true);
3454 push(a);
3455 break;
3456 case Bytecodes::_i2s:
3457 a = pop();
3458 a = Compile::narrow_value(T_SHORT, a, nullptr, &_gvn, true);
3459 push(a);
3460 break;
3461 case Bytecodes::_i2c:
3462 a = pop();
3463 a = Compile::narrow_value(T_CHAR, a, nullptr, &_gvn, true);
3464 push(a);
3465 break;
3466
3467 case Bytecodes::_i2f:
3468 a = pop();
3469 b = _gvn.transform( new ConvI2FNode(a) ) ;
3470 push(b);
3471 break;
3472
3473 case Bytecodes::_i2d:
3474 a = pop();
3475 b = _gvn.transform( new ConvI2DNode(a));
3476 push_pair(b);
3477 break;
3478
3479 case Bytecodes::_iinc: // Increment local
3480 i = iter().get_index(); // Get local index
3481 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3482 break;
3483
3484 // Exit points of synchronized methods must have an unlock node
3485 case Bytecodes::_return:
3486 return_current(nullptr);
3487 break;
3488
3489 case Bytecodes::_ireturn:
3490 case Bytecodes::_areturn:
3491 case Bytecodes::_freturn:
3492 return_current(pop());
3493 break;
3494 case Bytecodes::_lreturn:
3495 case Bytecodes::_dreturn:
3496 return_current(pop_pair());
3497 break;
3498
3499 case Bytecodes::_athrow:
3500 // null exception oop throws null pointer exception
3501 null_check(peek());
3502 if (stopped()) return;
3503 // Hook the thrown exception directly to subsequent handlers.
3504 if (BailoutToInterpreterForThrows) {
3505 // Keep method interpreted from now on.
3506 uncommon_trap(Deoptimization::Reason_unhandled,
3507 Deoptimization::Action_make_not_compilable);
3508 return;
3509 }
3510 if (env()->jvmti_can_post_on_exceptions()) {
3511 // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3512 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3513 }
3514 // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3515 add_exception_state(make_exception_state(peek()));
3516 break;
3517
3518 case Bytecodes::_goto: // fall through
3519 case Bytecodes::_goto_w: {
3520 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
3521
3522 // If this is a backwards branch in the bytecodes, add Safepoint
3523 maybe_add_safepoint(target_bci);
3524
3525 // Merge the current control into the target basic block
3526 merge(target_bci);
3527
3528 // See if we can get some profile data and hand it off to the next block
3529 Block *target_block = block()->successor_for_bci(target_bci);
3530 if (target_block->pred_count() != 1) break;
3531 ciMethodData* methodData = method()->method_data();
3532 if (!methodData->is_mature()) break;
3533 ciProfileData* data = methodData->bci_to_data(bci());
3534 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3535 int taken = ((ciJumpData*)data)->taken();
3536 taken = method()->scale_count(taken);
3537 target_block->set_count(taken);
3538 break;
3539 }
3540
3541 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
3542 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3543 handle_if_null:
3544 // If this is a backwards branch in the bytecodes, add Safepoint
3545 maybe_add_safepoint(iter().get_dest());
3546 a = null();
3547 b = pop();
3548 if (b->is_InlineType()) {
3549 // Null checking a scalarized but nullable inline type. Check the null marker
3550 // input instead of the oop input to avoid keeping buffer allocations alive
3551 c = _gvn.transform(new CmpINode(b->as_InlineType()->get_null_marker(), zerocon(T_INT)));
3552 } else {
3553 if (!_gvn.type(b)->speculative_maybe_null() &&
3554 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3555 inc_sp(1);
3556 Node* null_ctl = top();
3557 b = null_check_oop(b, &null_ctl, true, true, true);
3558 assert(null_ctl->is_top(), "no null control here");
3559 dec_sp(1);
3560 } else if (_gvn.type(b)->speculative_always_null() &&
3561 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3562 inc_sp(1);
3563 b = null_assert(b);
3564 dec_sp(1);
3565 }
3566 c = _gvn.transform( new CmpPNode(b, a) );
3567 }
3568 do_ifnull(btest, c);
3569 break;
3570
3571 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3572 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3573 handle_if_acmp:
3574 // If this is a backwards branch in the bytecodes, add Safepoint
3575 maybe_add_safepoint(iter().get_dest());
3576 a = pop();
3577 b = pop();
3578 do_acmp(btest, b, a);
3579 break;
3580
3581 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3582 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3583 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3584 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3585 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3586 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3587 handle_ifxx:
3588 // If this is a backwards branch in the bytecodes, add Safepoint
3589 maybe_add_safepoint(iter().get_dest());
3590 a = _gvn.intcon(0);
3591 b = pop();
3592 c = _gvn.transform( new CmpINode(b, a) );
3593 do_if(btest, c);
3594 break;
3595
3596 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3597 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3598 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3599 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
3600 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
3601 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
3602 handle_if_icmp:
3603 // If this is a backwards branch in the bytecodes, add Safepoint
3604 maybe_add_safepoint(iter().get_dest());
3605 a = pop();
3606 b = pop();
3607 c = _gvn.transform( new CmpINode( b, a ) );
3608 do_if(btest, c);
3609 break;
3610
3611 case Bytecodes::_tableswitch:
3612 do_tableswitch();
3613 break;
3614
3615 case Bytecodes::_lookupswitch:
3616 do_lookupswitch();
3617 break;
3618
3619 case Bytecodes::_invokestatic:
3620 case Bytecodes::_invokedynamic:
3621 case Bytecodes::_invokespecial:
3622 case Bytecodes::_invokevirtual:
3623 case Bytecodes::_invokeinterface:
3624 do_call();
3625 break;
3626 case Bytecodes::_checkcast:
3627 do_checkcast();
3628 break;
3629 case Bytecodes::_instanceof:
3630 do_instanceof();
3631 break;
3632 case Bytecodes::_anewarray:
3633 do_newarray();
3634 break;
3635 case Bytecodes::_newarray:
3636 do_newarray((BasicType)iter().get_index());
3637 break;
3638 case Bytecodes::_multianewarray:
3639 do_multianewarray();
3640 break;
3641 case Bytecodes::_new:
3642 do_new();
3643 break;
3644
3645 case Bytecodes::_jsr:
3646 case Bytecodes::_jsr_w:
3647 do_jsr();
3648 break;
3649
3650 case Bytecodes::_ret:
3651 do_ret();
3652 break;
3653
3654
3655 case Bytecodes::_monitorenter:
3656 do_monitor_enter();
3657 break;
3658
3659 case Bytecodes::_monitorexit:
3660 do_monitor_exit();
3661 break;
3662
3663 case Bytecodes::_breakpoint:
3664 // Breakpoint set concurrently to compile
3665 // %%% use an uncommon trap?
3666 C->record_failure("breakpoint in method");
3667 return;
3668
3669 default:
3670 #ifndef PRODUCT
3671 map()->dump(99);
3672 #endif
3673 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
3674 ShouldNotReachHere();
3675 }
3676
3677 #ifndef PRODUCT
3678 if (failing()) { return; }
3679 constexpr int perBytecode = 6;
3680 if (C->should_print_igv(perBytecode)) {
3681 IdealGraphPrinter* printer = C->igv_printer();
3682 char buffer[256];
3683 jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
3684 bool old = printer->traverse_outs();
3685 printer->set_traverse_outs(true);
3686 printer->set_parse(this);
3687 printer->print_graph(buffer);
3688 printer->set_traverse_outs(old);
3689 printer->set_parse(nullptr);
3690 }
3691 #endif
3692 }