1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciInlineKlass.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "ci/ciSymbols.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "interpreter/linkResolver.hpp"
31 #include "jvm_io.h"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/divnode.hpp"
39 #include "opto/idealGraphPrinter.hpp"
40 #include "opto/idealKit.hpp"
41 #include "opto/inlinetypenode.hpp"
42 #include "opto/matcher.hpp"
43 #include "opto/memnode.hpp"
44 #include "opto/mulnode.hpp"
45 #include "opto/opaquenode.hpp"
46 #include "opto/parse.hpp"
47 #include "opto/runtime.hpp"
48 #include "opto/subtypenode.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/deoptimization.hpp"
51 #include "runtime/sharedRuntime.hpp"
52
53 #ifndef PRODUCT
54 extern uint explicit_null_checks_inserted,
55 explicit_null_checks_elided;
56 #endif
57
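// If array access profiling recorded an element type or nullness for this bci, attach it to the
// loaded value as a speculative type so that later optimizations can rely on it.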
58 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
59 // Feed unused profile data to type speculation
60 if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
61 ciKlass* array_type = nullptr;
62 ciKlass* element_type = nullptr;
63 ProfilePtrKind element_ptr = ProfileMaybeNull;
64 bool flat_array = true;
65 bool null_free_array = true;
66 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
67 if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
68 ld = record_profile_for_speculation(ld, element_type, element_ptr);
69 }
70 }
71 return ld;
72 }
73
74
75 //---------------------------------array_load----------------------------------
76 void Parse::array_load(BasicType bt) {
77 const Type* elemtype = Type::TOP;
78 Node* adr = array_addressing(bt, 0, elemtype);
79 if (stopped()) return; // guaranteed null or range check
80
81 Node* array_index = pop();
82 Node* array = pop();
83
84 // Handle inline type arrays
85 const TypeOopPtr* element_ptr = elemtype->make_oopptr();
86 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
87
88 if (!array_type->is_not_flat()) {
89 // Cannot statically determine if array is a flat array, emit runtime check
90 assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
91 (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->maybe_flat_in_array()), "array can't be flat");
92 IdealKit ideal(this);
93 IdealVariable res(ideal);
94 ideal.declarations_done();
95 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
96 // Non-flat array
97 sync_kit(ideal);
98 if (!array_type->is_flat()) {
99 assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
100 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
101 DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
102 if (needs_range_check(array_type->size(), array_index)) {
103 // We've emitted a RangeCheck, and we now insert an additional check between the range check and the actual load.
104 // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
105 // possibly float above the range check at any point.
106 decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
107 }
108 Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
109 if (element_ptr->is_inlinetypeptr()) {
110 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
111 }
112 ideal.set(res, ld);
113 }
114 ideal.sync_kit(this);
115 } ideal.else_(); {
116 // Flat array
117 sync_kit(ideal);
118 if (!array_type->is_not_flat()) {
119 if (element_ptr->is_inlinetypeptr()) {
120 ciInlineKlass* vk = element_ptr->inline_klass();
121 Node* flat_array = cast_to_flat_array(array, vk);
122 Node* vt = InlineTypeNode::make_from_flat_array(this, vk, flat_array, array_index);
123 ideal.set(res, vt);
124 } else {
125 // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
126 // runtime call to correctly load the inline type element from the flat array.
127 Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
128 bool is_null_free = array_type->is_null_free() || !UseNullableValueFlattening;
129 if (is_null_free) {
130 inline_type = cast_not_null(inline_type);
131 }
132 ideal.set(res, inline_type);
133 }
134 }
135 ideal.sync_kit(this);
136 } ideal.end_if();
137 sync_kit(ideal);
138 Node* ld = _gvn.transform(ideal.value(res));
139 ld = record_profile_for_speculation_at_array_load(ld);
140 push_node(bt, ld);
141 return;
142 }
143
144 if (elemtype == TypeInt::BOOL) {
145 bt = T_BOOLEAN;
146 }
147 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
148 Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
149 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
150 ld = record_profile_for_speculation_at_array_load(ld);
151 // Loading an inline type from a non-flat array
152 if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
153 assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
154 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
155 }
156 push_node(bt, ld);
157 }
158
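// Load an element from a flat array whose exact layout is unknown at compile time: a runtime call
// buffers the element on the heap and returns an oop to the buffer.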
159 Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
160 // The membars below keep this access to an unknown flat array correctly
161 // ordered with other unknown and known flat array accesses.
162 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
163
164 Node* call = nullptr;
165 {
166 // Re-execute flat array load if runtime call triggers deoptimization
167 PreserveReexecuteState preexecs(this);
168 jvms()->set_bci(_bci);
169 jvms()->set_should_reexecute(true);
170 inc_sp(2);
171 kill_dead_locals();
172 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
173 OptoRuntime::load_unknown_inline_Type(),
174 OptoRuntime::load_unknown_inline_Java(),
175 nullptr, TypeRawPtr::BOTTOM,
176 array, array_index);
177 }
178 make_slow_call_ex(call, env()->Throwable_klass(), false);
179 Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
180
181 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
182
183 // Keep track of the information that the inline type is in flat arrays
184 const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
185 return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
186 }
187
188 //--------------------------------array_store----------------------------------
189 void Parse::array_store(BasicType bt) {
190 const Type* elemtype = Type::TOP;
191 Node* adr = array_addressing(bt, type2size[bt], elemtype);
192 if (stopped()) return; // guaranteed null or range check
193 Node* stored_value_casted = nullptr;
194 if (bt == T_OBJECT) {
195 stored_value_casted = array_store_check(adr, elemtype);
196 if (stopped()) {
197 return;
198 }
199 }
200 Node* const stored_value = pop_node(bt); // Value to store
201 Node* const array_index = pop(); // Index in the array
202 Node* array = pop(); // The array itself
203
204 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
205 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
206
207 if (elemtype == TypeInt::BOOL) {
208 bt = T_BOOLEAN;
209 } else if (bt == T_OBJECT) {
210 elemtype = elemtype->make_oopptr();
211 const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
212 // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
213 // This is only legal for non-null stores because the array_store_check always passes for null, even
214 // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
215 bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
216 bool not_null_free = not_inline;
217 bool not_flat = not_inline || ( stored_value_casted_type->is_inlinetypeptr() &&
218 !stored_value_casted_type->inline_klass()->maybe_flat_in_array());
219 if (!array_type->is_not_null_free() && not_null_free) {
220 // Storing a non-inline type, mark array as not null-free.
221 array_type = array_type->cast_to_not_null_free();
222 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
223 replace_in_map(array, cast);
224 array = cast;
225 }
226 if (!array_type->is_not_flat() && not_flat) {
227 // Storing to a non-flat array, mark array as not flat.
228 array_type = array_type->cast_to_not_flat();
229 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
230 replace_in_map(array, cast);
231 array = cast;
232 }
233
234 if (array_type->is_null_free() && elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
235 // Array of a null-free, empty inline type: there is only one possible state for the elements, so there is nothing to store.
236 assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
237 return;
238 }
239
240 if (!array_type->is_not_flat()) {
241 // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
242 assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
243 (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
244 // TODO 8350865 Depending on the available layouts, we can avoid this check in the flat/not-flat branches below. Also, the safe_for_replace arg is now always true.
245 array = inline_array_null_guard(array, stored_value_casted, 3, true);
246 IdealKit ideal(this);
247 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
248 // Non-flat array
249 if (!array_type->is_flat()) {
250 sync_kit(ideal);
251 assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
252 inc_sp(3);
253 access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
254 dec_sp(3);
255 ideal.sync_kit(this);
256 }
257 } ideal.else_(); {
258 // Flat array
259 sync_kit(ideal);
260 if (!array_type->is_not_flat()) {
261 // Try to determine the inline klass type of the stored value
262 ciInlineKlass* vk = nullptr;
263 if (stored_value_casted_type->is_inlinetypeptr()) {
264 vk = stored_value_casted_type->inline_klass();
265 } else if (elemtype->is_inlinetypeptr()) {
266 vk = elemtype->inline_klass();
267 }
268
269 if (vk != nullptr) {
270 // Element type is known, cast and store to flat array layout.
271 Node* flat_array = cast_to_flat_array(array, vk);
272
273 // Re-execute flat array store if buffering triggers deoptimization
274 PreserveReexecuteState preexecs(this);
275 jvms()->set_should_reexecute(true);
276 inc_sp(3);
277
278 if (!stored_value_casted->is_InlineType()) {
279 assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
280 stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
281 }
282
283 stored_value_casted->as_InlineType()->store_flat_array(this, flat_array, array_index);
284 } else {
285 // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
286 store_to_unknown_flat_array(array, array_index, stored_value_casted);
287 }
288 }
289 ideal.sync_kit(this);
290 }
291 ideal.end_if();
292 sync_kit(ideal);
293 return;
294 } else if (!array_type->is_not_null_free()) {
295 // Array is not flat but may be null free
296 assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
297 array = inline_array_null_guard(array, stored_value_casted, 3, true);
298 }
299 }
300 inc_sp(3);
301 access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
302 dec_sp(3);
303 }
304
305 // Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
306 // array layout) or not exact (could have different flat array layouts at runtime).
307 void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
308 // The membars below keep this access to an unknown flat array correctly
309 // ordered with other unknown and known flat array accesses.
310 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
311
312 Node* call = nullptr;
313 {
314 // Re-execute flat array store if runtime call triggers deoptimization
315 PreserveReexecuteState preexecs(this);
316 jvms()->set_bci(_bci);
317 jvms()->set_should_reexecute(true);
318 inc_sp(3);
319 kill_dead_locals();
320 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
321 OptoRuntime::store_unknown_inline_Type(),
322 OptoRuntime::store_unknown_inline_Java(),
323 nullptr, TypeRawPtr::BOTTOM,
324 non_null_stored_value, array, idx);
325 }
326 make_slow_call_ex(call, env()->Throwable_klass(), false);
327
328 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
329 }
330
331 //------------------------------array_addressing-------------------------------
332 // Pull array and index from the stack. Compute pointer-to-element.
333 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
334 Node *idx = peek(0+vals); // Get from stack without popping
335 Node *ary = peek(1+vals); // in case of exception
336
337 // Null check the array base, with correct stack contents
338 ary = null_check(ary, T_ARRAY);
339 // Compile-time detection of a null exception?
340 if (stopped()) return top();
341
342 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
343 const TypeInt* sizetype = arytype->size();
344 elemtype = arytype->elem();
345
346 if (UseUniqueSubclasses) {
347 const Type* el = elemtype->make_ptr();
348 if (el && el->isa_instptr()) {
349 const TypeInstPtr* toop = el->is_instptr();
350 if (toop->instance_klass()->unique_concrete_subklass()) {
351 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
352 const Type* subklass = Type::get_const_type(toop->instance_klass());
353 elemtype = subklass->join_speculative(el);
354 }
355 }
356 }
357
358 if (!arytype->is_loaded()) {
359 // Only fails for some -Xcomp runs
360 // The class is unloaded. We have to run this bytecode in the interpreter.
361 ciKlass* klass = arytype->unloaded_klass();
362
363 uncommon_trap(Deoptimization::Reason_unloaded,
364 Deoptimization::Action_reinterpret,
365 klass, "!loaded array");
366 return top();
367 }
368
369 ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);
370
371 if (needs_range_check(sizetype, idx)) {
372 create_range_check(idx, ary, sizetype);
373 } else if (C->log() != nullptr) {
374 C->log()->elem("observe that='!need_range_check'");
375 }
376
377 // Check for always knowing you are throwing a range-check exception
378 if (stopped()) return top();
379
380 // Make array address computation control dependent to prevent it
381 // from floating above the range check during loop optimizations.
382 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
383 assert(ptr != top(), "top should go hand-in-hand with stopped");
384
385 return ptr;
386 }
387
388 // Check if we need a range check for an array access. This is the case if the index is either negative or if it could
389 // be greater than or equal to the smallest possible array size (i.e. out of bounds).
390 bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
391 const TypeInt* index_type = _gvn.type(index)->is_int();
392 return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
393 }
394
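// Emit the range check for an array access. The out-of-bounds path either deoptimizes via an uncommon
// trap (while range check smearing is still allowed) or throws directly.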
395 void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
396 Node* tst;
397 if (sizetype->_hi <= 0) {
398 // The greatest array bound is negative, so we can conclude that we're
399 // compiling unreachable code, but the unsigned compare trick used below
400 // only works with non-negative lengths. Instead, hack "tst" to be zero so
401 // the uncommon_trap path will always be taken.
402 tst = _gvn.intcon(0);
403 } else {
404 // Range is constant in array-oop, so we can use the original state of mem
405 Node* len = load_array_length(ary);
406
407 // Test length vs index (standard trick using unsigned compare)
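    // idx is in bounds iff 0 <= idx < len. Since len is non-negative, this is equivalent to the single
    // unsigned comparison (juint)idx < (juint)len: a negative idx wraps to a large unsigned value and fails the test.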
408 Node* chk = _gvn.transform(new CmpUNode(idx, len) );
409 BoolTest::mask btest = BoolTest::lt;
410 tst = _gvn.transform(new BoolNode(chk, btest) );
411 }
412 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
413 _gvn.set_type(rc, rc->Value(&_gvn));
414 if (!tst->is_Con()) {
415 record_for_igvn(rc);
416 }
417 set_control(_gvn.transform(new IfTrueNode(rc)));
418 // Branch to failure if out of bounds
419 {
420 PreserveJVMState pjvms(this);
421 set_control(_gvn.transform(new IfFalseNode(rc)));
422 if (C->allow_range_check_smearing()) {
423 // Do not use builtin_throw, since range checks are sometimes
424 // made more stringent by an optimistic transformation.
425 // This creates "tentative" range checks at this point,
426 // which are not guaranteed to throw exceptions.
427 // See IfNode::Ideal, is_range_check, adjust_check.
428 uncommon_trap(Deoptimization::Reason_range_check,
429 Deoptimization::Action_make_not_entrant,
430 nullptr, "range_check");
431 } else {
432 // If we have already recompiled with the range-check-widening
433 // heroic optimization turned off, then we must really be throwing
434 // range check exceptions.
435 builtin_throw(Deoptimization::Reason_range_check);
436 }
437 }
438 }
439
440 // For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
441 // and null-freeness. We can either prepare the speculative type for later use or emit explicit speculative checks with
442 // traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
443 // non-null-free implies non-flat, which allows us to remove flatness checks). This makes the graph simpler.
444 Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
445 const Type*& element_type) {
446 if (!array_type->is_flat() && !array_type->is_not_flat()) {
447 // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
448 // we can rely on a fixed memory layout (i.e. either a flat layout or not).
449 array = cast_to_speculative_array_type(array, array_type, element_type);
450 } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
451 // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
452 // at this bci.
453 array = cast_to_profiled_array_type(array);
454 }
455
456 // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile data
457 // to see whether we have a non-null-free or non-flat array. Speculating on a non-null-free array doesn't help aaload but
458 // could be profitable for a subsequent aastore.
459 if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
460 array = speculate_non_null_free_array(array, array_type);
461 }
462 if (!array_type->is_flat() && !array_type->is_not_flat()) {
463 array = speculate_non_flat_array(array, array_type);
464 }
465 return array;
466 }
467
468 // Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
469 // wrong. On the fast path, we add a CheckCastPP to use the exact type.
470 Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
471 Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
472 ciKlass* speculative_array_type = array_type->speculative_type();
473 if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
474 // No speculative type, check profile data at this bci
475 speculative_array_type = nullptr;
476 reason = Deoptimization::Reason_class_check;
477 if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
478 ciKlass* profiled_element_type = nullptr;
479 ProfilePtrKind element_ptr = ProfileMaybeNull;
480 bool flat_array = true;
481 bool null_free_array = true;
482 method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
483 null_free_array);
484 }
485 }
486 if (speculative_array_type != nullptr) {
487 // Speculate that this array has the exact type reported by profile data
488 Node* casted_array = nullptr;
489 DEBUG_ONLY(Node* old_control = control();)
490 Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
491 if (stopped()) {
492 // The check always fails and therefore profile information is incorrect. Don't use it.
493 assert(old_control == slow_ctl, "type check should have been removed");
494 set_control(slow_ctl);
495 } else if (!slow_ctl->is_top()) {
496 { PreserveJVMState pjvms(this);
497 set_control(slow_ctl);
498 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
499 }
500 replace_in_map(array, casted_array);
501 array_type = _gvn.type(casted_array)->is_aryptr();
502 element_type = array_type->elem();
503 return casted_array;
504 }
505 }
506 return array;
507 }
508
509 // Create a CheckCastPP when the speculative type can improve the current type.
510 Node* Parse::cast_to_profiled_array_type(Node* const array) {
511 ciKlass* array_type = nullptr;
512 ciKlass* element_type = nullptr;
513 ProfilePtrKind element_ptr = ProfileMaybeNull;
514 bool flat_array = true;
515 bool null_free_array = true;
516 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
517 if (array_type != nullptr) {
518 return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
519 }
520 return array;
521 }
522
523 // Speculate that the array is non-null-free. We emit a trap when this turns out to be
524 // wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
525 Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
526 bool null_free_array = true;
527 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
528 if (array_type->speculative() != nullptr &&
529 array_type->speculative()->is_aryptr()->is_not_null_free() &&
530 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
531 null_free_array = false;
532 reason = Deoptimization::Reason_speculate_class_check;
533 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
534 ciKlass* profiled_array_type = nullptr;
535 ciKlass* profiled_element_type = nullptr;
536 ProfilePtrKind element_ptr = ProfileMaybeNull;
537 bool flat_array = true;
538 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
539 null_free_array);
540 reason = Deoptimization::Reason_class_check;
541 }
542 if (!null_free_array) {
543 { // Deoptimize if null-free array
544 BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
545 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
546 }
547 assert(!stopped(), "null-free array should have been caught earlier");
548 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
549 replace_in_map(array, casted_array);
550 array_type = _gvn.type(casted_array)->is_aryptr();
551 return casted_array;
552 }
553 return array;
554 }
555
556 // Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
557 // On the fast path, we add a CheckCastPP to use the non-flat type.
558 Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
559 bool flat_array = true;
560 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
561 if (array_type->speculative() != nullptr &&
562 array_type->speculative()->is_aryptr()->is_not_flat() &&
563 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
564 flat_array = false;
565 reason = Deoptimization::Reason_speculate_class_check;
566 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
567 ciKlass* profiled_array_type = nullptr;
568 ciKlass* profiled_element_type = nullptr;
569 ProfilePtrKind element_ptr = ProfileMaybeNull;
570 bool null_free_array = true;
571 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
572 null_free_array);
573 reason = Deoptimization::Reason_class_check;
574 }
575 if (!flat_array) {
576 { // Deoptimize if flat array
577 BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
578 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
579 }
580 assert(!stopped(), "flat array should have been caught earlier");
581 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
582 replace_in_map(array, casted_array);
583 return casted_array;
584 }
585 return array;
586 }
587
588 // Emit an integer compare of a against b with the given test; returns the IfNode (the callers attach the projections).
589 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
590 Node *cmp = _gvn.transform(new CmpINode(a, b));
591 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
592 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
593 return iff;
594 }
595
596
597 // sentinel value for the target bci to mark never taken branches
598 // (according to profiling)
599 static const int never_reached = INT_MAX;
600
601 //------------------------------helper for tableswitch-------------------------
602 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
603 // True branch, use existing map info
604 { PreserveJVMState pjvms(this);
605 Node *iftrue = _gvn.transform( new IfTrueNode (iff) );
606 set_control( iftrue );
607 if (unc) {
608 repush_if_args();
609 uncommon_trap(Deoptimization::Reason_unstable_if,
610 Deoptimization::Action_reinterpret,
611 nullptr,
612 "taken always");
613 } else {
614 assert(dest_bci_if_true != never_reached, "inconsistent dest");
615 merge_new_path(dest_bci_if_true);
616 }
617 }
618
619 // False branch
620 Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
621 set_control( iffalse );
622 }
623
624 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
625 // False branch, use existing map info
626 { PreserveJVMState pjvms(this);
627 Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
628 set_control( iffalse );
629 if (unc) {
630 repush_if_args();
631 uncommon_trap(Deoptimization::Reason_unstable_if,
632 Deoptimization::Action_reinterpret,
633 nullptr,
634 "taken never");
635 } else {
636 assert(dest_bci_if_true != never_reached, "inconsistent dest");
637 merge_new_path(dest_bci_if_true);
638 }
639 }
640
641 // True branch
642 Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
643 set_control( iftrue );
644 }
645
646 void Parse::jump_if_always_fork(int dest_bci, bool unc) {
647 // False branch, use existing map and control()
648 if (unc) {
649 repush_if_args();
650 uncommon_trap(Deoptimization::Reason_unstable_if,
651 Deoptimization::Action_reinterpret,
652 nullptr,
653 "taken never");
654 } else {
655 assert(dest_bci != never_reached, "inconsistent dest");
656 merge_new_path(dest_bci);
657 }
658 }
659
660
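// Three-way comparator for qsort(): orders lookupswitch table entries by their leading jint
// (the match value) in ascending order.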
661 extern "C" {
662 static int jint_cmp(const void *i, const void *j) {
663 int a = *(jint *)i;
664 int b = *(jint *)j;
665 return a > b ? 1 : a < b ? -1 : 0;
666 }
667 }
668
669
670 class SwitchRange : public StackObj {
671 // a range of integers coupled with a bci destination
672 jint _lo; // inclusive lower limit
673 jint _hi; // inclusive upper limit
674 int _dest;
675 float _cnt; // how many times this range was hit according to profiling
676
677 public:
678 jint lo() const { return _lo; }
679 jint hi() const { return _hi; }
680 int dest() const { return _dest; }
681 bool is_singleton() const { return _lo == _hi; }
682 float cnt() const { return _cnt; }
683
684 void setRange(jint lo, jint hi, int dest, float cnt) {
685 assert(lo <= hi, "must be a non-empty range");
686 _lo = lo; _hi = hi; _dest = dest; _cnt = cnt;
687 assert(_cnt >= 0, "");
688 }
689 bool adjoinRange(jint lo, jint hi, int dest, float cnt, bool trim_ranges) {
690 assert(lo <= hi, "must be a non-empty range");
691 if (lo == _hi+1) {
692 // see merge_ranges() comment below
693 if (trim_ranges) {
694 if (cnt == 0) {
695 if (_cnt != 0) {
696 return false;
697 }
698 if (dest != _dest) {
699 _dest = never_reached;
700 }
701 } else {
702 if (_cnt == 0) {
703 return false;
704 }
705 if (dest != _dest) {
706 return false;
707 }
708 }
709 } else {
710 if (dest != _dest) {
711 return false;
712 }
713 }
714 _hi = hi;
715 _cnt += cnt;
716 return true;
717 }
718 return false;
719 }
720
721 void set (jint value, int dest, float cnt) {
722 setRange(value, value, dest, cnt);
723 }
724 bool adjoin(jint value, int dest, float cnt, bool trim_ranges) {
725 return adjoinRange(value, value, dest, cnt, trim_ranges);
726 }
727 bool adjoin(SwitchRange& other) {
728 return adjoinRange(other._lo, other._hi, other._dest, other._cnt, false);
729 }
730
731 void print() {
732 if (is_singleton())
733 tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
734 else if (lo() == min_jint)
735 tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
736 else if (hi() == max_jint)
737 tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
738 else
739 tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
740 }
741 };
742
743 // We try to minimize the number of ranges and the size of the taken
744 // ones using profiling data. When ranges are created,
745 // SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
746 // if both were never hit or both were hit to build longer unreached
747 // ranges. Here, we now merge adjoining ranges with the same
748 // destination and finally set destination of unreached ranges to the
749 // special value never_reached because it can help minimize the number
750 // of tests that are necessary.
751 //
752 // For instance:
753 // [0, 1] to target1 sometimes taken
754 // [1, 2] to target1 never taken
755 // [2, 3] to target2 never taken
756 // would lead to:
757 // [0, 1] to target1 sometimes taken
758 // [1, 3] never taken
759 //
760 // (first 2 ranges to target1 are not merged)
761 static void merge_ranges(SwitchRange* ranges, int& rp) {
762 if (rp == 0) {
763 return;
764 }
765 int shift = 0;
766 for (int j = 0; j < rp; j++) {
767 SwitchRange& r1 = ranges[j-shift];
768 SwitchRange& r2 = ranges[j+1];
769 if (r1.adjoin(r2)) {
770 shift++;
771 } else if (shift > 0) {
772 ranges[j+1-shift] = r2;
773 }
774 }
775 rp -= shift;
776 for (int j = 0; j <= rp; j++) {
777 SwitchRange& r = ranges[j];
778 if (r.cnt() == 0 && r.dest() != never_reached) {
779 r.setRange(r.lo(), r.hi(), never_reached, r.cnt());
780 }
781 }
782 }
783
784 //-------------------------------do_tableswitch--------------------------------
785 void Parse::do_tableswitch() {
786 // Get information about tableswitch
787 int default_dest = iter().get_dest_table(0);
788 jint lo_index = iter().get_int_table(1);
789 jint hi_index = iter().get_int_table(2);
790 int len = hi_index - lo_index + 1;
791
792 if (len < 1) {
793 // If this is a backward branch, add safepoint
794 maybe_add_safepoint(default_dest);
795 pop(); // the effect of the instruction execution on the operand stack
796 merge(default_dest);
797 return;
798 }
799
800 ciMethodData* methodData = method()->method_data();
801 ciMultiBranchData* profile = nullptr;
802 if (methodData->is_mature() && UseSwitchProfiling) {
803 ciProfileData* data = methodData->bci_to_data(bci());
804 if (data != nullptr && data->is_MultiBranchData()) {
805 profile = (ciMultiBranchData*)data;
806 }
807 }
808 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
809
810 // generate decision tree, using trichotomy when possible
811 int rnum = len+2;
812 bool makes_backward_branch = (default_dest <= bci());
813 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
814 int rp = -1;
815 if (lo_index != min_jint) {
816 float cnt = 1.0F;
817 if (profile != nullptr) {
818 cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
819 }
820 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
821 }
822 for (int j = 0; j < len; j++) {
823 jint match_int = lo_index+j;
824 int dest = iter().get_dest_table(j+3);
825 makes_backward_branch |= (dest <= bci());
826 float cnt = 1.0F;
827 if (profile != nullptr) {
828 cnt = (float)profile->count_at(j);
829 }
830 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
831 ranges[++rp].set(match_int, dest, cnt);
832 }
833 }
834 jint highest = lo_index+(len-1);
835 assert(ranges[rp].hi() == highest, "");
836 if (highest != max_jint) {
837 float cnt = 1.0F;
838 if (profile != nullptr) {
839 cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
840 }
841 if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
842 ranges[++rp].setRange(highest+1, max_jint, default_dest, cnt);
843 }
844 }
845 assert(rp < len+2, "not too many ranges");
846
847 if (trim_ranges) {
848 merge_ranges(ranges, rp);
849 }
850
851 // Safepoint in case backward branch observed
852 if (makes_backward_branch) {
853 add_safepoint();
854 }
855
856 Node* lookup = pop(); // lookup value
857 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
858 }
859
860
861 //------------------------------do_lookupswitch--------------------------------
862 void Parse::do_lookupswitch() {
863 // Get information about lookupswitch
864 int default_dest = iter().get_dest_table(0);
865 jint len = iter().get_int_table(1);
866
867 if (len < 1) { // If this is a backward branch, add safepoint
868 maybe_add_safepoint(default_dest);
869 pop(); // the effect of the instruction execution on the operand stack
870 merge(default_dest);
871 return;
872 }
873
874 ciMethodData* methodData = method()->method_data();
875 ciMultiBranchData* profile = nullptr;
876 if (methodData->is_mature() && UseSwitchProfiling) {
877 ciProfileData* data = methodData->bci_to_data(bci());
878 if (data != nullptr && data->is_MultiBranchData()) {
879 profile = (ciMultiBranchData*)data;
880 }
881 }
882 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
883
884 // generate decision tree, using trichotomy when possible
885 jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
886 {
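    // Build (match value, destination bci, profile count) triples and sort them by match value.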
887 for (int j = 0; j < len; j++) {
888 table[3*j+0] = iter().get_int_table(2+2*j);
889 table[3*j+1] = iter().get_dest_table(2+2*j+1);
890 // Handle overflow when converting from uint to jint
891 table[3*j+2] = (profile == nullptr) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
892 }
893 qsort(table, len, 3*sizeof(table[0]), jint_cmp);
894 }
895
896 float default_cnt = 1.0F;
897 if (profile != nullptr) {
898 juint defaults = max_juint - len;
899 default_cnt = (float)profile->default_count()/(float)defaults;
900 }
901
902 int rnum = len*2+1;
903 bool makes_backward_branch = (default_dest <= bci());
904 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
905 int rp = -1;
906 for (int j = 0; j < len; j++) {
907 jint match_int = table[3*j+0];
908 jint dest = table[3*j+1];
909 jint cnt = table[3*j+2];
910 jint next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
911 makes_backward_branch |= (dest <= bci());
912 float c = default_cnt * ((float)match_int - (float)next_lo);
913 if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, c, trim_ranges))) {
914 assert(default_dest != never_reached, "sentinel value for dead destinations");
915 ranges[++rp].setRange(next_lo, match_int-1, default_dest, c);
916 }
917 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, (float)cnt, trim_ranges)) {
918 assert(dest != never_reached, "sentinel value for dead destinations");
919 ranges[++rp].set(match_int, dest, (float)cnt);
920 }
921 }
922 jint highest = table[3*(len-1)];
923 assert(ranges[rp].hi() == highest, "");
924 if (highest != max_jint &&
925 !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest), trim_ranges)) {
926 ranges[++rp].setRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest));
927 }
928 assert(rp < rnum, "not too many ranges");
929
930 if (trim_ranges) {
931 merge_ranges(ranges, rp);
932 }
933
934 // Safepoint in case backward branch observed
935 if (makes_backward_branch) {
936 add_safepoint();
937 }
938
939 Node *lookup = pop(); // lookup value
940 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
941 }
942
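// Turn a taken count and a total count into a branch probability, clamped to [PROB_MIN, PROB_MAX];
// returns PROB_FAIR when no profile counts are available.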
943 static float if_prob(float taken_cnt, float total_cnt) {
944 assert(taken_cnt <= total_cnt, "");
945 if (total_cnt == 0) {
946 return PROB_FAIR;
947 }
948 float p = taken_cnt / total_cnt;
949 return clamp(p, PROB_MIN, PROB_MAX);
950 }
951
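// Map a zero profile count to COUNT_UNKNOWN; otherwise return the count unchanged.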
952 static float if_cnt(float cnt) {
953 if (cnt == 0) {
954 return COUNT_UNKNOWN;
955 }
956 return cnt;
957 }
958
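// Sum of the profile counts of the ranges lo..hi (inclusive).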
959 static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
960 float total_cnt = 0;
961 for (SwitchRange* sr = lo; sr <= hi; sr++) {
962 total_cnt += sr->cnt();
963 }
964 return total_cnt;
965 }
966
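// Work item for the iterative cost estimation in compute_tree_cost(): covers the ranges [_lo, _hi],
// with _mid as the chosen pivot, _cost as the accumulated cost, and _state tracking which subtrees
// have already been costed.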
967 class SwitchRanges : public ResourceObj {
968 public:
969 SwitchRange* _lo;
970 SwitchRange* _hi;
971 SwitchRange* _mid;
972 float _cost;
973
974 enum {
975 Start,
976 LeftDone,
977 RightDone,
978 Done
979 } _state;
980
981 SwitchRanges(SwitchRange *lo, SwitchRange *hi)
982 : _lo(lo), _hi(hi), _mid(nullptr),
983 _cost(0), _state(Start) {
984 }
985
986 SwitchRanges()
987 : _lo(nullptr), _hi(nullptr), _mid(nullptr),
988 _cost(0), _state(Start) {}
989 };
990
991 // Estimate cost of performing a binary search on lo..hi
992 static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
993 GrowableArray<SwitchRanges> tree;
994 SwitchRanges root(lo, hi);
995 tree.push(root);
996
997 float cost = 0;
998 do {
999 SwitchRanges& r = *tree.adr_at(tree.length()-1);
1000 if (r._hi != r._lo) {
1001 if (r._mid == nullptr) {
1002 float r_cnt = sum_of_cnts(r._lo, r._hi);
1003
1004 if (r_cnt == 0) {
1005 tree.pop();
1006 cost = 0;
1007 continue;
1008 }
1009
1010 SwitchRange* mid = nullptr;
1011 mid = r._lo;
1012 for (float cnt = 0; ; ) {
1013 assert(mid <= r._hi, "out of bounds");
1014 cnt += mid->cnt();
1015 if (cnt > r_cnt / 2) {
1016 break;
1017 }
1018 mid++;
1019 }
1020 assert(mid <= r._hi, "out of bounds");
1021 r._mid = mid;
1022 r._cost = r_cnt / total_cnt;
1023 }
1024 r._cost += cost;
1025 if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
1026 cost = 0;
1027 r._state = SwitchRanges::LeftDone;
1028 tree.push(SwitchRanges(r._lo, r._mid-1));
1029 } else if (r._state < SwitchRanges::RightDone) {
1030 cost = 0;
1031 r._state = SwitchRanges::RightDone;
1032 tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
1033 } else {
1034 tree.pop();
1035 cost = r._cost;
1036 }
1037 } else {
1038 tree.pop();
1039 cost = r._cost;
1040 }
1041 } while (tree.length() > 0);
1042
1043
1044 return cost;
1045 }
1046
1047 // It sometimes pays off to test the most common ranges before the binary search
1048 void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
1049 uint nr = hi - lo + 1;
1050 float total_cnt = sum_of_cnts(lo, hi);
1051
1052 float min = compute_tree_cost(lo, hi, total_cnt);
1053 float extra = 1;
1054 float sub = 0;
1055
1056 SwitchRange* array1 = lo;
1057 SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);
1058
1059 SwitchRange* ranges = nullptr;
1060
1061 while (nr >= 2) {
1062 assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
1063 ranges = (lo == array1) ? array2 : array1;
1064
1065 // Find highest frequency range
1066 SwitchRange* candidate = lo;
1067 for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
1068 if (sr->cnt() > candidate->cnt()) {
1069 candidate = sr;
1070 }
1071 }
1072 SwitchRange most_freq = *candidate;
1073 if (most_freq.cnt() == 0) {
1074 break;
1075 }
1076
1077 // Copy remaining ranges into another array
1078 int shift = 0;
1079 for (uint i = 0; i < nr; i++) {
1080 SwitchRange* sr = &lo[i];
1081 if (sr != candidate) {
1082 ranges[i-shift] = *sr;
1083 } else {
1084 shift++;
1085 if (i > 0 && i < nr-1) {
1086 SwitchRange prev = lo[i-1];
1087 prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.cnt());
1088 if (prev.adjoin(lo[i+1])) {
1089 shift++;
1090 i++;
1091 }
1092 ranges[i-shift] = prev;
1093 }
1094 }
1095 }
1096 nr -= shift;
1097
1098 // Evaluate cost of testing the most common range and performing a
1099 // binary search on the other ranges
1100 float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
1101 if (cost >= min) {
1102 break;
1103 }
1104 // swap arrays
1105 lo = &ranges[0];
1106 hi = &ranges[nr-1];
1107
1108 // It pays off: emit the test for the most common range
1109 assert(most_freq.cnt() > 0, "must be taken");
1110 Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
1111 Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(java_subtract(most_freq.hi(), most_freq.lo()))));
1112 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
1113 IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
1114 jump_if_true_fork(iff, most_freq.dest(), false);
1115
1116 sub += most_freq.cnt() / total_cnt;
1117 extra += 1 - sub;
1118 min = cost;
1119 }
1120 }
1121
1122 //----------------------------create_jump_tables-------------------------------
1123 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
1124 // Are jumptables enabled
1125 if (!UseJumpTables) return false;
1126
1127 // Are jumptables supported
1128 if (!Matcher::has_match_rule(Op_Jump)) return false;
1129
1130 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1131
1132 // Decide if a guard is needed to lop off big ranges at either (or
1133 // both) end(s) of the input set. We'll call this the default target
1134 // even though we can't be sure that it is the true "default".
1135
1136 bool needs_guard = false;
1137 int default_dest;
1138 int64_t total_outlier_size = 0;
1139 int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
1140 int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;
1141
1142 if (lo->dest() == hi->dest()) {
1143 total_outlier_size = hi_size + lo_size;
1144 default_dest = lo->dest();
1145 } else if (lo_size > hi_size) {
1146 total_outlier_size = lo_size;
1147 default_dest = lo->dest();
1148 } else {
1149 total_outlier_size = hi_size;
1150 default_dest = hi->dest();
1151 }
1152
1153 float total = sum_of_cnts(lo, hi);
1154 float cost = compute_tree_cost(lo, hi, total);
1155
1156 // If a guard test will eliminate very sparse end ranges, then
1157 // it is worth the cost of an extra jump.
1158 float trimmed_cnt = 0;
1159 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
1160 needs_guard = true;
1161 if (default_dest == lo->dest()) {
1162 trimmed_cnt += lo->cnt();
1163 lo++;
1164 }
1165 if (default_dest == hi->dest()) {
1166 trimmed_cnt += hi->cnt();
1167 hi--;
1168 }
1169 }
1170
1171 // Find the total number of cases and ranges
1172 int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
1173 int num_range = hi - lo + 1;
1174
1175 // Don't create table if: too large, too small, or too sparse.
1176 if (num_cases > MaxJumpTableSize)
1177 return false;
1178 if (UseSwitchProfiling) {
1179 // MinJumpTableSize is set so that, with a well balanced binary tree,
1180 // when the number of ranges is MinJumpTableSize, it's cheaper to
1181 // go through a JumpNode than a tree of IfNodes. The average cost of a
1182 // tree of IfNodes with MinJumpTableSize ranges is
1183 // log2f(MinJumpTableSize) comparisons. So if the cost computed
1184 // from profile data is less than log2f(MinJumpTableSize), then
1185 // going with the binary search is cheaper.
1186 if (cost < log2f(MinJumpTableSize)) {
1187 return false;
1188 }
1189 } else {
1190 if (num_cases < MinJumpTableSize)
1191 return false;
1192 }
1193 if (num_cases > (MaxJumpTableSparseness * num_range))
1194 return false;
1195
1196 // Normalize table lookups to zero
1197 int lowval = lo->lo();
1198 key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );
1199
1200 // Generate a guard to protect against input keyvals that aren't
1201 // in the switch domain.
1202 if (needs_guard) {
1203 Node* size = _gvn.intcon(num_cases);
1204 Node* cmp = _gvn.transform(new CmpUNode(key_val, size));
1205 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
1206 IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
1207 jump_if_true_fork(iff, default_dest, trim_ranges && trimmed_cnt == 0);
1208
1209 total -= trimmed_cnt;
1210 }
1211
1212 // Create an ideal node JumpTable that has projections
1213 // of all possible ranges for a switch statement
1214 // The key_val input must be converted to a pointer offset and scaled.
1215 // Compare Parse::array_addressing above.
1216
1217 // Clean the 32-bit int into a real 64-bit offset.
1218 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
1219 // Make I2L conversion control dependent to prevent it from
1220 // floating above the range check during loop optimizations.
1221 // Do not use a narrow int type here to prevent the data path from dying
1222 // while the control path is not removed. This can happen if the type of key_val
1223 // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast
1224 // would be replaced by TOP while C2 is not able to fold the corresponding range checks.
1225 // Set _carry_dependency for the cast to avoid being removed by IGVN.
1226 #ifdef _LP64
1227 key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */);
1228 #endif
1229
1230 // Shift the value by wordsize so we have an index into the table, rather
1231 // than a switch value
1232 Node *shiftWord = _gvn.MakeConX(wordSize);
1233 key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
1234
1235 // Create the JumpNode
1236 Arena* arena = C->comp_arena();
1237 float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
1238 int i = 0;
1239 if (total == 0) {
1240 for (SwitchRange* r = lo; r <= hi; r++) {
1241 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1242 probs[i] = 1.0F / num_cases;
1243 }
1244 }
1245 } else {
1246 for (SwitchRange* r = lo; r <= hi; r++) {
1247 float prob = r->cnt()/total;
1248 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1249 probs[i] = prob / (r->hi() - r->lo() + 1);
1250 }
1251 }
1252 }
1253
1254 ciMethodData* methodData = method()->method_data();
1255 ciMultiBranchData* profile = nullptr;
1256 if (methodData->is_mature()) {
1257 ciProfileData* data = methodData->bci_to_data(bci());
1258 if (data != nullptr && data->is_MultiBranchData()) {
1259 profile = (ciMultiBranchData*)data;
1260 }
1261 }
1262
1263 Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == nullptr ? COUNT_UNKNOWN : total));
1264
1265 // These are the switch destinations hanging off the jumpnode
1266 i = 0;
1267 for (SwitchRange* r = lo; r <= hi; r++) {
1268 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1269 Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
1270 {
1271 PreserveJVMState pjvms(this);
1272 set_control(input);
1273 jump_if_always_fork(r->dest(), trim_ranges && r->cnt() == 0);
1274 }
1275 }
1276 }
1277 assert(i == num_cases, "miscount of cases");
1278 stop_and_kill_map(); // no more uses for this JVMS
1279 return true;
1280 }
1281
1282 //----------------------------jump_switch_ranges-------------------------------
1283 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
1284 Block* switch_block = block();
1285 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1286
1287 if (switch_depth == 0) {
1288 // Do special processing for the top-level call.
1289 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
1290 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
1291
1292 // Decrement pred-numbers for the unique set of nodes.
1293 #ifdef ASSERT
1294 if (!trim_ranges) {
1295 // Ensure that the block's successors are a (duplicate-free) set.
1296 int successors_counted = 0; // block occurrences in [lo..hi]
1297 int unique_successors = switch_block->num_successors();
1298 for (int i = 0; i < unique_successors; i++) {
1299 Block* target = switch_block->successor_at(i);
1300
1301 // Check that the set of successors is the same in both places.
1302 int successors_found = 0;
1303 for (SwitchRange* p = lo; p <= hi; p++) {
1304 if (p->dest() == target->start()) successors_found++;
1305 }
1306 assert(successors_found > 0, "successor must be known");
1307 successors_counted += successors_found;
1308 }
1309 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
1310 }
1311 #endif
1312
1313 // Maybe prune the inputs, based on the type of key_val.
1314 jint min_val = min_jint;
1315 jint max_val = max_jint;
1316 const TypeInt* ti = key_val->bottom_type()->isa_int();
1317 if (ti != nullptr) {
1318 min_val = ti->_lo;
1319 max_val = ti->_hi;
1320 assert(min_val <= max_val, "invalid int type");
1321 }
1322 while (lo->hi() < min_val) {
1323 lo++;
1324 }
1325 if (lo->lo() < min_val) {
1326 lo->setRange(min_val, lo->hi(), lo->dest(), lo->cnt());
1327 }
1328 while (hi->lo() > max_val) {
1329 hi--;
1330 }
1331 if (hi->hi() > max_val) {
1332 hi->setRange(hi->lo(), max_val, hi->dest(), hi->cnt());
1333 }
1334
1335 linear_search_switch_ranges(key_val, lo, hi);
1336 }
1337
1338 #ifndef PRODUCT
1339 if (switch_depth == 0) {
1340 _max_switch_depth = 0;
1341 _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1;
1342 }
1343 #endif
1344
1345 assert(lo <= hi, "must be a non-empty set of ranges");
1346 if (lo == hi) {
1347 jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1348 } else {
1349 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
1350 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
1351
1352 if (create_jump_tables(key_val, lo, hi)) return;
1353
1354 SwitchRange* mid = nullptr;
1355 float total_cnt = sum_of_cnts(lo, hi);
1356
1357 int nr = hi - lo + 1;
1358 if (UseSwitchProfiling) {
1359 // Don't keep the binary search tree balanced: pick a mid point
1360 // that splits the frequencies in half.
1361 float cnt = 0;
1362 for (SwitchRange* sr = lo; sr <= hi; sr++) {
1363 cnt += sr->cnt();
1364 if (cnt >= total_cnt / 2) {
1365 mid = sr;
1366 break;
1367 }
1368 }
1369 } else {
1370 mid = lo + nr/2;
1371
1372 // if there is an easy choice, pivot at a singleton:
1373 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
1374
1375 assert(lo < mid && mid <= hi, "good pivot choice");
1376 assert(nr != 2 || mid == hi, "should pick higher of 2");
1377 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
1378 }
1379
1380
1381 Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());
1382
1383 if (mid->is_singleton()) {
1384 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
1385 jump_if_false_fork(iff_ne, mid->dest(), trim_ranges && mid->cnt() == 0);
1386
1387 // Special Case: If there are exactly three ranges, and the high
1388 // and low range each go to the same place, omit the "gt" test,
1389 // since it will not discriminate anything.
1390 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;
1391
1392 // if there is a higher range, test for it and process it:
1393 if (mid < hi && !eq_test_only) {
1394 // two comparisons of same values--should enable 1 test for 2 branches
1395 // Use BoolTest::lt instead of BoolTest::gt
1396 float cnt = sum_of_cnts(lo, mid-1);
1397 IfNode *iff_lt = jump_if_fork_int(key_val, test_val, BoolTest::lt, if_prob(cnt, total_cnt), if_cnt(cnt));
1398 Node *iftrue = _gvn.transform( new IfTrueNode(iff_lt) );
1399 Node *iffalse = _gvn.transform( new IfFalseNode(iff_lt) );
1400 { PreserveJVMState pjvms(this);
1401 set_control(iffalse);
1402 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
1403 }
1404 set_control(iftrue);
1405 }
1406
1407 } else {
1408 // mid is a range, not a singleton, so treat mid..hi as a unit
1409 float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
1410 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));
1411
1412 // if there is a higher range, test for it and process it:
1413 if (mid == hi) {
1414 jump_if_true_fork(iff_ge, mid->dest(), trim_ranges && cnt == 0);
1415 } else {
1416 Node *iftrue = _gvn.transform( new IfTrueNode(iff_ge) );
1417 Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
1418 { PreserveJVMState pjvms(this);
1419 set_control(iftrue);
1420 jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
1421 }
1422 set_control(iffalse);
1423 }
1424 }
1425
1426 // in any case, process the lower range
1427 if (mid == lo) {
1428 if (mid->is_singleton()) {
1429 jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
1430 } else {
1431 jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1432 }
1433 } else {
1434 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
1435 }
1436 }
1437
1438 // Decrease pred_count for each successor after all is done.
1439 if (switch_depth == 0) {
1440 int unique_successors = switch_block->num_successors();
1441 for (int i = 0; i < unique_successors; i++) {
1442 Block* target = switch_block->successor_at(i);
1443 // Throw away the pre-allocated path for each unique successor.
1444 target->next_path_num();
1445 }
1446 }
1447
1448 #ifndef PRODUCT
1449 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
1450 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
1451 SwitchRange* r;
1452 int nsing = 0;
1453 for( r = lo; r <= hi; r++ ) {
1454 if( r->is_singleton() ) nsing++;
1455 }
1456 tty->print(">>> ");
1457 _method->print_short_name();
1458 tty->print_cr(" switch decision tree");
1459 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
1460 (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
1461 if (_max_switch_depth > _est_switch_depth) {
1462 tty->print_cr("******** BAD SWITCH DEPTH ********");
1463 }
1464 tty->print(" ");
1465 for( r = lo; r <= hi; r++ ) {
1466 r->print();
1467 }
1468 tty->cr();
1469 }
1470 #endif
1471 }
1472
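// Lower a floating-point remainder (frem/drem) to a pure leaf runtime call (ModF/ModD) and return
// its result projection.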
1473 Node* Parse::floating_point_mod(Node* a, Node* b, BasicType type) {
1474 assert(type == BasicType::T_FLOAT || type == BasicType::T_DOUBLE, "only float and double are floating points");
1475 CallLeafPureNode* mod = type == BasicType::T_DOUBLE ? static_cast<CallLeafPureNode*>(new ModDNode(C, a, b)) : new ModFNode(C, a, b);
1476
1477 set_predefined_input_for_runtime_call(mod);
1478 mod = _gvn.transform(mod)->as_CallLeafPure();
1479 set_predefined_output_for_runtime_call(mod);
1480 Node* result = _gvn.transform(new ProjNode(mod, TypeFunc::Parms + 0));
1481 record_for_igvn(mod);
1482 return result;
1483 }
1484
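// Fallback for the l2f bytecode: convert long to float via a leaf runtime call to
// SharedRuntime::l2f. Used from do_one_bytecode when Matcher::convL2FSupported()
// returns false (see the _l2f case below).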
1485 void Parse::l2f() {
1486 Node* f2 = pop();
1487 Node* f1 = pop();
1488 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
1489 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
1490 "l2f", nullptr, //no memory effects
1491 f1, f2);
1492 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1493
1494 push(res);
1495 }
1496
1497 // Handle jsr and jsr_w bytecode
1498 void Parse::do_jsr() {
1499 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
1500
1501 // Store information about current state, tagged with new _jsr_bci
1502 int return_bci = iter().next_bci();
1503 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
1504
1505 // The way we do things now, there is only one successor block
1506 // for the jsr, because the target code is cloned by ciTypeFlow.
1507 Block* target = successor_for_bci(jsr_bci);
1508
1509 // What got pushed?
1510 const Type* ret_addr = target->peek();
1511 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
1512
  // Effect of jsr on the stack
1514 push(_gvn.makecon(ret_addr));
1515
1516 // Flow to the jsr.
1517 merge(jsr_bci);
1518 }
1519
1520 // Handle ret bytecode
1521 void Parse::do_ret() {
1522 // Find to whom we return.
1523 assert(block()->num_successors() == 1, "a ret can only go one place now");
1524 Block* target = block()->successor_at(0);
1525 assert(!target->is_ready(), "our arrival must be expected");
1526 int pnum = target->next_path_num();
1527 merge_common(target, pnum);
1528 }
1529
1530 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
1531 if (btest != BoolTest::eq && btest != BoolTest::ne) {
1532 // Only ::eq and ::ne are supported for profile injection.
1533 return false;
1534 }
1535 if (test->is_Cmp() &&
1536 test->in(1)->Opcode() == Op_ProfileBoolean) {
1537 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
1538 int false_cnt = profile->false_count();
1539 int true_cnt = profile->true_count();
1540
    // How the counts map to taken/not_taken depends on the actual test operation (::eq or ::ne).
1542 // No need to scale the counts because profile injection was designed
1543 // to feed exact counts into VM.
1544 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt;
1545 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;
1546
1547 profile->consume();
1548 return true;
1549 }
1550 return false;
1551 }
1552
1553 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
// We also check first that the individual counters are non-negative; otherwise a
// saturated (negative) counter could still produce a positive-looking sum.
1555 // (check for saturation, integer overflow, and immature counts)
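// For example, counter1 == INT_MAX and counter2 == 10 would overflow a 32-bit sum;
// the 64-bit sum exceeds INT_MAX, so the counts are rejected as saturated.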
1556 static bool counters_are_meaningful(int counter1, int counter2, int min) {
1557 // check for saturation, including "uint" values too big to fit in "int"
1558 if (counter1 < 0 || counter2 < 0) {
1559 return false;
1560 }
1561 // check for integer overflow of the sum
1562 int64_t sum = (int64_t)counter1 + (int64_t)counter2;
1563 STATIC_ASSERT(sizeof(counter1) < sizeof(sum));
1564 if (sum > INT_MAX) {
1565 return false;
1566 }
1567 // check if mature
1568 return (counter1 + counter2) >= min;
1569 }
1570
1571 //--------------------------dynamic_branch_prediction--------------------------
1572 // Try to gather dynamic branch prediction behavior. Return a probability
// of the branch being taken and set the "cnt" field.  Returns -1.0 (PROB_UNKNOWN)
// if we need to use static prediction for some reason.
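// For example, with mature counts taken=990 and not_taken=10 (and no private block
// count), this returns a probability of 0.99 and sets cnt to 1000/FreqCountInvocations.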
1575 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
1576 ResourceMark rm;
1577
1578 cnt = COUNT_UNKNOWN;
1579
1580 int taken = 0;
1581 int not_taken = 0;
1582
1583 bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
1584
1585 if (use_mdo) {
1586 // Use MethodData information if it is available
1587 // FIXME: free the ProfileData structure
1588 ciMethodData* methodData = method()->method_data();
1589 if (!methodData->is_mature()) return PROB_UNKNOWN;
1590 ciProfileData* data = methodData->bci_to_data(bci());
1591 if (data == nullptr) {
1592 return PROB_UNKNOWN;
1593 }
1594 if (!data->is_JumpData()) return PROB_UNKNOWN;
1595
1596 // get taken and not taken values
1597 // NOTE: saturated UINT_MAX values become negative,
1598 // as do counts above INT_MAX.
1599 taken = data->as_JumpData()->taken();
1600 not_taken = 0;
1601 if (data->is_BranchData()) {
1602 not_taken = data->as_BranchData()->not_taken();
1603 }
1604
1605 // scale the counts to be commensurate with invocation counts:
1606 // NOTE: overflow for positive values is clamped at INT_MAX
1607 taken = method()->scale_count(taken);
1608 not_taken = method()->scale_count(not_taken);
1609 }
1610 // At this point, saturation or overflow is indicated by INT_MAX
1611 // or a negative value.
1612
1613 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check first that the individual counters are non-negative; otherwise a
  // saturated (negative) counter could still produce a positive-looking sum.
1615 if (!counters_are_meaningful(taken, not_taken, 40)) {
1616 if (C->log() != nullptr) {
1617 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
1618 }
1619 return PROB_UNKNOWN;
1620 }
1621
1622 // Compute frequency that we arrive here
1623 float sum = taken + not_taken;
  // Adjust if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
1627 if( block()->count() > 0 )
1628 sum = block()->count();
1629 cnt = sum / FreqCountInvocations;
1630
1631 // Pin probability to sane limits
1632 float prob;
1633 if( !taken )
1634 prob = (0+PROB_MIN) / 2;
1635 else if( !not_taken )
1636 prob = (1+PROB_MAX) / 2;
1637 else { // Compute probability of true path
1638 prob = (float)taken / (float)(taken + not_taken);
1639 if (prob > PROB_MAX) prob = PROB_MAX;
1640 if (prob < PROB_MIN) prob = PROB_MIN;
1641 }
1642
1643 assert((cnt > 0.0f) && (prob > 0.0f),
1644 "Bad frequency assignment in if cnt=%g prob=%g taken=%d not_taken=%d", cnt, prob, taken, not_taken);
1645
1646 if (C->log() != nullptr) {
1647 const char* prob_str = nullptr;
1648 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
1649 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
1650 char prob_str_buf[30];
1651 if (prob_str == nullptr) {
1652 jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob);
1653 prob_str = prob_str_buf;
1654 }
1655 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
1656 iter().get_dest(), taken, not_taken, cnt, prob_str);
1657 }
1658 return prob;
1659 }
1660
1661 //-----------------------------branch_prediction-------------------------------
1662 float Parse::branch_prediction(float& cnt,
1663 BoolTest::mask btest,
1664 int target_bci,
1665 Node* test) {
1666 float prob = dynamic_branch_prediction(cnt, btest, test);
1667 // If prob is unknown, switch to static prediction
1668 if (prob != PROB_UNKNOWN) return prob;
1669
1670 prob = PROB_FAIR; // Set default value
1671 if (btest == BoolTest::eq) // Exactly equal test?
    prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
1673 else if (btest == BoolTest::ne)
    prob = PROB_STATIC_FREQUENT;   // Assume it's relatively frequent
1675
1676 // If this is a conditional test guarding a backwards branch,
  // assume it's a loop-back edge.  Make it a likely taken branch.
1678 if (target_bci < bci()) {
1679 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
1680 // Since it's an OSR, we probably have profile data, but since
      // dynamic_branch_prediction returned PROB_UNKNOWN, the counts are too small.
1682 // Let's make a special check here for completely zero counts.
1683 ciMethodData* methodData = method()->method_data();
1684 if (!methodData->is_empty()) {
1685 ciProfileData* data = methodData->bci_to_data(bci());
1686 // Only stop for truly zero counts, which mean an unknown part
1687 // of the OSR-ed method, and we want to deopt to gather more stats.
1688 // If you have ANY counts, then this loop is simply 'cold' relative
1689 // to the OSR loop.
1690 if (data == nullptr ||
1691 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
1692 // This is the only way to return PROB_UNKNOWN:
1693 return PROB_UNKNOWN;
1694 }
1695 }
1696 }
1697 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
1698 }
1699
1700 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
1701 return prob;
1702 }
1703
1704 // The magic constants are chosen so as to match the output of
1705 // branch_prediction() when the profile reports a zero taken count.
1706 // It is important to distinguish zero counts unambiguously, because
1707 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
1708 // very small but nonzero probabilities, which if confused with zero
1709 // counts would keep the program recompiling indefinitely.
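// (For a zero taken count, dynamic_branch_prediction returns (0+PROB_MIN)/2, which is
// strictly below PROB_MIN and is therefore classified as "never taken" here.)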
1710 bool Parse::seems_never_taken(float prob) const {
1711 return prob < PROB_MIN;
1712 }
1713
1714 //-------------------------------repush_if_args--------------------------------
1715 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
1716 inline int Parse::repush_if_args() {
1717 if (PrintOpto && WizardMode) {
1718 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
1719 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
1720 method()->print_name(); tty->cr();
1721 }
1722 int bc_depth = - Bytecodes::depth(iter().cur_bc());
1723 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
1724 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
1725 assert(argument(0) != nullptr, "must exist");
1726 assert(bc_depth == 1 || argument(1) != nullptr, "two must exist");
1727 inc_sp(bc_depth);
1728 return bc_depth;
1729 }
1730
1731 // Used by StressUnstableIfTraps
1732 static volatile int _trap_stress_counter = 0;
1733
1734 void Parse::increment_trap_stress_counter(Node*& counter, Node*& incr_store) {
1735 Node* counter_addr = makecon(TypeRawPtr::make((address)&_trap_stress_counter));
1736 counter = make_load(control(), counter_addr, TypeInt::INT, T_INT, MemNode::unordered);
1737 counter = _gvn.transform(new AddINode(counter, intcon(1)));
1738 incr_store = store_to_memory(control(), counter_addr, counter, T_INT, MemNode::unordered);
1739 }
1740
1741 //----------------------------------do_ifnull----------------------------------
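// Handles the ifnull/ifnonnull bytecodes: 'c' is the comparison feeding the branch and
// btest is eq (ifnull) or ne (ifnonnull).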
1742 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
1743 int target_bci = iter().get_dest();
1744
1745 Node* counter = nullptr;
1746 Node* incr_store = nullptr;
1747 bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1748 if (do_stress_trap) {
1749 increment_trap_stress_counter(counter, incr_store);
1750 }
1751
1752 Block* branch_block = successor_for_bci(target_bci);
1753 Block* next_block = successor_for_bci(iter().next_bci());
1754
1755 float cnt;
1756 float prob = branch_prediction(cnt, btest, target_bci, c);
1757 if (prob == PROB_UNKNOWN) {
1758 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
1759 if (PrintOpto && Verbose) {
1760 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1761 }
1762 repush_if_args(); // to gather stats on loop
1763 uncommon_trap(Deoptimization::Reason_unreached,
1764 Deoptimization::Action_reinterpret,
1765 nullptr, "cold");
1766 if (C->eliminate_boxing()) {
1767 // Mark the successor blocks as parsed
1768 branch_block->next_path_num();
1769 next_block->next_path_num();
1770 }
1771 return;
1772 }
1773
1774 NOT_PRODUCT(explicit_null_checks_inserted++);
1775
1776 // Generate real control flow
1777 Node *tst = _gvn.transform( new BoolNode( c, btest ) );
1778
1779 // Sanity check the probability value
1780 assert(prob > 0.0f,"Bad probability in Parser");
1781 // Need xform to put node in hash table
1782 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1783 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1784 // True branch
1785 { PreserveJVMState pjvms(this);
1786 Node* iftrue = _gvn.transform( new IfTrueNode (iff) );
1787 set_control(iftrue);
1788
1789 if (stopped()) { // Path is dead?
1790 NOT_PRODUCT(explicit_null_checks_elided++);
1791 if (C->eliminate_boxing()) {
1792 // Mark the successor block as parsed
1793 branch_block->next_path_num();
1794 }
1795 } else { // Path is live.
1796 adjust_map_after_if(btest, c, prob, branch_block);
1797 if (!stopped()) {
1798 merge(target_bci);
1799 }
1800 }
1801 }
1802
1803 // False branch
1804 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1805 set_control(iffalse);
1806
1807 if (stopped()) { // Path is dead?
1808 NOT_PRODUCT(explicit_null_checks_elided++);
1809 if (C->eliminate_boxing()) {
1810 // Mark the successor block as parsed
1811 next_block->next_path_num();
1812 }
1813 } else { // Path is live.
1814 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1815 }
1816
1817 if (do_stress_trap) {
1818 stress_trap(iff, counter, incr_store);
1819 }
1820 }
1821
1822 //------------------------------------do_if------------------------------------
1823 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken, Node** stress_count_mem) {
1824 int target_bci = iter().get_dest();
1825
1826 Block* branch_block = successor_for_bci(target_bci);
1827 Block* next_block = successor_for_bci(iter().next_bci());
1828
1829 float cnt;
1830 float prob = branch_prediction(cnt, btest, target_bci, c);
1831 float untaken_prob = 1.0 - prob;
1832
1833 if (prob == PROB_UNKNOWN) {
1834 if (PrintOpto && Verbose) {
1835 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1836 }
1837 repush_if_args(); // to gather stats on loop
1838 uncommon_trap(Deoptimization::Reason_unreached,
1839 Deoptimization::Action_reinterpret,
1840 nullptr, "cold");
1841 if (C->eliminate_boxing()) {
1842 // Mark the successor blocks as parsed
1843 branch_block->next_path_num();
1844 next_block->next_path_num();
1845 }
1846 return;
1847 }
1848
1849 Node* counter = nullptr;
1850 Node* incr_store = nullptr;
1851 bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1852 if (do_stress_trap) {
1853 increment_trap_stress_counter(counter, incr_store);
1854 if (stress_count_mem != nullptr) {
1855 *stress_count_mem = incr_store;
1856 }
1857 }
1858
1859 // Sanity check the probability value
1860 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1861
1862 bool taken_if_true = true;
1863 // Convert BoolTest to canonical form:
1864 if (!BoolTest(btest).is_canonical()) {
1865 btest = BoolTest(btest).negate();
1866 taken_if_true = false;
1867 // prob is NOT updated here; it remains the probability of the taken
1868 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1869 }
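  // For example, an if_icmpgt arrives here as BoolTest::gt, which is not canonical:
  // it is negated to le and taken_if_true becomes false, so the taken path below is
  // wired to the IfFalse projection of the If node.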
1870 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1871
1872 Node* tst0 = new BoolNode(c, btest);
1873 Node* tst = _gvn.transform(tst0);
1874 BoolTest::mask taken_btest = BoolTest::illegal;
1875 BoolTest::mask untaken_btest = BoolTest::illegal;
1876
1877 if (tst->is_Bool()) {
1878 // Refresh c from the transformed bool node, since it may be
1879 // simpler than the original c. Also re-canonicalize btest.
1880 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p null)).
1881 // That can arise from statements like: if (x instanceof C) ...
1882 if (tst != tst0) {
1883 // Canonicalize one more time since transform can change it.
1884 btest = tst->as_Bool()->_test._test;
1885 if (!BoolTest(btest).is_canonical()) {
1886 // Reverse edges one more time...
1887 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1888 btest = tst->as_Bool()->_test._test;
1889 assert(BoolTest(btest).is_canonical(), "sanity");
1890 taken_if_true = !taken_if_true;
1891 }
1892 c = tst->in(1);
1893 }
1894 BoolTest::mask neg_btest = BoolTest(btest).negate();
1895 taken_btest = taken_if_true ? btest : neg_btest;
1896 untaken_btest = taken_if_true ? neg_btest : btest;
1897 }
1898
1899 // Generate real control flow
1900 float true_prob = (taken_if_true ? prob : untaken_prob);
1901 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1902 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1903 Node* taken_branch = new IfTrueNode(iff);
1904 Node* untaken_branch = new IfFalseNode(iff);
1905 if (!taken_if_true) { // Finish conversion to canonical form
1906 Node* tmp = taken_branch;
1907 taken_branch = untaken_branch;
1908 untaken_branch = tmp;
1909 }
1910
1911 // Branch is taken:
1912 { PreserveJVMState pjvms(this);
1913 taken_branch = _gvn.transform(taken_branch);
1914 set_control(taken_branch);
1915
1916 if (stopped()) {
1917 if (C->eliminate_boxing() && !new_path) {
1918 // Mark the successor block as parsed (if we haven't created a new path)
1919 branch_block->next_path_num();
1920 }
1921 } else {
1922 adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1923 if (!stopped()) {
1924 if (new_path) {
1925 // Merge by using a new path
1926 merge_new_path(target_bci);
1927 } else if (ctrl_taken != nullptr) {
1928 // Don't merge but save taken branch to be wired by caller
1929 *ctrl_taken = control();
1930 } else {
1931 merge(target_bci);
1932 }
1933 }
1934 }
1935 }
1936
1937 untaken_branch = _gvn.transform(untaken_branch);
1938 set_control(untaken_branch);
1939
1940 // Branch not taken.
1941 if (stopped() && ctrl_taken == nullptr) {
1942 if (C->eliminate_boxing()) {
1943 // Mark the successor block as parsed (if caller does not re-wire control flow)
1944 next_block->next_path_num();
1945 }
1946 } else {
1947 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1948 }
1949
1950 if (do_stress_trap) {
1951 stress_trap(iff, counter, incr_store);
1952 }
1953 }
1954
1955
1956 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1957 if (t->speculative() == nullptr) {
1958 return ProfileUnknownNull;
1959 }
1960 if (t->speculative_always_null()) {
1961 return ProfileAlwaysNull;
1962 }
1963 if (t->speculative_maybe_null()) {
1964 return ProfileMaybeNull;
1965 }
1966 return ProfileNeverNull;
1967 }
1968
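// Handle an acmp input that profiling reports as always null: assert nullness on it.
// On the paths that reach here the pointer comparison has already failed, so a null
// input means the operands cannot be substitutable: for 'ne' the branch is taken,
// for 'eq' we simply fall through.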
1969 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1970 inc_sp(2);
1971 Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1972 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1973 speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1974 dec_sp(2);
1975 if (btest == BoolTest::ne) {
1976 {
1977 PreserveJVMState pjvms(this);
1978 replace_in_map(input, cast);
1979 int target_bci = iter().get_dest();
1980 merge(target_bci);
1981 }
1982 record_for_igvn(eq_region);
1983 set_control(_gvn.transform(eq_region));
1984 } else {
1985 replace_in_map(input, cast);
1986 }
1987 }
1988
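// Null check an acmp input: depending on profile knowledge, either trap on an
// unexpected null or hand the null path back to the caller via 'null_ctl'.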
1989 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
1990 inc_sp(2);
1991 null_ctl = top();
1992 Node* cast = null_check_oop(input, &null_ctl,
1993 input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
1994 false,
1995 speculative_ptr_kind(tinput) == ProfileNeverNull &&
1996 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
1997 dec_sp(2);
1998 return cast;
1999 }
2000
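// Profiling reports a known non-value class for this acmp input: null check it and
// emit a receiver type check that traps if the profiled class turns out to be wrong.
// With the input known not to be an inline type, pointer inequality decides the acmp:
// for 'ne' the branch is taken, for 'eq' we fall through.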
2001 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2002 Node* ne_region = new RegionNode(1);
2003 Node* null_ctl;
2004 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2005 ne_region->add_req(null_ctl);
2006
2007 Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
2008 {
2009 PreserveJVMState pjvms(this);
2010 inc_sp(2);
2011 set_control(slow_ctl);
2012 Deoptimization::DeoptReason reason;
2013 if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2014 reason = Deoptimization::Reason_speculate_class_check;
2015 } else {
2016 reason = Deoptimization::Reason_class_check;
2017 }
2018 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2019 }
2020 ne_region->add_req(control());
2021
2022 record_for_igvn(ne_region);
2023 set_control(_gvn.transform(ne_region));
2024 if (btest == BoolTest::ne) {
2025 {
2026 PreserveJVMState pjvms(this);
2027 if (null_ctl == top()) {
2028 replace_in_map(input, cast);
2029 }
2030 int target_bci = iter().get_dest();
2031 merge(target_bci);
2032 }
2033 record_for_igvn(eq_region);
2034 set_control(_gvn.transform(eq_region));
2035 } else {
2036 if (null_ctl == top()) {
2037 replace_in_map(input, cast);
2038 }
2039 set_control(_gvn.transform(ne_region));
2040 }
2041 }
2042
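// Profiling reports that this acmp input is never an inline type (exact class unknown):
// null check it and guard with an inline_type_test that traps if the input turns out to
// be an inline type after all. As above, pointer inequality then decides the acmp.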
2043 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2044 Node* ne_region = new RegionNode(1);
2045 Node* null_ctl;
2046 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2047 ne_region->add_req(null_ctl);
2048
2049 {
2050 BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2051 inc_sp(2);
2052 uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2053 }
2054
2055 ne_region->add_req(control());
2056
2057 record_for_igvn(ne_region);
2058 set_control(_gvn.transform(ne_region));
2059 if (btest == BoolTest::ne) {
2060 {
2061 PreserveJVMState pjvms(this);
2062 if (null_ctl == top()) {
2063 replace_in_map(input, cast);
2064 }
2065 int target_bci = iter().get_dest();
2066 merge(target_bci);
2067 }
2068 record_for_igvn(eq_region);
2069 set_control(_gvn.transform(eq_region));
2070 } else {
2071 if (null_ctl == top()) {
2072 replace_in_map(input, cast);
2073 }
2074 set_control(_gvn.transform(ne_region));
2075 }
2076 }
2077
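// Implements acmp (if_acmpeq/if_acmpne). Without Valhalla this is a plain pointer
// comparison. With value objects, unequal pointers are not sufficient to prove 'ne':
// the operands may still be substitutable. The code below therefore uses acmp profiling
// and speculative types to prune cases where a simple pointer comparison suffices, and
// otherwise falls back to a substitutability check via ValueObjectMethods::isSubstitutable
// once both operands are known to be non-null inline types of the same class.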
2078 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2079 ciKlass* left_type = nullptr;
2080 ciKlass* right_type = nullptr;
2081 ProfilePtrKind left_ptr = ProfileUnknownNull;
2082 ProfilePtrKind right_ptr = ProfileUnknownNull;
2083 bool left_inline_type = true;
2084 bool right_inline_type = true;
2085
2086 // Leverage profiling at acmp
2087 if (UseACmpProfile) {
2088 method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2089 if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2090 left_type = nullptr;
2091 right_type = nullptr;
2092 left_inline_type = true;
2093 right_inline_type = true;
2094 }
2095 if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2096 left_ptr = ProfileUnknownNull;
2097 right_ptr = ProfileUnknownNull;
2098 }
2099 }
2100
2101 if (UseTypeSpeculation) {
2102 record_profile_for_speculation(left, left_type, left_ptr);
2103 record_profile_for_speculation(right, right_type, right_ptr);
2104 }
2105
2106 if (!Arguments::is_valhalla_enabled()) {
2107 Node* cmp = CmpP(left, right);
2108 cmp = optimize_cmp_with_klass(cmp);
2109 do_if(btest, cmp);
2110 return;
2111 }
2112
2113 // Check for equality before potentially allocating
2114 if (left == right) {
2115 do_if(btest, makecon(TypeInt::CC_EQ));
2116 return;
2117 }
2118
2119 // Allocate inline type operands and re-execute on deoptimization
2120 if (left->is_InlineType()) {
2121 PreserveReexecuteState preexecs(this);
2122 inc_sp(2);
2123 jvms()->set_should_reexecute(true);
2124 left = left->as_InlineType()->buffer(this);
2125 }
2126 if (right->is_InlineType()) {
2127 PreserveReexecuteState preexecs(this);
2128 inc_sp(2);
2129 jvms()->set_should_reexecute(true);
2130 right = right->as_InlineType()->buffer(this);
2131 }
2132
2133 // First, do a normal pointer comparison
2134 const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2135 const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2136 Node* cmp = CmpP(left, right);
2137 cmp = optimize_cmp_with_klass(cmp);
2138 if (tleft == nullptr || !tleft->can_be_inline_type() ||
2139 tright == nullptr || !tright->can_be_inline_type()) {
    // This is sufficient if one of the operands can't be an inline type
2141 do_if(btest, cmp);
2142 return;
2143 }
2144
2145 // Don't add traps to unstable if branches because additional checks are required to
2146 // decide if the operands are equal/substitutable and we therefore shouldn't prune
2147 // branches for one if based on the profiling of the acmp branches.
  // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2149 // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2150 // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2151 const bool can_trap = true;
2152
2153 Node* eq_region = nullptr;
2154 if (btest == BoolTest::eq) {
2155 do_if(btest, cmp, !can_trap, true);
2156 if (stopped()) {
2157 // Pointers are equal, operands must be equal
2158 return;
2159 }
2160 } else {
2161 assert(btest == BoolTest::ne, "only eq or ne");
2162 Node* is_not_equal = nullptr;
2163 eq_region = new RegionNode(3);
2164 {
2165 PreserveJVMState pjvms(this);
2166 // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2167 do_if(btest, cmp, !can_trap, false, &is_not_equal);
2168 if (!stopped()) {
2169 eq_region->init_req(1, control());
2170 }
2171 }
2172 if (is_not_equal == nullptr || is_not_equal->is_top()) {
2173 record_for_igvn(eq_region);
2174 set_control(_gvn.transform(eq_region));
2175 return;
2176 }
2177 set_control(is_not_equal);
2178 }
2179
2180 // Prefer speculative types if available
2181 if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2182 if (tleft->speculative_type() != nullptr) {
2183 left_type = tleft->speculative_type();
2184 }
2185 if (tright->speculative_type() != nullptr) {
2186 right_type = tright->speculative_type();
2187 }
2188 }
2189
2190 if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2191 ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2192 if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2193 left_ptr = speculative_left_ptr;
2194 } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2195 left_ptr = speculative_left_ptr;
2196 }
2197 }
2198 if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2199 ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2200 if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2201 right_ptr = speculative_right_ptr;
2202 } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2203 right_ptr = speculative_right_ptr;
2204 }
2205 }
2206
2207 if (left_ptr == ProfileAlwaysNull) {
2208 // Comparison with null. Assert the input is indeed null and we're done.
2209 acmp_always_null_input(left, tleft, btest, eq_region);
2210 return;
2211 }
2212 if (right_ptr == ProfileAlwaysNull) {
2213 // Comparison with null. Assert the input is indeed null and we're done.
2214 acmp_always_null_input(right, tright, btest, eq_region);
2215 return;
2216 }
2217 if (left_type != nullptr && !left_type->is_inlinetype()) {
2218 // Comparison with an object of known type
2219 acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2220 return;
2221 }
2222 if (right_type != nullptr && !right_type->is_inlinetype()) {
2223 // Comparison with an object of known type
2224 acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2225 return;
2226 }
2227 if (!left_inline_type) {
2228 // Comparison with an object known not to be an inline type
2229 acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2230 return;
2231 }
2232 if (!right_inline_type) {
2233 // Comparison with an object known not to be an inline type
2234 acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2235 return;
2236 }
2237
  // Pointers are not equal, check if the right operand is non-null
2239 Node* ne_region = new RegionNode(6);
2240 Node* null_ctl = nullptr;
2241 Node* not_null_left = nullptr;
2242 Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2243 ne_region->init_req(1, null_ctl);
2244
2245 if (!stopped()) {
    // The right operand is non-null, check if it is an inline type
2247 Node* is_value = inline_type_test(not_null_right);
2248 IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2249 Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2250 ne_region->init_req(2, not_value);
2251 set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2252
    // The right operand is an inline type, check if the left operand is non-null
2254 not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2255 ne_region->init_req(3, null_ctl);
2256
2257 if (!stopped()) {
2258 // Check if both operands are of the same class.
2259 Node* kls_left = load_object_klass(not_null_left);
2260 Node* kls_right = load_object_klass(not_null_right);
2261 Node* kls_cmp = CmpP(kls_left, kls_right);
2262 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2263 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2264 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2265 set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2266 ne_region->init_req(4, kls_ne);
2267 }
2268 }
2269
2270 if (stopped()) {
2271 record_for_igvn(ne_region);
2272 set_control(_gvn.transform(ne_region));
2273 if (btest == BoolTest::ne) {
2274 {
2275 PreserveJVMState pjvms(this);
2276 int target_bci = iter().get_dest();
2277 merge(target_bci);
2278 }
2279 record_for_igvn(eq_region);
2280 set_control(_gvn.transform(eq_region));
2281 }
2282 return;
2283 }
2284
  // Both operands are value objects of the same class, so we need to perform a
2286 // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
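  // (Conceptually, two value objects are "==" iff they are of the same class and their
  // fields are pairwise substitutable; isSubstitutable implements that comparison.)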
2287 Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2288 Node* mem = reset_memory();
2289 Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2290
2291 Node* eq_io_phi = nullptr;
2292 Node* eq_mem_phi = nullptr;
2293 if (eq_region != nullptr) {
2294 eq_io_phi = PhiNode::make(eq_region, i_o());
2295 eq_mem_phi = PhiNode::make(eq_region, mem);
2296 }
2297
2298 set_all_memory(mem);
2299
2300 kill_dead_locals();
2301 ciSymbol* subst_method_name = UseAltSubstitutabilityMethod ? ciSymbols::isSubstitutableAlt_name() : ciSymbols::isSubstitutable_name();
2302 ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(subst_method_name, ciSymbols::object_object_boolean_signature());
2303 CallStaticJavaNode* call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2304 call->set_override_symbolic_info(true);
2305 call->init_req(TypeFunc::Parms, not_null_left);
2306 call->init_req(TypeFunc::Parms+1, not_null_right);
2307 inc_sp(2);
2308 set_edges_for_java_call(call, false, false);
2309 Node* ret = set_results_for_java_call(call, false, true);
2310 dec_sp(2);
2311
2312 // Test the return value of ValueObjectMethods::isSubstitutable()
2313 // This is the last check, do_if can emit traps now.
2314 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2315 Node* ctl = C->top();
2316 Node* stress_count_mem = nullptr;
2317 if (btest == BoolTest::eq) {
2318 PreserveJVMState pjvms(this);
2319 do_if(btest, subst_cmp, can_trap, false, nullptr, &stress_count_mem);
2320 if (!stopped()) {
2321 ctl = control();
2322 }
2323 } else {
2324 assert(btest == BoolTest::ne, "only eq or ne");
2325 PreserveJVMState pjvms(this);
2326 do_if(btest, subst_cmp, can_trap, false, &ctl, &stress_count_mem);
2327 if (!stopped()) {
2328 eq_region->init_req(2, control());
2329 eq_io_phi->init_req(2, i_o());
2330 eq_mem_phi->init_req(2, reset_memory());
2331 }
2332 }
2333 if (stress_count_mem != nullptr) {
2334 set_memory(stress_count_mem, stress_count_mem->adr_type());
2335 }
2336 ne_region->init_req(5, ctl);
2337 ne_io_phi->init_req(5, i_o());
2338 ne_mem_phi->init_req(5, reset_memory());
2339
2340 record_for_igvn(ne_region);
2341 set_control(_gvn.transform(ne_region));
2342 set_i_o(_gvn.transform(ne_io_phi));
2343 set_all_memory(_gvn.transform(ne_mem_phi));
2344
2345 if (btest == BoolTest::ne) {
2346 {
2347 PreserveJVMState pjvms(this);
2348 int target_bci = iter().get_dest();
2349 merge(target_bci);
2350 }
2351
2352 record_for_igvn(eq_region);
2353 set_control(_gvn.transform(eq_region));
2354 set_i_o(_gvn.transform(eq_io_phi));
2355 set_all_memory(_gvn.transform(eq_mem_phi));
2356 }
2357 }
2358
2359 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2360 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2361 // then either takes the trap or executes the original, unstable if.
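// The emitted shape is roughly:
//   if ((counter & mask) == 0)  take the existing unstable_if trap
//   else                        fall through to the original If
// where mask has freq_log low bits set, so the trap fires roughly once every
// 2^freq_log executions of this check.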
2362 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2363 // Search for an unstable if trap
2364 CallStaticJavaNode* trap = nullptr;
2365 assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2366 ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2367 if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2368 // No suitable trap found. Remove unused counter load and increment.
2369 C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2370 return;
2371 }
2372
2373 // Remove trap from optimization list since we add another path to the trap.
2374 bool success = C->remove_unstable_if_trap(trap, true);
2375 assert(success, "Trap already modified");
2376
2377 // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2378 int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2379 Node* mask = intcon(right_n_bits(freq_log));
2380 counter = _gvn.transform(new AndINode(counter, mask));
2381 Node* cmp = _gvn.transform(new CmpINode(counter, intcon(0)));
2382 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::mask::eq));
2383 IfNode* iff = _gvn.transform(new IfNode(orig_iff->in(0), bol, orig_iff->_prob, orig_iff->_fcnt))->as_If();
2384 Node* if_true = _gvn.transform(new IfTrueNode(iff));
2385 Node* if_false = _gvn.transform(new IfFalseNode(iff));
2386 assert(!if_true->is_top() && !if_false->is_top(), "trap always / never taken");
2387
2388 // Trap
2389 assert(trap_proj->outcnt() == 1, "some other nodes are dependent on the trap projection");
2390
2391 Node* trap_region = new RegionNode(3);
2392 trap_region->set_req(1, trap_proj);
2393 trap_region->set_req(2, if_true);
2394 trap->set_req(0, _gvn.transform(trap_region));
2395
2396 // Don't trap, execute original if
2397 orig_iff->set_req(0, if_false);
2398 }
2399
2400 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2401 // Randomly skip emitting an uncommon trap
2402 if (StressUnstableIfTraps && ((C->random() % 2) == 0)) {
2403 return false;
2404 }
2405 // Don't want to speculate on uncommon traps when running with -Xcomp
2406 if (!UseInterpreter) {
2407 return false;
2408 }
2409 return seems_never_taken(prob) &&
2410 !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
2411 }
2412
2413 void Parse::maybe_add_predicate_after_if(Block* path) {
2414 if (path->is_SEL_head() && path->preds_parsed() == 0) {
2415 // Add predicates at bci of if dominating the loop so traps can be
2416 // recorded on the if's profile data
2417 int bc_depth = repush_if_args();
2418 add_parse_predicates();
2419 dec_sp(bc_depth);
2420 path->set_has_predicates();
2421 }
2422 }
2423
2424
2425 //----------------------------adjust_map_after_if------------------------------
2426 // Adjust the JVM state to reflect the result of taking this path.
2427 // Basically, it means inspecting the CmpNode controlling this
2428 // branch, seeing how it constrains a tested value, and then
2429 // deciding if it's worth our while to encode this constraint
2430 // as graph nodes in the current abstract interpretation map.
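// For example, on the taken path of 'if (x instanceof C)' the map entry for x can be
// replaced by a cast to C (see match_type_check and sharpen_type_after_if below).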
2431 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2432 if (!c->is_Cmp()) {
2433 maybe_add_predicate_after_if(path);
2434 return;
2435 }
2436
2437 if (stopped() || btest == BoolTest::illegal) {
2438 return; // nothing to do
2439 }
2440
2441 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2442
2443 if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2444 repush_if_args();
2445 Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2446 Deoptimization::Action_reinterpret,
2447 nullptr,
2448 (is_fallthrough ? "taken always" : "taken never"));
2449
2450 if (call != nullptr) {
2451 C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2452 }
2453 return;
2454 }
2455
2456 Node* val = c->in(1);
2457 Node* con = c->in(2);
2458 const Type* tcon = _gvn.type(con);
2459 const Type* tval = _gvn.type(val);
2460 bool have_con = tcon->singleton();
2461 if (tval->singleton()) {
2462 if (!have_con) {
2463 // Swap, so constant is in con.
2464 con = val;
2465 tcon = tval;
2466 val = c->in(2);
2467 tval = _gvn.type(val);
2468 btest = BoolTest(btest).commute();
2469 have_con = true;
2470 } else {
2471 // Do we have two constants? Then leave well enough alone.
2472 have_con = false;
2473 }
2474 }
2475 if (!have_con) { // remaining adjustments need a con
2476 maybe_add_predicate_after_if(path);
2477 return;
2478 }
2479
2480 sharpen_type_after_if(btest, con, tcon, val, tval);
2481 maybe_add_predicate_after_if(path);
2482 }
2483
2484
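// Given a klass load that reads obj's _klass field (LoadKlass, or
// DecodeNKlass(LoadNKlass) in the compressed-klass case), return the underlying Java
// object; return nullptr if the IR shape does not match.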
2485 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
2486 Node* ldk;
2487 if (n->is_DecodeNKlass()) {
2488 if (n->in(1)->Opcode() != Op_LoadNKlass) {
2489 return nullptr;
2490 } else {
2491 ldk = n->in(1);
2492 }
2493 } else if (n->Opcode() != Op_LoadKlass) {
2494 return nullptr;
2495 } else {
2496 ldk = n;
2497 }
2498 assert(ldk != nullptr && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
2499
2500 Node* adr = ldk->in(MemNode::Address);
2501 intptr_t off = 0;
2502 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
2503 if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
2504 return nullptr;
2505 const TypePtr* tp = gvn->type(obj)->is_ptr();
2506 if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
2507 return nullptr;
2508
2509 return obj;
2510 }
2511
2512 // Matches exact and inexact type check IR shapes during parsing.
// On a successful match, returns the type-checked object node and its narrowed type
// (after the successful check) as out parameters.
2515 static bool match_type_check(PhaseGVN& gvn,
2516 BoolTest::mask btest,
2517 Node* con, const Type* tcon,
2518 Node* val, const Type* tval,
2519 Node** obj, const TypeOopPtr** cast_type) { // out-parameters
2520 // Look for opportunities to sharpen the type of a node whose klass is compared with a constant klass.
2521 // The constant klass being tested against can come from many bytecode instructions (implicitly or explicitly),
2522 // and also from profile data used by speculative casts.
2523 if (btest == BoolTest::eq && tcon->isa_klassptr()) {
2524 // Found:
2525 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2526 // or the narrowOop equivalent.
2527 (*obj) = extract_obj_from_klass_load(&gvn, val);
2528 (*cast_type) = tcon->isa_klassptr()->as_instance_type();
2529 return true; // found
2530 }
2531
2532 // Match an instanceof check.
2533 // During parsing its IR shape is not canonicalized yet.
2534 //
2535 // obj superklass
2536 // | |
2537 // SubTypeCheck
2538 // |
2539 // Bool [eq] / [ne]
2540 // |
2541 // If
2542 // / \
2543 // T F
2544 // \ /
2545 // Region
2546 // \ ConI ConI
2547 // \ | /
2548 // val -> Phi ConI <- con
2549 // \ /
2550 // CmpI
2551 // |
2552 // Bool [btest]
2553 // |
2554 //
2555 if (tval->isa_int() && val->is_Phi() && val->in(0)->as_Region()->is_diamond()) {
2556 RegionNode* diamond = val->in(0)->as_Region();
2557 IfNode* if1 = diamond->in(1)->in(0)->as_If();
2558 BoolNode* b1 = if1->in(1)->isa_Bool();
2559 if (b1 != nullptr && b1->in(1)->isa_SubTypeCheck()) {
2560 assert(b1->_test._test == BoolTest::eq ||
2561 b1->_test._test == BoolTest::ne, "%d", b1->_test._test);
2562
2563 ProjNode* success_proj = if1->proj_out(b1->_test._test == BoolTest::eq ? 1 : 0);
2564 int idx = diamond->find_edge(success_proj);
2565 assert(idx == 1 || idx == 2, "");
2566 Node* vcon = val->in(idx);
2567
2568 assert(val->find_edge(con) > 0, "");
2569 if ((btest == BoolTest::eq && vcon == con) || (btest == BoolTest::ne && vcon != con)) {
2570 SubTypeCheckNode* sub = b1->in(1)->as_SubTypeCheck();
2571 Node* obj_or_subklass = sub->in(SubTypeCheckNode::ObjOrSubKlass);
2572 Node* superklass = sub->in(SubTypeCheckNode::SuperKlass);
2573
2574 if (gvn.type(obj_or_subklass)->isa_oopptr()) {
2575 const TypeKlassPtr* klass_ptr_type = gvn.type(superklass)->is_klassptr();
2576 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
2577
2578 (*obj) = obj_or_subklass;
2579 (*cast_type) = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
2580 return true; // found
2581 }
2582 }
2583 }
2584 }
2585 return false; // not found
2586 }
2587
2588 void Parse::sharpen_type_after_if(BoolTest::mask btest,
2589 Node* con, const Type* tcon,
2590 Node* val, const Type* tval) {
2591 Node* obj = nullptr;
2592 const TypeOopPtr* cast_type = nullptr;
2593 // Insert a cast node with a narrowed type after a successful type check.
2594 if (match_type_check(_gvn, btest, con, tcon, val, tval,
2595 &obj, &cast_type)) {
2596 assert(obj != nullptr && cast_type != nullptr, "missing type check info");
2597 const Type* obj_type = _gvn.type(obj);
2598 const TypeOopPtr* tboth = obj_type->join_speculative(cast_type)->isa_oopptr();
2599 if (tboth != nullptr && tboth != obj_type && tboth->higher_equal(obj_type)) {
2600 int obj_in_map = map()->find_edge(obj);
2601 JVMState* jvms = this->jvms();
2602 if (obj_in_map >= 0 &&
2603 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2604 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2605 const Type* tcc = ccast->as_Type()->type();
2606 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2607 // Delay transform() call to allow recovery of pre-cast value
2608 // at the control merge.
2609 _gvn.set_type_bottom(ccast);
2610 record_for_igvn(ccast);
2611 if (tboth->is_inlinetypeptr()) {
2612 ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2613 }
2614 // Here's the payoff.
2615 replace_in_map(obj, ccast);
2616 }
2617 }
2618 }
2619
2620 int val_in_map = map()->find_edge(val);
2621 if (val_in_map < 0) return; // replace_in_map would be useless
2622 {
2623 JVMState* jvms = this->jvms();
2624 if (!(jvms->is_loc(val_in_map) ||
2625 jvms->is_stk(val_in_map)))
2626 return; // again, it would be useless
2627 }
2628
2629 // Check for a comparison to a constant, and "know" that the compared
2630 // value is constrained on this path.
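  // For example, for 'if (x == null)' the path where the test succeeds casts x to the
  // null type, while the 'ne' outcome strengthens x to not-null via cast_not_null below.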
2631 assert(tcon->singleton(), "");
2632 ConstraintCastNode* ccast = nullptr;
2633 Node* cast = nullptr;
2634
2635 switch (btest) {
2636 case BoolTest::eq: // Constant test?
2637 {
2638 const Type* tboth = tcon->join_speculative(tval);
2639 if (tboth == tval) break; // Nothing to gain.
2640 if (tcon->isa_int()) {
2641 ccast = new CastIINode(control(), val, tboth);
2642 } else if (tcon == TypePtr::NULL_PTR) {
2643 // Cast to null, but keep the pointer identity temporarily live.
2644 ccast = new CastPPNode(control(), val, tboth);
2645 } else {
2646 const TypeF* tf = tcon->isa_float_constant();
2647 const TypeD* td = tcon->isa_double_constant();
2648 // Exclude tests vs float/double 0 as these could be
2649 // either +0 or -0. Just because you are equal to +0
2650 // doesn't mean you ARE +0!
2651 // Note, following code also replaces Long and Oop values.
2652 if ((!tf || tf->_f != 0.0) &&
2653 (!td || td->_d != 0.0))
2654 cast = con; // Replace non-constant val by con.
2655 }
2656 }
2657 break;
2658
2659 case BoolTest::ne:
2660 if (tcon == TypePtr::NULL_PTR) {
2661 cast = cast_not_null(val, false);
2662 }
2663 break;
2664
2665 default:
2666 // (At this point we could record int range types with CastII.)
2667 break;
2668 }
2669
2670 if (ccast != nullptr) {
2671 const Type* tcc = ccast->as_Type()->type();
2672 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
2673 // Delay transform() call to allow recovery of pre-cast value
2674 // at the control merge.
2675 _gvn.set_type_bottom(ccast);
2676 record_for_igvn(ccast);
2677 cast = ccast;
2678 }
2679
2680 if (cast != nullptr) { // Here's the payoff.
2681 replace_in_map(val, cast);
2682 }
2683 }
2684
2685 /**
2686 * Use speculative type to optimize CmpP node: if comparison is
2687 * against the low level class, cast the object to the speculative
2688 * type if any. CmpP should then go away.
2689 *
2690 * @param c expected CmpP node
 * @return result of CmpP on the object cast to its speculative type
2692 *
2693 */
2694 Node* Parse::optimize_cmp_with_klass(Node* c) {
2695 // If this is transformed by the _gvn to a comparison with the low
2696 // level klass then we may be able to use speculation
2697 if (c->Opcode() == Op_CmpP &&
2698 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2699 c->in(2)->is_Con()) {
2700 Node* load_klass = nullptr;
2701 Node* decode = nullptr;
2702 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2703 decode = c->in(1);
2704 load_klass = c->in(1)->in(1);
2705 } else {
2706 load_klass = c->in(1);
2707 }
2708 if (load_klass->in(2)->is_AddP()) {
2709 Node* addp = load_klass->in(2);
2710 Node* obj = addp->in(AddPNode::Address);
2711 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2712 if (obj_type->speculative_type_not_null() != nullptr) {
2713 ciKlass* k = obj_type->speculative_type();
2714 inc_sp(2);
2715 obj = maybe_cast_profiled_obj(obj, k);
2716 dec_sp(2);
2717 if (obj->is_InlineType()) {
2718 assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2719 obj = obj->as_InlineType()->get_oop();
2720 }
2721 // Make the CmpP use the casted obj
2722 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2723 load_klass = load_klass->clone();
2724 load_klass->set_req(2, addp);
2725 load_klass = _gvn.transform(load_klass);
2726 if (decode != nullptr) {
2727 decode = decode->clone();
2728 decode->set_req(1, load_klass);
2729 load_klass = _gvn.transform(decode);
2730 }
2731 c = c->clone();
2732 c->set_req(1, load_klass);
2733 c = _gvn.transform(c);
2734 }
2735 }
2736 }
2737 return c;
2738 }
2739
2740 //------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
2742 void Parse::do_one_bytecode() {
2743 Node *a, *b, *c, *d; // Handy temps
2744 BoolTest::mask btest;
2745 int i;
2746
2747 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
2748
2749 if (C->check_node_count(NodeLimitFudgeFactor * 5,
2750 "out of nodes parsing method")) {
2751 return;
2752 }
2753
2754 #ifdef ASSERT
2755 // for setting breakpoints
2756 if (TraceOptoParse) {
2757 tty->print(" @");
2758 dump_bci(bci());
2759 tty->print(" %s", Bytecodes::name(bc()));
2760 tty->cr();
2761 }
2762 #endif
2763
2764 switch (bc()) {
2765 case Bytecodes::_nop:
2766 // do nothing
2767 break;
2768 case Bytecodes::_lconst_0:
2769 push_pair(longcon(0));
2770 break;
2771
2772 case Bytecodes::_lconst_1:
2773 push_pair(longcon(1));
2774 break;
2775
2776 case Bytecodes::_fconst_0:
2777 push(zerocon(T_FLOAT));
2778 break;
2779
2780 case Bytecodes::_fconst_1:
2781 push(makecon(TypeF::ONE));
2782 break;
2783
2784 case Bytecodes::_fconst_2:
2785 push(makecon(TypeF::make(2.0f)));
2786 break;
2787
2788 case Bytecodes::_dconst_0:
2789 push_pair(zerocon(T_DOUBLE));
2790 break;
2791
2792 case Bytecodes::_dconst_1:
2793 push_pair(makecon(TypeD::ONE));
2794 break;
2795
2796 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
2797 case Bytecodes::_iconst_0: push(intcon( 0)); break;
2798 case Bytecodes::_iconst_1: push(intcon( 1)); break;
2799 case Bytecodes::_iconst_2: push(intcon( 2)); break;
2800 case Bytecodes::_iconst_3: push(intcon( 3)); break;
2801 case Bytecodes::_iconst_4: push(intcon( 4)); break;
2802 case Bytecodes::_iconst_5: push(intcon( 5)); break;
2803 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break;
2804 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break;
2805 case Bytecodes::_aconst_null: push(null()); break;
2806
2807 case Bytecodes::_ldc:
2808 case Bytecodes::_ldc_w:
2809 case Bytecodes::_ldc2_w: {
2810 // ciTypeFlow should trap if the ldc is in error state or if the constant is not loaded
2811 assert(!iter().is_in_error(), "ldc is in error state");
2812 ciConstant constant = iter().get_constant();
2813 assert(constant.is_loaded(), "constant is not loaded");
2814 const Type* con_type = Type::make_from_constant(constant);
2815 if (con_type != nullptr) {
2816 push_node(con_type->basic_type(), makecon(con_type));
2817 }
2818 break;
2819 }
2820
2821 case Bytecodes::_aload_0:
2822 push( local(0) );
2823 break;
2824 case Bytecodes::_aload_1:
2825 push( local(1) );
2826 break;
2827 case Bytecodes::_aload_2:
2828 push( local(2) );
2829 break;
2830 case Bytecodes::_aload_3:
2831 push( local(3) );
2832 break;
2833 case Bytecodes::_aload:
2834 push( local(iter().get_index()) );
2835 break;
2836
2837 case Bytecodes::_fload_0:
2838 case Bytecodes::_iload_0:
2839 push( local(0) );
2840 break;
2841 case Bytecodes::_fload_1:
2842 case Bytecodes::_iload_1:
2843 push( local(1) );
2844 break;
2845 case Bytecodes::_fload_2:
2846 case Bytecodes::_iload_2:
2847 push( local(2) );
2848 break;
2849 case Bytecodes::_fload_3:
2850 case Bytecodes::_iload_3:
2851 push( local(3) );
2852 break;
2853 case Bytecodes::_fload:
2854 case Bytecodes::_iload:
2855 push( local(iter().get_index()) );
2856 break;
2857 case Bytecodes::_lload_0:
2858 push_pair_local( 0 );
2859 break;
2860 case Bytecodes::_lload_1:
2861 push_pair_local( 1 );
2862 break;
2863 case Bytecodes::_lload_2:
2864 push_pair_local( 2 );
2865 break;
2866 case Bytecodes::_lload_3:
2867 push_pair_local( 3 );
2868 break;
2869 case Bytecodes::_lload:
2870 push_pair_local( iter().get_index() );
2871 break;
2872
2873 case Bytecodes::_dload_0:
2874 push_pair_local(0);
2875 break;
2876 case Bytecodes::_dload_1:
2877 push_pair_local(1);
2878 break;
2879 case Bytecodes::_dload_2:
2880 push_pair_local(2);
2881 break;
2882 case Bytecodes::_dload_3:
2883 push_pair_local(3);
2884 break;
2885 case Bytecodes::_dload:
2886 push_pair_local(iter().get_index());
2887 break;
2888 case Bytecodes::_fstore_0:
2889 case Bytecodes::_istore_0:
2890 case Bytecodes::_astore_0:
2891 set_local( 0, pop() );
2892 break;
2893 case Bytecodes::_fstore_1:
2894 case Bytecodes::_istore_1:
2895 case Bytecodes::_astore_1:
2896 set_local( 1, pop() );
2897 break;
2898 case Bytecodes::_fstore_2:
2899 case Bytecodes::_istore_2:
2900 case Bytecodes::_astore_2:
2901 set_local( 2, pop() );
2902 break;
2903 case Bytecodes::_fstore_3:
2904 case Bytecodes::_istore_3:
2905 case Bytecodes::_astore_3:
2906 set_local( 3, pop() );
2907 break;
2908 case Bytecodes::_fstore:
2909 case Bytecodes::_istore:
2910 case Bytecodes::_astore:
2911 set_local( iter().get_index(), pop() );
2912 break;
2913 // long stores
2914 case Bytecodes::_lstore_0:
2915 set_pair_local( 0, pop_pair() );
2916 break;
2917 case Bytecodes::_lstore_1:
2918 set_pair_local( 1, pop_pair() );
2919 break;
2920 case Bytecodes::_lstore_2:
2921 set_pair_local( 2, pop_pair() );
2922 break;
2923 case Bytecodes::_lstore_3:
2924 set_pair_local( 3, pop_pair() );
2925 break;
2926 case Bytecodes::_lstore:
2927 set_pair_local( iter().get_index(), pop_pair() );
2928 break;
2929
2930 // double stores
2931 case Bytecodes::_dstore_0:
2932 set_pair_local( 0, pop_pair() );
2933 break;
2934 case Bytecodes::_dstore_1:
2935 set_pair_local( 1, pop_pair() );
2936 break;
2937 case Bytecodes::_dstore_2:
2938 set_pair_local( 2, pop_pair() );
2939 break;
2940 case Bytecodes::_dstore_3:
2941 set_pair_local( 3, pop_pair() );
2942 break;
2943 case Bytecodes::_dstore:
2944 set_pair_local( iter().get_index(), pop_pair() );
2945 break;
2946
2947 case Bytecodes::_pop: dec_sp(1); break;
2948 case Bytecodes::_pop2: dec_sp(2); break;
2949 case Bytecodes::_swap:
2950 a = pop();
2951 b = pop();
2952 push(a);
2953 push(b);
2954 break;
2955 case Bytecodes::_dup:
2956 a = pop();
2957 push(a);
2958 push(a);
2959 break;
2960 case Bytecodes::_dup_x1:
2961 a = pop();
2962 b = pop();
2963 push( a );
2964 push( b );
2965 push( a );
2966 break;
2967 case Bytecodes::_dup_x2:
2968 a = pop();
2969 b = pop();
2970 c = pop();
2971 push( a );
2972 push( c );
2973 push( b );
2974 push( a );
2975 break;
2976 case Bytecodes::_dup2:
2977 a = pop();
2978 b = pop();
2979 push( b );
2980 push( a );
2981 push( b );
2982 push( a );
2983 break;
2984
2985 case Bytecodes::_dup2_x1:
2986 // before: .. c, b, a
2987 // after: .. b, a, c, b, a
2988 // not tested
2989 a = pop();
2990 b = pop();
2991 c = pop();
2992 push( b );
2993 push( a );
2994 push( c );
2995 push( b );
2996 push( a );
2997 break;
2998 case Bytecodes::_dup2_x2:
2999 // before: .. d, c, b, a
3000 // after: .. b, a, d, c, b, a
3001 // not tested
3002 a = pop();
3003 b = pop();
3004 c = pop();
3005 d = pop();
3006 push( b );
3007 push( a );
3008 push( d );
3009 push( c );
3010 push( b );
3011 push( a );
3012 break;
3013
3014 case Bytecodes::_arraylength: {
3015 // Must do null-check with value on expression stack
3016 Node *ary = null_check(peek(), T_ARRAY);
3017 // Compile-time detect of null-exception?
3018 if (stopped()) return;
3019 a = pop();
3020 push(load_array_length(a));
3021 break;
3022 }
3023
3024 case Bytecodes::_baload: array_load(T_BYTE); break;
3025 case Bytecodes::_caload: array_load(T_CHAR); break;
3026 case Bytecodes::_iaload: array_load(T_INT); break;
3027 case Bytecodes::_saload: array_load(T_SHORT); break;
3028 case Bytecodes::_faload: array_load(T_FLOAT); break;
3029 case Bytecodes::_aaload: array_load(T_OBJECT); break;
3030 case Bytecodes::_laload: array_load(T_LONG); break;
3031 case Bytecodes::_daload: array_load(T_DOUBLE); break;
3032 case Bytecodes::_bastore: array_store(T_BYTE); break;
3033 case Bytecodes::_castore: array_store(T_CHAR); break;
3034 case Bytecodes::_iastore: array_store(T_INT); break;
3035 case Bytecodes::_sastore: array_store(T_SHORT); break;
3036 case Bytecodes::_fastore: array_store(T_FLOAT); break;
3037 case Bytecodes::_aastore: array_store(T_OBJECT); break;
3038 case Bytecodes::_lastore: array_store(T_LONG); break;
3039 case Bytecodes::_dastore: array_store(T_DOUBLE); break;
3040
3041 case Bytecodes::_getfield:
3042 do_getfield();
3043 break;
3044
3045 case Bytecodes::_getstatic:
3046 do_getstatic();
3047 break;
3048
3049 case Bytecodes::_putfield:
3050 do_putfield();
3051 break;
3052
3053 case Bytecodes::_putstatic:
3054 do_putstatic();
3055 break;
3056
3057 case Bytecodes::_irem:
    // Must keep both values on the expression stack during the zero check
    zero_check_int(peek());
    // Compile-time detect of division by zero?
3061 if (stopped()) return;
3062 b = pop();
3063 a = pop();
3064 push(_gvn.transform(new ModINode(control(), a, b)));
3065 break;
3066 case Bytecodes::_idiv:
    // Must keep both values on the expression stack during the zero check
    zero_check_int(peek());
    // Compile-time detect of division by zero?
3070 if (stopped()) return;
3071 b = pop();
3072 a = pop();
3073 push( _gvn.transform( new DivINode(control(),a,b) ) );
3074 break;
3075 case Bytecodes::_imul:
3076 b = pop(); a = pop();
3077 push( _gvn.transform( new MulINode(a,b) ) );
3078 break;
3079 case Bytecodes::_iadd:
3080 b = pop(); a = pop();
3081 push( _gvn.transform( new AddINode(a,b) ) );
3082 break;
3083 case Bytecodes::_ineg:
3084 a = pop();
3085 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
3086 break;
3087 case Bytecodes::_isub:
3088 b = pop(); a = pop();
3089 push( _gvn.transform( new SubINode(a,b) ) );
3090 break;
3091 case Bytecodes::_iand:
3092 b = pop(); a = pop();
3093 push( _gvn.transform( new AndINode(a,b) ) );
3094 break;
3095 case Bytecodes::_ior:
3096 b = pop(); a = pop();
3097 push( _gvn.transform( new OrINode(a,b) ) );
3098 break;
3099 case Bytecodes::_ixor:
3100 b = pop(); a = pop();
3101 push( _gvn.transform( new XorINode(a,b) ) );
3102 break;
3103 case Bytecodes::_ishl:
3104 b = pop(); a = pop();
3105 push( _gvn.transform( new LShiftINode(a,b) ) );
3106 break;
3107 case Bytecodes::_ishr:
3108 b = pop(); a = pop();
3109 push( _gvn.transform( new RShiftINode(a,b) ) );
3110 break;
3111 case Bytecodes::_iushr:
3112 b = pop(); a = pop();
3113 push( _gvn.transform( new URShiftINode(a,b) ) );
3114 break;
3115
3116 case Bytecodes::_fneg:
3117 a = pop();
3118 b = _gvn.transform(new NegFNode (a));
3119 push(b);
3120 break;
3121
3122 case Bytecodes::_fsub:
3123 b = pop();
3124 a = pop();
3125 c = _gvn.transform( new SubFNode(a,b) );
3126 push(c);
3127 break;
3128
3129 case Bytecodes::_fadd:
3130 b = pop();
3131 a = pop();
3132 c = _gvn.transform( new AddFNode(a,b) );
3133 push(c);
3134 break;
3135
3136 case Bytecodes::_fmul:
3137 b = pop();
3138 a = pop();
3139 c = _gvn.transform( new MulFNode(a,b) );
3140 push(c);
3141 break;
3142
3143 case Bytecodes::_fdiv:
3144 b = pop();
3145 a = pop();
3146 c = _gvn.transform( new DivFNode(nullptr,a,b) );
3147 push(c);
3148 break;
3149
3150 case Bytecodes::_frem:
3151 // Generate a ModF node.
3152 b = pop();
3153 a = pop();
3154 push(floating_point_mod(a, b, BasicType::T_FLOAT));
3155 break;
3156
3157 case Bytecodes::_fcmpl:
3158 b = pop();
3159 a = pop();
3160 c = _gvn.transform( new CmpF3Node( a, b));
3161 push(c);
3162 break;
3163 case Bytecodes::_fcmpg:
3164 b = pop();
3165 a = pop();
3166
3167     // Same as fcmpl but we need to flip the unordered case. Swap the inputs,
3168     // which negates the result sign except for unordered. Flip the unordered
3169     // as well by using CmpF3, which implements unordered-lesser instead of
3170     // unordered-greater semantics. Finally, negate the result bits. The result
3171     // is the same as using a CmpF3Greater except we did it with CmpF3 alone.
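    // In effect fcmpg(a, b) == -fcmpl(b, a): the swap-and-negate also maps the
    // unordered result from -1 to +1, as fcmpg requires.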
3172 c = _gvn.transform( new CmpF3Node( b, a));
3173 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3174 push(c);
3175 break;
3176
3177 case Bytecodes::_f2i:
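    // f2i follows Java semantics: NaN converts to 0 and out-of-range values
    // saturate to Integer.MIN_VALUE/MAX_VALUE.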
3178 a = pop();
3179 push(_gvn.transform(new ConvF2INode(a)));
3180 break;
3181
3182 case Bytecodes::_d2i:
3183 a = pop_pair();
3184 b = _gvn.transform(new ConvD2INode(a));
3185 push( b );
3186 break;
3187
3188 case Bytecodes::_f2d:
3189 a = pop();
3190 b = _gvn.transform( new ConvF2DNode(a));
3191 push_pair( b );
3192 break;
3193
3194 case Bytecodes::_d2f:
3195 a = pop_pair();
3196 b = _gvn.transform( new ConvD2FNode(a));
3197 push( b );
3198 break;
3199
3200 case Bytecodes::_l2f:
3201 if (Matcher::convL2FSupported()) {
3202 a = pop_pair();
3203 b = _gvn.transform( new ConvL2FNode(a));
3204 push(b);
3205 } else {
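      // ConvL2F cannot be matched on this platform; Parse::l2f() falls back to a
      // runtime call for the conversion.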
3206 l2f();
3207 }
3208 break;
3209
3210 case Bytecodes::_l2d:
3211 a = pop_pair();
3212 b = _gvn.transform( new ConvL2DNode(a));
3213 push_pair(b);
3214 break;
3215
3216 case Bytecodes::_f2l:
3217 a = pop();
3218 b = _gvn.transform( new ConvF2LNode(a));
3219 push_pair(b);
3220 break;
3221
3222 case Bytecodes::_d2l:
3223 a = pop_pair();
3224 b = _gvn.transform( new ConvD2LNode(a));
3225 push_pair(b);
3226 break;
3227
3228 case Bytecodes::_dsub:
3229 b = pop_pair();
3230 a = pop_pair();
3231 c = _gvn.transform( new SubDNode(a,b) );
3232 push_pair(c);
3233 break;
3234
3235 case Bytecodes::_dadd:
3236 b = pop_pair();
3237 a = pop_pair();
3238 c = _gvn.transform( new AddDNode(a,b) );
3239 push_pair(c);
3240 break;
3241
3242 case Bytecodes::_dmul:
3243 b = pop_pair();
3244 a = pop_pair();
3245 c = _gvn.transform( new MulDNode(a,b) );
3246 push_pair(c);
3247 break;
3248
3249 case Bytecodes::_ddiv:
3250 b = pop_pair();
3251 a = pop_pair();
3252 c = _gvn.transform( new DivDNode(nullptr,a,b) );
3253 push_pair(c);
3254 break;
3255
3256 case Bytecodes::_dneg:
3257 a = pop_pair();
3258 b = _gvn.transform(new NegDNode (a));
3259 push_pair(b);
3260 break;
3261
3262 case Bytecodes::_drem:
3263 // Generate a ModD node.
3264 b = pop_pair();
3265 a = pop_pair();
3266 push_pair(floating_point_mod(a, b, BasicType::T_DOUBLE));
3267 break;
3268
3269 case Bytecodes::_dcmpl:
3270 b = pop_pair();
3271 a = pop_pair();
3272 c = _gvn.transform( new CmpD3Node( a, b));
3273 push(c);
3274 break;
3275
3276 case Bytecodes::_dcmpg:
3277 b = pop_pair();
3278 a = pop_pair();
3279 // Same as dcmpl but need to flip the unordered case.
3280 // Commute the inputs, which negates the result sign except for unordered.
3281 // Flip the unordered as well by using CmpD3 which implements
3282 // unordered-lesser instead of unordered-greater semantics.
3283 // Finally, negate the result bits. Result is same as using a
3284 // CmpD3Greater except we did it with CmpD3 alone.
3285 c = _gvn.transform( new CmpD3Node( b, a));
3286 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3287 push(c);
3288 break;
3289
3290
3291   // Note: for longs, the lo word is on TOS and the hi word is at TOS - 1
3292 case Bytecodes::_land:
3293 b = pop_pair();
3294 a = pop_pair();
3295 c = _gvn.transform( new AndLNode(a,b) );
3296 push_pair(c);
3297 break;
3298 case Bytecodes::_lor:
3299 b = pop_pair();
3300 a = pop_pair();
3301 c = _gvn.transform( new OrLNode(a,b) );
3302 push_pair(c);
3303 break;
3304 case Bytecodes::_lxor:
3305 b = pop_pair();
3306 a = pop_pair();
3307 c = _gvn.transform( new XorLNode(a,b) );
3308 push_pair(c);
3309 break;
3310
3311 case Bytecodes::_lshl:
3312 b = pop(); // the shift count
3313 a = pop_pair(); // value to be shifted
3314 c = _gvn.transform( new LShiftLNode(a,b) );
3315 push_pair(c);
3316 break;
3317 case Bytecodes::_lshr:
3318 b = pop(); // the shift count
3319 a = pop_pair(); // value to be shifted
3320 c = _gvn.transform( new RShiftLNode(a,b) );
3321 push_pair(c);
3322 break;
3323 case Bytecodes::_lushr:
3324 b = pop(); // the shift count
3325 a = pop_pair(); // value to be shifted
3326 c = _gvn.transform( new URShiftLNode(a,b) );
3327 push_pair(c);
3328 break;
3329 case Bytecodes::_lmul:
3330 b = pop_pair();
3331 a = pop_pair();
3332 c = _gvn.transform( new MulLNode(a,b) );
3333 push_pair(c);
3334 break;
3335
3336 case Bytecodes::_lrem:
3337     // Must keep both values on the expression-stack during the zero check
3338 assert(peek(0) == top(), "long word order");
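    // (peek(0) is the dummy top() half of the two-slot long; the value node itself is at peek(1))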
3339 zero_check_long(peek(1));
3340     // Compile-time detect of division-by-zero?
3341 if (stopped()) return;
3342 b = pop_pair();
3343 a = pop_pair();
3344 c = _gvn.transform( new ModLNode(control(),a,b) );
3345 push_pair(c);
3346 break;
3347
3348 case Bytecodes::_ldiv:
3349     // Must keep both values on the expression-stack during the zero check
3350 assert(peek(0) == top(), "long word order");
3351 zero_check_long(peek(1));
3352     // Compile-time detect of division-by-zero?
3353 if (stopped()) return;
3354 b = pop_pair();
3355 a = pop_pair();
3356 c = _gvn.transform( new DivLNode(control(),a,b) );
3357 push_pair(c);
3358 break;
3359
3360 case Bytecodes::_ladd:
3361 b = pop_pair();
3362 a = pop_pair();
3363 c = _gvn.transform( new AddLNode(a,b) );
3364 push_pair(c);
3365 break;
3366 case Bytecodes::_lsub:
3367 b = pop_pair();
3368 a = pop_pair();
3369 c = _gvn.transform( new SubLNode(a,b) );
3370 push_pair(c);
3371 break;
3372 case Bytecodes::_lcmp:
3373 // Safepoints are now inserted _before_ branches. The long-compare
3374 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
3375 // slew of control flow. These are usually followed by a CmpI vs zero and
3376 // a branch; this pattern then optimizes to the obvious long-compare and
3377 // branch. However, if the branch is backwards there's a Safepoint
3378 // inserted. The inserted Safepoint captures the JVM state at the
3379 // pre-branch point, i.e. it captures the 3-way value. Thus if a
3380 // long-compare is used to control a loop the debug info will force
3381 // computation of the 3-way value, even though the generated code uses a
3382 // long-compare and branch. We try to rectify the situation by inserting
3383     // a SafePoint here, having it dominate and kill the safepoint added at a
3384 // following backwards branch. At this point the JVM state merely holds 2
3385 // longs but not the 3-way value.
3386 switch (iter().next_bc()) {
3387 case Bytecodes::_ifgt:
3388 case Bytecodes::_iflt:
3389 case Bytecodes::_ifge:
3390 case Bytecodes::_ifle:
3391 case Bytecodes::_ifne:
3392 case Bytecodes::_ifeq:
3393 // If this is a backwards branch in the bytecodes, add Safepoint
3394 maybe_add_safepoint(iter().next_get_dest());
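      // No break: fall through to default. maybe_add_safepoint only inserts a
      // safepoint when the branch target lies backwards in the bytecodes.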
3395 default:
3396 break;
3397 }
3398 b = pop_pair();
3399 a = pop_pair();
3400 c = _gvn.transform( new CmpL3Node( a, b ));
3401 push(c);
3402 break;
3403
3404 case Bytecodes::_lneg:
3405 a = pop_pair();
3406 b = _gvn.transform( new SubLNode(longcon(0),a));
3407 push_pair(b);
3408 break;
3409 case Bytecodes::_l2i:
3410 a = pop_pair();
3411 push( _gvn.transform( new ConvL2INode(a)));
3412 break;
3413 case Bytecodes::_i2l:
3414 a = pop();
3415 b = _gvn.transform( new ConvI2LNode(a));
3416 push_pair(b);
3417 break;
3418 case Bytecodes::_i2b:
3419 // Sign extend
3420 a = pop();
3421 a = Compile::narrow_value(T_BYTE, a, nullptr, &_gvn, true);
3422 push(a);
3423 break;
3424 case Bytecodes::_i2s:
3425 a = pop();
3426 a = Compile::narrow_value(T_SHORT, a, nullptr, &_gvn, true);
3427 push(a);
3428 break;
3429 case Bytecodes::_i2c:
3430 a = pop();
3431 a = Compile::narrow_value(T_CHAR, a, nullptr, &_gvn, true);
3432 push(a);
3433 break;
3434
3435 case Bytecodes::_i2f:
3436 a = pop();
3437 b = _gvn.transform( new ConvI2FNode(a) ) ;
3438 push(b);
3439 break;
3440
3441 case Bytecodes::_i2d:
3442 a = pop();
3443 b = _gvn.transform( new ConvI2DNode(a));
3444 push_pair(b);
3445 break;
3446
3447 case Bytecodes::_iinc: // Increment local
3448 i = iter().get_index(); // Get local index
3449 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3450 break;
3451
3452 // Exit points of synchronized methods must have an unlock node
3453 case Bytecodes::_return:
3454 return_current(nullptr);
3455 break;
3456
3457 case Bytecodes::_ireturn:
3458 case Bytecodes::_areturn:
3459 case Bytecodes::_freturn:
3460 return_current(cast_to_non_larval(pop()));
3461 break;
3462 case Bytecodes::_lreturn:
3463 case Bytecodes::_dreturn:
3464 return_current(pop_pair());
3465 break;
3466
3467 case Bytecodes::_athrow:
3468     // A null exception oop throws a NullPointerException instead
3469 null_check(peek());
3470 if (stopped()) return;
3471 // Hook the thrown exception directly to subsequent handlers.
3472 if (BailoutToInterpreterForThrows) {
3473 // Keep method interpreted from now on.
3474 uncommon_trap(Deoptimization::Reason_unhandled,
3475 Deoptimization::Action_make_not_compilable);
3476 return;
3477 }
3478 if (env()->jvmti_can_post_on_exceptions()) {
3479 // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3480 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3481 }
3482 // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3483 add_exception_state(make_exception_state(peek()));
3484 break;
3485
3486 case Bytecodes::_goto: // fall through
3487 case Bytecodes::_goto_w: {
3488 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
3489
3490 // If this is a backwards branch in the bytecodes, add Safepoint
3491 maybe_add_safepoint(target_bci);
3492
3493 // Merge the current control into the target basic block
3494 merge(target_bci);
3495
3496 // See if we can get some profile data and hand it off to the next block
3497 Block *target_block = block()->successor_for_bci(target_bci);
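    // Only seed the target block's count when this goto is its sole predecessor;
    // otherwise the taken count from this branch alone would understate it.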
3498 if (target_block->pred_count() != 1) break;
3499 ciMethodData* methodData = method()->method_data();
3500 if (!methodData->is_mature()) break;
3501 ciProfileData* data = methodData->bci_to_data(bci());
3502 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3503 int taken = ((ciJumpData*)data)->taken();
3504 taken = method()->scale_count(taken);
3505 target_block->set_count(taken);
3506 break;
3507 }
3508
3509 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
3510 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3511 handle_if_null:
3512 // If this is a backwards branch in the bytecodes, add Safepoint
3513 maybe_add_safepoint(iter().get_dest());
3514 a = null();
3515 b = cast_to_non_larval(pop());
3516 if (b->is_InlineType()) {
3517 // Null checking a scalarized but nullable inline type. Check the null marker
3518 // input instead of the oop input to avoid keeping buffer allocations alive
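    // (the null marker is zero exactly when the value is null, so the eq/ne test
    // below mirrors ifnull/ifnonnull)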
3519 c = _gvn.transform(new CmpINode(b->as_InlineType()->get_null_marker(), zerocon(T_INT)));
3520 } else {
3521 if (!_gvn.type(b)->speculative_maybe_null() &&
3522 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
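      // inc_sp/dec_sp temporarily restore the popped value to the expression stack
      // so the JVM state is correct if the speculative check triggers an uncommon trap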
3523 inc_sp(1);
3524 Node* null_ctl = top();
3525 b = null_check_oop(b, &null_ctl, true, true, true);
3526 assert(null_ctl->is_top(), "no null control here");
3527 dec_sp(1);
3528 } else if (_gvn.type(b)->speculative_always_null() &&
3529 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3530 inc_sp(1);
3531 b = null_assert(b);
3532 dec_sp(1);
3533 }
3534 c = _gvn.transform( new CmpPNode(b, a) );
3535 }
3536 do_ifnull(btest, c);
3537 break;
3538
3539 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3540 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3541 handle_if_acmp:
3542 // If this is a backwards branch in the bytecodes, add Safepoint
3543 maybe_add_safepoint(iter().get_dest());
3544 a = cast_to_non_larval(pop());
3545 b = cast_to_non_larval(pop());
3546 do_acmp(btest, b, a);
3547 break;
3548
3549 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3550 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3551 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3552 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3553 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3554 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3555 handle_ifxx:
3556 // If this is a backwards branch in the bytecodes, add Safepoint
3557 maybe_add_safepoint(iter().get_dest());
3558 a = _gvn.intcon(0);
3559 b = pop();
3560 c = _gvn.transform( new CmpINode(b, a) );
3561 do_if(btest, c);
3562 break;
3563
3564 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3565 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3566 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3567 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
3568 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
3569 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
3570 handle_if_icmp:
3571 // If this is a backwards branch in the bytecodes, add Safepoint
3572 maybe_add_safepoint(iter().get_dest());
3573 a = pop();
3574 b = pop();
3575 c = _gvn.transform( new CmpINode( b, a ) );
3576 do_if(btest, c);
3577 break;
3578
3579 case Bytecodes::_tableswitch:
3580 do_tableswitch();
3581 break;
3582
3583 case Bytecodes::_lookupswitch:
3584 do_lookupswitch();
3585 break;
3586
3587 case Bytecodes::_invokestatic:
3588 case Bytecodes::_invokedynamic:
3589 case Bytecodes::_invokespecial:
3590 case Bytecodes::_invokevirtual:
3591 case Bytecodes::_invokeinterface:
3592 do_call();
3593 break;
3594 case Bytecodes::_checkcast:
3595 do_checkcast();
3596 break;
3597 case Bytecodes::_instanceof:
3598 do_instanceof();
3599 break;
3600 case Bytecodes::_anewarray:
3601 do_newarray();
3602 break;
3603 case Bytecodes::_newarray:
3604 do_newarray((BasicType)iter().get_index());
3605 break;
3606 case Bytecodes::_multianewarray:
3607 do_multianewarray();
3608 break;
3609 case Bytecodes::_new:
3610 do_new();
3611 break;
3612
3613 case Bytecodes::_jsr:
3614 case Bytecodes::_jsr_w:
3615 do_jsr();
3616 break;
3617
3618 case Bytecodes::_ret:
3619 do_ret();
3620 break;
3621
3622
3623 case Bytecodes::_monitorenter:
3624 do_monitor_enter();
3625 break;
3626
3627 case Bytecodes::_monitorexit:
3628 do_monitor_exit();
3629 break;
3630
3631 case Bytecodes::_breakpoint:
3632     // Breakpoint was set concurrently with compilation
3633 // %%% use an uncommon trap?
3634 C->record_failure("breakpoint in method");
3635 return;
3636
3637 default:
3638 #ifndef PRODUCT
3639 map()->dump(99);
3640 #endif
3641 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
3642 ShouldNotReachHere();
3643 }
3644
3645 #ifndef PRODUCT
3646 if (failing()) { return; }
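  // At IGV level 6 ("per bytecode"), dump the graph after every parsed bytecode.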
3647 constexpr int perBytecode = 6;
3648 if (C->should_print_igv(perBytecode)) {
3649 IdealGraphPrinter* printer = C->igv_printer();
3650 char buffer[256];
3651 jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
3652 bool old = printer->traverse_outs();
3653 printer->set_traverse_outs(true);
3654 printer->set_parse(this);
3655 printer->print_graph(buffer);
3656 printer->set_traverse_outs(old);
3657 printer->set_parse(nullptr);
3658 }
3659 #endif
3660 }