/*
 * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "ci/ciMethodData.hpp"
#include "ci/ciSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif

Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  // Feed unused profile data to type speculation
  if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    ciKlass* array_type = NULL;
    ciKlass* element_type = NULL;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
    if (element_type != NULL || element_ptr != ProfileMaybeNull) {
      ld = record_profile_for_speculation(ld, element_type, element_ptr);
    }
  }
  return ld;
}


//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, 0, elemtype);
  if (stopped())  return;     // guaranteed null or range check

  Node* idx = pop();
  Node* ary = pop();

  // Handle inline type arrays
  const TypeOopPtr* elemptr = elemtype->make_oopptr();
  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  if (ary_t->is_flat()) {
    // Load from flattened inline type array
    Node* vt = InlineTypeNode::make_from_flattened(this, elemtype->inline_klass(), ary, adr);
    push(vt);
    return;
  } else if (ary_t->is_null_free()) {
    // Load from non-flattened inline type array (elements can never be null)
    bt = T_PRIMITIVE_OBJECT;
  } else if (!ary_t->is_not_flat()) {
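    // A sketch of the shape emitted below (exact nodes depend on the GC
    // barrier set and on whether the element type is statically known):
    //   if (!is_flat_array(ary))  res = regular oop load (possibly re-wrapped
    //                                   as an InlineTypeNode);
    //   else                      res = flattened load, or a runtime call
    //                                   when the element type is unknown;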
    // Cannot statically determine if array is flattened, emit runtime check
    assert(UseFlatArray && is_reference_type(bt) && elemptr->can_be_inline_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() &&
           (!elemptr->is_inlinetypeptr() || elemptr->inline_klass()->flatten_array()), "array can't be flattened");
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
      // non-flattened
      assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
      sync_kit(ideal);
      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
      Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
      if (elemptr->is_inlinetypeptr()) {
        assert(elemptr->maybe_null(), "null free array should be handled above");
        ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), false);
      }
      ideal.sync_kit(this);
      ideal.set(res, ld);
    } ideal.else_(); {
      // flattened
      sync_kit(ideal);
      if (elemptr->is_inlinetypeptr()) {
        // Element type is known, cast and load from flattened representation
        ciInlineKlass* vk = elemptr->inline_klass();
        assert(vk->flatten_array() && elemptr->maybe_null(), "never/always flat - should be optimized");
        ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
        const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
        Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
        Node* casted_adr = array_element_address(cast, idx, T_PRIMITIVE_OBJECT, ary_t->size(), control());
        // Re-execute flattened array load if buffering triggers deoptimization
        PreserveReexecuteState preexecs(this);
        jvms()->set_should_reexecute(true);
        inc_sp(2);
        Node* vt = InlineTypeNode::make_from_flattened(this, vk, cast, casted_adr)->buffer(this, false);
        ideal.set(res, vt);
        ideal.sync_kit(this);
      } else {
        // Element type is unknown, emit runtime call

        // Below membars keep this access to an unknown flattened array correctly
        // ordered with other unknown and known flattened array accesses.
        insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

        Node* call = NULL;
        {
          // Re-execute flattened array load if runtime call triggers deoptimization
          PreserveReexecuteState preexecs(this);
          jvms()->set_bci(_bci);
          jvms()->set_should_reexecute(true);
          inc_sp(2);
          kill_dead_locals();
          call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                                   OptoRuntime::load_unknown_inline_type(),
                                   OptoRuntime::load_unknown_inline_Java(),
                                   NULL, TypeRawPtr::BOTTOM,
                                   ary, idx);
        }
        make_slow_call_ex(call, env()->Throwable_klass(), false);
        Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));

        insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

        // Keep track of the information that the inline type is flattened in arrays
        const Type* unknown_value = elemptr->is_instptr()->cast_to_flatten_array();
        buffer = _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));

        ideal.sync_kit(this);
        ideal.set(res, buffer);
      }
    } ideal.end_if();
    sync_kit(ideal);
    Node* ld = _gvn.transform(ideal.value(res));
    ld = record_profile_for_speculation_at_array_load(ld);
    push_node(bt, ld);
    return;
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  }
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  ld = record_profile_for_speculation_at_array_load(ld);
  // Loading a non-flattened inline type
  if (elemptr != NULL && elemptr->is_inlinetypeptr()) {
    assert(!ary_t->is_null_free() || !elemptr->maybe_null(), "inline type array elements should never be null");
    ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), !elemptr->maybe_null());
  }
  push_node(bt, ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, type2size[bt], elemtype);
  if (stopped())  return;     // guaranteed null or range check
  Node* cast_val = NULL;
  if (bt == T_OBJECT) {
    cast_val = array_store_check(adr, elemtype);
    if (stopped()) return;
  }
  Node* val = pop_node(bt); // Value to store
  Node* idx = pop();        // Index in the array
  Node* ary = pop();        // The array itself

  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  assert(adr->as_AddP()->in(AddPNode::Base) == ary, "inconsistent address base");

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = elemtype->make_oopptr();
    const Type* tval = _gvn.type(cast_val);
    // We may have lost type information for 'val' here due to the casts
    // emitted by the array_store_check code (see JDK-6312651)
    // TODO Remove this code once JDK-6312651 is in.
    const Type* tval_init = _gvn.type(val);
    // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
    // This is only legal for non-null stores because the array_store_check always passes for null, even
    // if the array is null-free. Null stores are handled in GraphKit::gen_inline_array_null_guard().
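    // For example, storing a non-null value that is statically known not to
    // be an inline type proves the array cannot be null-free (and therefore
    // cannot be flat); the casts below record that in the array's type.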
    bool not_inline = !tval->isa_inlinetype() &&
                      ((!tval_init->maybe_null() && !tval_init->is_oopptr()->can_be_inline_type()) ||
                       (!tval->maybe_null() && !tval->is_oopptr()->can_be_inline_type()));
    bool not_flattened = not_inline || ((tval_init->is_inlinetypeptr() || tval_init->isa_inlinetype()) && !tval_init->inline_klass()->flatten_array());
    if (!ary_t->is_not_null_free() && not_inline) {
      // Storing a non-inline type, mark array as not null-free (-> not flat).
      ary_t = ary_t->cast_to_not_null_free();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
      replace_in_map(ary, cast);
      ary = cast;
    } else if (!ary_t->is_not_flat() && not_flattened) {
      // Storing a non-flattened value, mark array as not flat.
      ary_t = ary_t->cast_to_not_flat();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
      replace_in_map(ary, cast);
      ary = cast;
    }

    if (ary_t->is_flat()) {
      // Store to flattened inline type array
      assert(!tval->maybe_null(), "should be guaranteed by array store check");
      // Re-execute flattened array store if buffering triggers deoptimization
      PreserveReexecuteState preexecs(this);
      inc_sp(3);
      jvms()->set_should_reexecute(true);
      cast_val->as_InlineTypeBase()->store_flattened(this, ary, adr, NULL, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
      return;
    } else if (ary_t->is_null_free()) {
      // Store to non-flattened inline type array (elements can never be null)
      assert(!tval->maybe_null(), "should be guaranteed by array store check");
      if (elemtype->inline_klass()->is_empty()) {
        // Ignore empty inline stores, array is already initialized.
        return;
      }
    } else if (!ary_t->is_not_flat() && (tval != TypePtr::NULL_PTR || StressReflectiveCode)) {
      // Array might be flattened, emit runtime checks (for NULL, a simple inline_array_null_guard is sufficient).
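      // This mirrors the runtime check in Parse::array_load() above: a
      // flat_array_test selects between a regular oop store and a flattened
      // store (or a runtime call when the element type is unknown).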
      assert(UseFlatArray && !not_flattened && elemtype->is_oopptr()->can_be_inline_type() &&
             !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be flattened");
      IdealKit ideal(this);
      ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
        // non-flattened
        assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
        sync_kit(ideal);
        Node* cast_ary = inline_array_null_guard(ary, cast_val, 3);
        inc_sp(3);
        access_store_at(cast_ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
        dec_sp(3);
        ideal.sync_kit(this);
      } ideal.else_(); {
        sync_kit(ideal);
        // flattened
        Node* null_ctl = top();
        Node* val = null_check_oop(cast_val, &null_ctl);
        if (null_ctl != top()) {
          PreserveJVMState pjvms(this);
          inc_sp(3);
          set_control(null_ctl);
          uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
          dec_sp(3);
        }
        // Try to determine the inline klass
        ciInlineKlass* vk = NULL;
        if (tval->isa_inlinetype() || tval->is_inlinetypeptr()) {
          vk = tval->inline_klass();
        } else if (tval_init->isa_inlinetype() || tval_init->is_inlinetypeptr()) {
          vk = tval_init->inline_klass();
        } else if (elemtype->is_inlinetypeptr()) {
          vk = elemtype->inline_klass();
        }
        Node* casted_ary = ary;
        if (vk != NULL && !stopped()) {
          // Element type is known, cast and store to flattened representation
          assert(vk->flatten_array() && elemtype->maybe_null(), "never/always flat - should be optimized");
          ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
          const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
          casted_ary = _gvn.transform(new CheckCastPPNode(control(), casted_ary, arytype));
          Node* casted_adr = array_element_address(casted_ary, idx, T_OBJECT, arytype->size(), control());
          if (!val->is_InlineType()) {
            assert(!gvn().type(val)->maybe_null(), "inline type array elements should never be null");
            val = InlineTypeNode::make_from_oop(this, val, vk);
          }
          // Re-execute flattened array store if buffering triggers deoptimization
          PreserveReexecuteState preexecs(this);
          inc_sp(3);
          jvms()->set_should_reexecute(true);
          val->as_InlineTypeBase()->store_flattened(this, casted_ary, casted_adr, NULL, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
        } else if (!stopped()) {
          // Element type is unknown, emit runtime call

          // Below membars keep this access to an unknown flattened array correctly
          // ordered with other unknown and known flattened array accesses.
          insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

          make_runtime_call(RC_LEAF,
                            OptoRuntime::store_unknown_inline_type(),
                            CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_inline),
                            "store_unknown_inline", TypeRawPtr::BOTTOM,
                            val, casted_ary, idx);

          insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
        }
        ideal.sync_kit(this);
      }
      ideal.end_if();
      sync_kit(ideal);
      return;
    } else if (!ary_t->is_not_null_free()) {
      // Array is not flattened but may be null free
      assert(elemtype->is_oopptr()->can_be_inline_type() && !ary_t->klass_is_exact(), "array can't be null-free");
      ary = inline_array_null_guard(ary, cast_val, 3, true);
    }
  }
  inc_sp(3);
  access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
  dec_sp(3);
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
  Node *idx = peek(0+vals);   // Get from stack without popping
  Node *ary = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  elemtype = arytype->elem();

  if (UseUniqueSubclasses) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)  C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass* arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
    RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    _gvn.set_type(rc, rc->Value(&_gvn));
    if (!tst->is_Con()) {
      record_for_igvn(rc);
    }
    set_control(_gvn.transform(new IfTrueNode(rc)));
    // Branch to failure if out of bounds
    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(new IfFalseNode(rc)));
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // This could be an access to an inline type array. We can't tell if it's
  // flat or not. Knowing the exact type avoids runtime checks and leads to
  // a much simpler graph shape. Check profile information.
  if (!arytype->is_flat() && !arytype->is_not_flat()) {
    // First check the speculative type
    Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
    ciKlass* array_type = arytype->speculative_type();
    if (too_many_traps_or_recompiles(reason) || array_type == NULL) {
      // No speculative type, check profile data at this bci
      array_type = NULL;
      reason = Deoptimization::Reason_class_check;
      if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
        ciKlass* element_type = NULL;
        ProfilePtrKind element_ptr = ProfileMaybeNull;
        bool flat_array = true;
        bool null_free_array = true;
        method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
      }
    }
    if (array_type != NULL) {
      // Speculate that this array has the exact type reported by profile data
      Node* better_ary = NULL;
      DEBUG_ONLY(Node* old_control = control();)
      Node* slow_ctl = type_check_receiver(ary, array_type, 1.0, &better_ary);
      if (stopped()) {
        // The check always fails and therefore profile information is incorrect. Don't use it.
        assert(old_control == slow_ctl, "type check should have been removed");
        set_control(slow_ctl);
      } else if (!slow_ctl->is_top()) {
        { PreserveJVMState pjvms(this);
          set_control(slow_ctl);
          uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
        }
        replace_in_map(ary, better_ary);
        ary = better_ary;
        arytype  = _gvn.type(ary)->is_aryptr();
        elemtype = arytype->elem();
      }
    }
  } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    // No need to speculate: feed profile data at this bci for the
    // array to type speculation
    ciKlass* array_type = NULL;
    ciKlass* element_type = NULL;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
    if (array_type != NULL) {
      ary = record_profile_for_speculation(ary, array_type, ProfileMaybeNull);
    }
  }

  // We have no exact array type from profile data. Check profile data
  // for a non null-free or non flat array. Non null-free implies non
  // flat so check this one first. Speculating on a non null-free
  // array doesn't help aaload but could be profitable for a
  // subsequent aastore.
  if (!arytype->is_null_free() && !arytype->is_not_null_free()) {
    bool null_free_array = true;
    Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
    if (arytype->speculative() != NULL &&
        arytype->speculative()->is_aryptr()->is_not_null_free() &&
        !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
      null_free_array = false;
      reason = Deoptimization::Reason_speculate_class_check;
    } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
      ciKlass* array_type = NULL;
      ciKlass* element_type = NULL;
      ProfilePtrKind element_ptr = ProfileMaybeNull;
      bool flat_array = true;
      method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
      reason = Deoptimization::Reason_class_check;
    }
    if (!null_free_array) {
      { // Deoptimize if null-free array
        BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      assert(!stopped(), "null-free array should have been caught earlier");
      Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free()));
      replace_in_map(ary, better_ary);
      ary = better_ary;
      arytype = _gvn.type(ary)->is_aryptr();
    }
  }

  if (!arytype->is_flat() && !arytype->is_not_flat()) {
    bool flat_array = true;
    Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
    if (arytype->speculative() != NULL &&
        arytype->speculative()->is_aryptr()->is_not_flat() &&
        !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
      flat_array = false;
      reason = Deoptimization::Reason_speculate_class_check;
    } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
      ciKlass* array_type = NULL;
      ciKlass* element_type = NULL;
      ProfilePtrKind element_ptr = ProfileMaybeNull;
      bool null_free_array = true;
      method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
      reason = Deoptimization::Reason_class_check;
    }
    if (!flat_array) {
      { // Deoptimize if flat array
        BuildCutout unless(this, flat_array_test(ary, /* flat = */ false), PROB_MAX);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      assert(!stopped(), "flat array should have been caught earlier");
      Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_flat()));
      replace_in_map(ary, better_ary);
      ary = better_ary;
      arytype = _gvn.type(ary)->is_aryptr();
    }
  }

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());
  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}


// sentinel value for the target bci to mark never taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken always");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken never");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, bool unc) {
  // False branch, use existing map and control()
  if (unc) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  "taken never");
  } else {
    assert(dest_bci != never_reached, "inconsistent dest");
    merge_new_path(dest_bci);
  }
}

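// qsort() comparator used by do_lookupswitch() below: the table rows are
// (match, dest, cnt) jint triples and are ordered by their first element.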
extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;     // inclusive lower limit
  jint _hi;     // inclusive upper limit
  int _dest;
  float _cnt;   // how many times this range was hit according to profiling

public:
  jint lo() const           { return _lo;   }
  jint hi() const           { return _hi;   }
  int  dest() const         { return _dest; }
  bool is_singleton() const { return _lo == _hi; }
  float cnt() const         { return _cnt; }

  void setRange(jint lo, jint hi, int dest, float cnt) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo, _hi = hi; _dest = dest; _cnt = cnt;
    assert(_cnt >= 0, "");
  }
  bool adjoinRange(jint lo, jint hi, int dest, float cnt, bool trim_ranges) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1) {
      // see merge_ranges() comment below
      if (trim_ranges) {
        if (cnt == 0) {
          if (_cnt != 0) {
            return false;
          }
          if (dest != _dest) {
            _dest = never_reached;
          }
        } else {
          if (_cnt == 0) {
            return false;
          }
          if (dest != _dest) {
            return false;
          }
        }
      } else {
        if (dest != _dest) {
          return false;
        }
      }
      _hi = hi;
      _cnt += cnt;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, float cnt) {
    setRange(value, value, dest, cnt);
  }
  bool adjoin(jint value, int dest, float cnt, bool trim_ranges) {
    return adjoinRange(value, value, dest, cnt, trim_ranges);
  }
  bool adjoin(SwitchRange& other) {
    return adjoinRange(other._lo, other._hi, other._dest, other._cnt, false);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
    else
      tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
  }
};

// We try to minimize the number of ranges and the size of the taken
// ones using profiling data. When ranges are created,
// SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
// if both were never hit or both were hit to build longer unreached
// ranges. Here, we now merge adjoining ranges with the same
// destination and finally set destination of unreached ranges to the
// special value never_reached because it can help minimize the number
// of tests that are necessary.
//
// For instance:
// [0, 1] to target1 sometimes taken
// [1, 2] to target1 never taken
// [2, 3] to target2 never taken
// would lead to:
// [0, 1] to target1 sometimes taken
// [1, 3] never taken
//
// (first 2 ranges to target1 are not merged)
static void merge_ranges(SwitchRange* ranges, int& rp) {
  if (rp == 0) {
    return;
  }
  int shift = 0;
  for (int j = 0; j < rp; j++) {
    SwitchRange& r1 = ranges[j-shift];
    SwitchRange& r2 = ranges[j+1];
    if (r1.adjoin(r2)) {
      shift++;
    } else if (shift > 0) {
      ranges[j+1-shift] = r2;
    }
  }
  rp -= shift;
  for (int j = 0; j <= rp; j++) {
    SwitchRange& r = ranges[j];
    if (r.cnt() == 0 && r.dest() != never_reached) {
      r.setRange(r.lo(), r.hi(), never_reached, r.cnt());
    }
  }
}

//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  jint lo_index    = iter().get_int_table(1);
  jint hi_index    = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    pop(); // the effect of the instruction execution on the operand stack
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    float cnt = 1.0F;
    if (profile != NULL) {
      cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
    }
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    float cnt = 1.0F;
    if (profile != NULL) {
      cnt = (float)profile->count_at(j);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
      ranges[++rp].set(match_int, dest, cnt);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint) {
    float cnt = 1.0F;
    if (profile != NULL) {
      cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
    }
    if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
      ranges[++rp].setRange(highest+1, max_jint, default_dest, cnt);
    }
  }
  assert(rp < len+2, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case if backward branch observed
  if (makes_backward_branch) {
    add_safepoint();
  }

  Node* lookup = pop(); // lookup value
  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  jint len         = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    pop(); // the effect of the instruction execution on the operand stack
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
  {
    for (int j = 0; j < len; j++) {
      table[3*j+0] = iter().get_int_table(2+2*j);
      table[3*j+1] = iter().get_dest_table(2+2*j+1);
      // Handle overflow when converting from uint to jint
      table[3*j+2] = (profile == NULL) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
    }
    qsort(table, len, 3*sizeof(table[0]), jint_cmp);
  }

  float default_cnt = 1.0F;
  if (profile != NULL) {
    juint defaults = max_juint - len;
    default_cnt = (float)profile->default_count()/(float)defaults;
  }

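  // Gaps between explicit cases become default ranges in the loop below;
  // each gap gets a cnt of default_cnt times its width, spreading the
  // profiled default count evenly over all values without a case.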
  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for (int j = 0; j < len; j++) {
    jint match_int = table[3*j+0];
    jint dest      = table[3*j+1];
    jint cnt       = table[3*j+2];
    jint next_lo   = rp < 0 ? min_jint : ranges[rp].hi()+1;
    makes_backward_branch |= (dest <= bci());
    float c = default_cnt * ((float)match_int - (float)next_lo);
    if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, c, trim_ranges))) {
      assert(default_dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, c);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, (float)cnt, trim_ranges)) {
      assert(dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].set(match_int, dest, (float)cnt);
    }
  }
  jint highest = table[3*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint &&
      !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest), trim_ranges)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest));
  }
  assert(rp < rnum, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case backward branch observed
  if (makes_backward_branch) {
    add_safepoint();
  }

  Node *lookup = pop(); // lookup value
  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

static float if_prob(float taken_cnt, float total_cnt) {
  assert(taken_cnt <= total_cnt, "");
  if (total_cnt == 0) {
    return PROB_FAIR;
  }
  float p = taken_cnt / total_cnt;
  return clamp(p, PROB_MIN, PROB_MAX);
}

static float if_cnt(float cnt) {
  if (cnt == 0) {
    return COUNT_UNKNOWN;
  }
  return cnt;
}

static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
  float total_cnt = 0;
  for (SwitchRange* sr = lo; sr <= hi; sr++) {
    total_cnt += sr->cnt();
  }
  return total_cnt;
}

class SwitchRanges : public ResourceObj {
public:
  SwitchRange* _lo;
  SwitchRange* _hi;
  SwitchRange* _mid;
  float _cost;

  enum {
    Start,
    LeftDone,
    RightDone,
    Done
  } _state;

  SwitchRanges(SwitchRange *lo, SwitchRange *hi)
    : _lo(lo), _hi(hi), _mid(NULL),
      _cost(0), _state(Start) {
  }

  SwitchRanges()
    : _lo(NULL), _hi(NULL), _mid(NULL),
      _cost(0), _state(Start) {}
};
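// Note: the cost accumulated below is (roughly) the expected number of
// comparisons per switch execution: each subtree contributes its share
// r_cnt/total_cnt of executions for one extra level of tests.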
// Estimate cost of performing a binary search on lo..hi
static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
  GrowableArray<SwitchRanges> tree;
  SwitchRanges root(lo, hi);
  tree.push(root);

  float cost = 0;
  do {
    SwitchRanges& r = *tree.adr_at(tree.length()-1);
    if (r._hi != r._lo) {
      if (r._mid == NULL) {
        float r_cnt = sum_of_cnts(r._lo, r._hi);

        if (r_cnt == 0) {
          tree.pop();
          cost = 0;
          continue;
        }

        SwitchRange* mid = NULL;
        mid = r._lo;
        for (float cnt = 0; ; ) {
          assert(mid <= r._hi, "out of bounds");
          cnt += mid->cnt();
          if (cnt > r_cnt / 2) {
            break;
          }
          mid++;
        }
        assert(mid <= r._hi, "out of bounds");
        r._mid = mid;
        r._cost = r_cnt / total_cnt;
      }
      r._cost += cost;
      if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
        cost = 0;
        r._state = SwitchRanges::LeftDone;
        tree.push(SwitchRanges(r._lo, r._mid-1));
      } else if (r._state < SwitchRanges::RightDone) {
        cost = 0;
        r._state = SwitchRanges::RightDone;
        tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
      } else {
        tree.pop();
        cost = r._cost;
      }
    } else {
      tree.pop();
      cost = r._cost;
    }
  } while (tree.length() > 0);


  return cost;
}

// It sometimes pays off to test most common ranges before the binary search
void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
  uint nr = hi - lo + 1;
  float total_cnt = sum_of_cnts(lo, hi);

  float min = compute_tree_cost(lo, hi, total_cnt);
  float extra = 1;
  float sub = 0;

  SwitchRange* array1 = lo;
  SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);

  SwitchRange* ranges = NULL;

  while (nr >= 2) {
    assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
    ranges = (lo == array1) ? array2 : array1;

    // Find highest frequency range
    SwitchRange* candidate = lo;
    for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
      if (sr->cnt() > candidate->cnt()) {
        candidate = sr;
      }
    }
    SwitchRange most_freq = *candidate;
    if (most_freq.cnt() == 0) {
      break;
    }

    // Copy remaining ranges into another array
    int shift = 0;
    for (uint i = 0; i < nr; i++) {
      SwitchRange* sr = &lo[i];
      if (sr != candidate) {
        ranges[i-shift] = *sr;
      } else {
        shift++;
        if (i > 0 && i < nr-1) {
          SwitchRange prev = lo[i-1];
          prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.cnt());
          if (prev.adjoin(lo[i+1])) {
            shift++;
            i++;
          }
          ranges[i-shift] = prev;
        }
      }
    }
    nr -= shift;

    // Evaluate cost of testing the most common range and performing a
    // binary search on the other ranges
    float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
    if (cost >= min) {
      break;
    }
    // swap arrays
    lo = &ranges[0];
    hi = &ranges[nr-1];

    // It pays off: emit the test for the most common range
    assert(most_freq.cnt() > 0, "must be taken");
    Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
    Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo())));
    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
    jump_if_true_fork(iff, most_freq.dest(), false);

    sub += most_freq.cnt() / total_cnt;
    extra += 1 - sub;
    min = cost;
  }
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".
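  // For instance, with ranges {min_jint..-1}=>D, {0..9}=>..., {10..max_jint}=>D
  // the two huge end ranges share a destination, so one unsigned guard against
  // [0, num_cases) can route all outliers to D and keep the table small.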
  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  float total = sum_of_cnts(lo, hi);
  float cost = compute_tree_cost(lo, hi, total);

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  float trimmed_cnt = 0;
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) {
      trimmed_cnt += lo->cnt();
      lo++;
    }
    if (default_dest == hi->dest()) {
      trimmed_cnt += hi->cnt();
      hi--;
    }
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases > MaxJumpTableSize)
    return false;
  if (UseSwitchProfiling) {
    // MinJumpTableSize is set so with a well balanced binary tree,
    // when the number of ranges is MinJumpTableSize, it's cheaper to
    // go through a JumpNode than a tree of IfNodes. Average cost of a
    // tree of IfNodes with MinJumpTableSize is
    // log2f(MinJumpTableSize) comparisons. So if the cost computed
    // from profile data is less than log2f(MinJumpTableSize) then
    // going with the binary search is cheaper.
    if (cost < log2f(MinJumpTableSize)) {
      return false;
    }
  } else {
    if (num_cases < MinJumpTableSize)
      return false;
  }
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp  = _gvn.transform(new CmpUNode(key_val, size));
    Node*   tst  = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
    IfNode* iff  = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
    jump_if_true_fork(iff, default_dest, trim_ranges && trimmed_cnt == 0);

    total -= trimmed_cnt;
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.

  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
  // Do not use a narrow int type here to prevent the data path from dying
  // while the control path is not removed. This can happen if the type of key_val
  // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast
  // would be replaced by TOP while C2 is not able to fold the corresponding range checks.
  // Set _carry_dependency for the cast to avoid being removed by IGVN.
#ifdef _LP64
  key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */);
#endif

  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Arena* arena = C->comp_arena();
  float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
  int i = 0;
  if (total == 0) {
    for (SwitchRange* r = lo; r <= hi; r++) {
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = 1.0F / num_cases;
      }
    }
  } else {
    for (SwitchRange* r = lo; r <= hi; r++) {
      float prob = r->cnt()/total;
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = prob / (r->hi() - r->lo() + 1);
      }
    }
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature()) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }

  Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total));

  // These are the switch destinations hanging off the jumpnode
  i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), trim_ranges && r->cnt() == 0);
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}
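// Overview of the strategy below: the ranges are split recursively, binary
// search style. With UseSwitchProfiling the pivot is chosen to split the
// profiled counts (rather than the number of ranges) in half, and
// create_jump_tables() may replace a dense subtree with a single JumpNode.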
//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();
  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    if (!trim_ranges) {
      // Ensure that the block's successors are a (duplicate-free) set.
      int successors_counted = 0;  // block occurrences in [hi..lo]
      int unique_successors = switch_block->num_successors();
      for (int i = 0; i < unique_successors; i++) {
        Block* target = switch_block->successor_at(i);

        // Check that the set of successors is the same in both places.
        int successors_found = 0;
        for (SwitchRange* p = lo; p <= hi; p++) {
          if (p->dest() == target->start())  successors_found++;
        }
        assert(successors_found > 0, "successor must be known");
        successors_counted += successors_found;
      }
      assert(successors_counted == (hi-lo)+1, "no unexpected successors");
    }
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val) {
      lo++;
    }
    if (lo->lo() < min_val) {
      lo->setRange(min_val, lo->hi(), lo->dest(), lo->cnt());
    }
    while (hi->lo() > max_val) {
      hi--;
    }
    if (hi->hi() > max_val) {
      hi->setRange(hi->lo(), max_val, hi->dest(), hi->cnt());
    }

    linear_search_switch_ranges(key_val, lo, hi);
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

    SwitchRange* mid = NULL;
    float total_cnt = sum_of_cnts(lo, hi);

    int nr = hi - lo + 1;
    if (UseSwitchProfiling) {
      // Don't keep the binary search tree balanced: pick up mid point
      // that split frequencies in half.
      float cnt = 0;
      for (SwitchRange* sr = lo; sr <= hi; sr++) {
        cnt += sr->cnt();
        if (cnt >= total_cnt / 2) {
          mid = sr;
          break;
        }
      }
    } else {
      mid = lo + nr/2;

      // if there is an easy choice, pivot at a singleton:
      if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

      assert(lo < mid && mid <= hi, "good pivot choice");
      assert(nr != 2 || mid == hi,   "should pick higher of 2");
      assert(nr != 3 || mid == hi-1, "should pick middle of 3");
    }


    Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
      jump_if_false_fork(iff_ne, mid->dest(), trim_ranges && mid->cnt() == 0);

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
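      // E.g. {..-1}=>L1, {0}=>L2, {1..}=>L1: once the "ne" test has sent
      // key == 0 to L2, both remaining ranges reach L1, so an extra "lt"
      // test would not discriminate anything.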
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::lt instead of BoolTest::gt
        float cnt = sum_of_cnts(lo, mid-1);
        IfNode *iff_lt  = jump_if_fork_int(key_val, test_val, BoolTest::lt, if_prob(cnt, total_cnt), if_cnt(cnt));
        Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_lt) );
        Node   *iffalse = _gvn.transform( new IfFalseNode(iff_lt) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), trim_ranges && cnt == 0);
      } else {
        Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    if (mid == lo) {
      if (mid->is_singleton()) {
        jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
      } else {
        jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
      }
    } else {
      jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
    }
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect on jsr on stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

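// Recognizes a Cmp whose input is a ProfileBooleanNode, i.e. a branch with
// injected profiling: the node's exact true/false counts are then used
// instead of MethodData counts (no scaling needed).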
static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int  true_cnt = profile->true_count();

    // Counts matching depends on the actual test operation (::eq or ::ne).
    // No need to scale the counts because profile injection was designed
    // to feed exact counts into VM.
    taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
    not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken and set the "cnt" field.  Returns a -1.0
// if we need to use static prediction for some reason.
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt = COUNT_UNKNOWN;

  int     taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that individual counters are positive first, otherwise the sum can become positive.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)  prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
"min" : "never"; 1612 char prob_str_buf[30]; 1613 if (prob_str == NULL) { 1614 jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob); 1615 prob_str = prob_str_buf; 1616 } 1617 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 1618 iter().get_dest(), taken, not_taken, cnt, prob_str); 1619 } 1620 return prob; 1621 } 1622 1623 //-----------------------------branch_prediction------------------------------- 1624 float Parse::branch_prediction(float& cnt, 1625 BoolTest::mask btest, 1626 int target_bci, 1627 Node* test) { 1628 float prob = dynamic_branch_prediction(cnt, btest, test); 1629 // If prob is unknown, switch to static prediction 1630 if (prob != PROB_UNKNOWN) return prob; 1631 1632 prob = PROB_FAIR; // Set default value 1633 if (btest == BoolTest::eq) // Exactly equal test? 1634 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 1635 else if (btest == BoolTest::ne) 1636 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 1637 1638 // If this is a conditional test guarding a backwards branch, 1639 // assume its a loop-back edge. Make it a likely taken branch. 1640 if (target_bci < bci()) { 1641 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 1642 // Since it's an OSR, we probably have profile data, but since 1643 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 1644 // Let's make a special check here for completely zero counts. 1645 ciMethodData* methodData = method()->method_data(); 1646 if (!methodData->is_empty()) { 1647 ciProfileData* data = methodData->bci_to_data(bci()); 1648 // Only stop for truly zero counts, which mean an unknown part 1649 // of the OSR-ed method, and we want to deopt to gather more stats. 1650 // If you have ANY counts, then this loop is simply 'cold' relative 1651 // to the OSR loop. 1652 if (data == NULL || 1653 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 1654 // This is the only way to return PROB_UNKNOWN: 1655 return PROB_UNKNOWN; 1656 } 1657 } 1658 } 1659 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 1660 } 1661 1662 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 1663 return prob; 1664 } 1665 1666 // The magic constants are chosen so as to match the output of 1667 // branch_prediction() when the profile reports a zero taken count. 1668 // It is important to distinguish zero counts unambiguously, because 1669 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 1670 // very small but nonzero probabilities, which if confused with zero 1671 // counts would keep the program recompiling indefinitely. 1672 bool Parse::seems_never_taken(float prob) const { 1673 return prob < PROB_MIN; 1674 } 1675 1676 // True if the comparison seems to be the kind that will not change its 1677 // statistics from true to false. See comments in adjust_map_after_if. 1678 // This question is only asked along paths which are already 1679 // classifed as untaken (by seems_never_taken), so really, 1680 // if a path is never taken, its controlling comparison is 1681 // already acting in a stable fashion. If the comparison 1682 // seems stable, we will put an expensive uncommon trap 1683 // on the untaken path. 
1684 bool Parse::seems_stable_comparison() const { 1685 if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) { 1686 return false; 1687 } 1688 return true; 1689 } 1690 1691 //-------------------------------repush_if_args-------------------------------- 1692 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp. 1693 inline int Parse::repush_if_args() { 1694 if (PrintOpto && WizardMode) { 1695 tty->print("defending against excessive implicit null exceptions on %s @%d in ", 1696 Bytecodes::name(iter().cur_bc()), iter().cur_bci()); 1697 method()->print_name(); tty->cr(); 1698 } 1699 int bc_depth = - Bytecodes::depth(iter().cur_bc()); 1700 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches"); 1701 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms 1702 assert(argument(0) != NULL, "must exist"); 1703 assert(bc_depth == 1 || argument(1) != NULL, "two must exist"); 1704 inc_sp(bc_depth); 1705 return bc_depth; 1706 } 1707 1708 //----------------------------------do_ifnull---------------------------------- 1709 void Parse::do_ifnull(BoolTest::mask btest, Node *c) { 1710 int target_bci = iter().get_dest(); 1711 1712 Block* branch_block = successor_for_bci(target_bci); 1713 Block* next_block = successor_for_bci(iter().next_bci()); 1714 1715 float cnt; 1716 float prob = branch_prediction(cnt, btest, target_bci, c); 1717 if (prob == PROB_UNKNOWN) { 1718 // (An earlier version of do_ifnull omitted this trap for OSR methods.) 1719 if (PrintOpto && Verbose) { 1720 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1721 } 1722 repush_if_args(); // to gather stats on loop 1723 uncommon_trap(Deoptimization::Reason_unreached, 1724 Deoptimization::Action_reinterpret, 1725 NULL, "cold"); 1726 if (C->eliminate_boxing()) { 1727 // Mark the successor blocks as parsed 1728 branch_block->next_path_num(); 1729 next_block->next_path_num(); 1730 } 1731 return; 1732 } 1733 1734 NOT_PRODUCT(explicit_null_checks_inserted++); 1735 1736 // Generate real control flow 1737 Node *tst = _gvn.transform( new BoolNode( c, btest ) ); 1738 1739 // Sanity check the probability value 1740 assert(prob > 0.0f,"Bad probability in Parser"); 1741 // Need xform to put node in hash table 1742 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt ); 1743 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1744 // True branch 1745 { PreserveJVMState pjvms(this); 1746 Node* iftrue = _gvn.transform( new IfTrueNode (iff) ); 1747 set_control(iftrue); 1748 1749 if (stopped()) { // Path is dead? 1750 NOT_PRODUCT(explicit_null_checks_elided++); 1751 if (C->eliminate_boxing()) { 1752 // Mark the successor block as parsed 1753 branch_block->next_path_num(); 1754 } 1755 } else { // Path is live. 1756 adjust_map_after_if(btest, c, prob, branch_block); 1757 if (!stopped()) { 1758 merge(target_bci); 1759 } 1760 } 1761 } 1762 1763 // False branch 1764 Node* iffalse = _gvn.transform( new IfFalseNode(iff) ); 1765 set_control(iffalse); 1766 1767 if (stopped()) { // Path is dead? 1768 NOT_PRODUCT(explicit_null_checks_elided++); 1769 if (C->eliminate_boxing()) { 1770 // Mark the successor block as parsed 1771 next_block->next_path_num(); 1772 } 1773 } else { // Path is live. 
1774 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block); 1775 } 1776 } 1777 1778 //------------------------------------do_if------------------------------------ 1779 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) { 1780 int target_bci = iter().get_dest(); 1781 1782 Block* branch_block = successor_for_bci(target_bci); 1783 Block* next_block = successor_for_bci(iter().next_bci()); 1784 1785 float cnt; 1786 float prob = branch_prediction(cnt, btest, target_bci, c); 1787 float untaken_prob = 1.0 - prob; 1788 1789 if (prob == PROB_UNKNOWN) { 1790 if (PrintOpto && Verbose) { 1791 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1792 } 1793 repush_if_args(); // to gather stats on loop 1794 uncommon_trap(Deoptimization::Reason_unreached, 1795 Deoptimization::Action_reinterpret, 1796 NULL, "cold"); 1797 if (C->eliminate_boxing()) { 1798 // Mark the successor blocks as parsed 1799 branch_block->next_path_num(); 1800 next_block->next_path_num(); 1801 } 1802 return; 1803 } 1804 1805 // Sanity check the probability value 1806 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser"); 1807 1808 bool taken_if_true = true; 1809 // Convert BoolTest to canonical form: 1810 if (!BoolTest(btest).is_canonical()) { 1811 btest = BoolTest(btest).negate(); 1812 taken_if_true = false; 1813 // prob is NOT updated here; it remains the probability of the taken 1814 // path (as opposed to the prob of the path guarded by an 'IfTrueNode'). 1815 } 1816 assert(btest != BoolTest::eq, "!= is the only canonical exact test"); 1817 1818 Node* tst0 = new BoolNode(c, btest); 1819 Node* tst = _gvn.transform(tst0); 1820 BoolTest::mask taken_btest = BoolTest::illegal; 1821 BoolTest::mask untaken_btest = BoolTest::illegal; 1822 1823 if (tst->is_Bool()) { 1824 // Refresh c from the transformed bool node, since it may be 1825 // simpler than the original c. Also re-canonicalize btest. 1826 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). 1827 // That can arise from statements like: if (x instanceof C) ... 1828 if (tst != tst0) { 1829 // Canonicalize one more time since transform can change it. 1830 btest = tst->as_Bool()->_test._test; 1831 if (!BoolTest(btest).is_canonical()) { 1832 // Reverse edges one more time... 1833 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); 1834 btest = tst->as_Bool()->_test._test; 1835 assert(BoolTest(btest).is_canonical(), "sanity"); 1836 taken_if_true = !taken_if_true; 1837 } 1838 c = tst->in(1); 1839 } 1840 BoolTest::mask neg_btest = BoolTest(btest).negate(); 1841 taken_btest = taken_if_true ? btest : neg_btest; 1842 untaken_btest = taken_if_true ? neg_btest : btest; 1843 } 1844 1845 // Generate real control flow 1846 float true_prob = (taken_if_true ? 
prob : untaken_prob); 1847 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt); 1848 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1849 Node* taken_branch = new IfTrueNode(iff); 1850 Node* untaken_branch = new IfFalseNode(iff); 1851 if (!taken_if_true) { // Finish conversion to canonical form 1852 Node* tmp = taken_branch; 1853 taken_branch = untaken_branch; 1854 untaken_branch = tmp; 1855 } 1856 1857 // Branch is taken: 1858 { PreserveJVMState pjvms(this); 1859 taken_branch = _gvn.transform(taken_branch); 1860 set_control(taken_branch); 1861 1862 if (stopped()) { 1863 if (C->eliminate_boxing() && !new_path) { 1864 // Mark the successor block as parsed (if we haven't created a new path) 1865 branch_block->next_path_num(); 1866 } 1867 } else { 1868 adjust_map_after_if(taken_btest, c, prob, branch_block); 1869 if (!stopped()) { 1870 if (new_path) { 1871 // Merge by using a new path 1872 merge_new_path(target_bci); 1873 } else if (ctrl_taken != NULL) { 1874 // Don't merge but save taken branch to be wired by caller 1875 *ctrl_taken = control(); 1876 } else { 1877 merge(target_bci); 1878 } 1879 } 1880 } 1881 } 1882 1883 untaken_branch = _gvn.transform(untaken_branch); 1884 set_control(untaken_branch); 1885 1886 // Branch not taken. 1887 if (stopped() && ctrl_taken == NULL) { 1888 if (C->eliminate_boxing()) { 1889 // Mark the successor block as parsed (if caller does not re-wire control flow) 1890 next_block->next_path_num(); 1891 } 1892 } else { 1893 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block); 1894 } 1895 } 1896 1897 1898 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) { 1899 if (t->speculative() == NULL) { 1900 return ProfileUnknownNull; 1901 } 1902 if (t->speculative_always_null()) { 1903 return ProfileAlwaysNull; 1904 } 1905 if (t->speculative_maybe_null()) { 1906 return ProfileMaybeNull; 1907 } 1908 return ProfileNeverNull; 1909 } 1910 1911 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) { 1912 inc_sp(2); 1913 Node* cast = null_check_common(input, T_OBJECT, true, NULL, 1914 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) && 1915 speculative_ptr_kind(tinput) == ProfileAlwaysNull); 1916 dec_sp(2); 1917 if (btest == BoolTest::ne) { 1918 { 1919 PreserveJVMState pjvms(this); 1920 replace_in_map(input, cast); 1921 int target_bci = iter().get_dest(); 1922 merge(target_bci); 1923 } 1924 record_for_igvn(eq_region); 1925 set_control(_gvn.transform(eq_region)); 1926 } else { 1927 replace_in_map(input, cast); 1928 } 1929 } 1930 1931 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) { 1932 inc_sp(2); 1933 null_ctl = top(); 1934 Node* cast = null_check_oop(input, &null_ctl, 1935 input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)), 1936 false, 1937 speculative_ptr_kind(tinput) == ProfileNeverNull && 1938 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)); 1939 dec_sp(2); 1940 assert(!stopped(), "null input should have been caught earlier"); 1941 if (cast->is_InlineType()) { 1942 cast = cast->as_InlineType()->get_oop(); 1943 } 1944 return cast; 1945 } 1946 1947 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) { 1948 Node* ne_region = new RegionNode(1); 1949 
Node* null_ctl; 1950 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl); 1951 ne_region->add_req(null_ctl); 1952 1953 Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast); 1954 { 1955 PreserveJVMState pjvms(this); 1956 inc_sp(2); 1957 set_control(slow_ctl); 1958 Deoptimization::DeoptReason reason; 1959 if (tinput->speculative_type() != NULL && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) { 1960 reason = Deoptimization::Reason_speculate_class_check; 1961 } else { 1962 reason = Deoptimization::Reason_class_check; 1963 } 1964 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile); 1965 } 1966 ne_region->add_req(control()); 1967 1968 record_for_igvn(ne_region); 1969 set_control(_gvn.transform(ne_region)); 1970 if (btest == BoolTest::ne) { 1971 { 1972 PreserveJVMState pjvms(this); 1973 if (null_ctl == top()) { 1974 replace_in_map(input, cast); 1975 } 1976 int target_bci = iter().get_dest(); 1977 merge(target_bci); 1978 } 1979 record_for_igvn(eq_region); 1980 set_control(_gvn.transform(eq_region)); 1981 } else { 1982 if (null_ctl == top()) { 1983 replace_in_map(input, cast); 1984 } 1985 set_control(_gvn.transform(ne_region)); 1986 } 1987 } 1988 1989 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) { 1990 Node* ne_region = new RegionNode(1); 1991 Node* null_ctl; 1992 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl); 1993 ne_region->add_req(null_ctl); 1994 1995 { 1996 BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX); 1997 inc_sp(2); 1998 uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile); 1999 } 2000 2001 ne_region->add_req(control()); 2002 2003 record_for_igvn(ne_region); 2004 set_control(_gvn.transform(ne_region)); 2005 if (btest == BoolTest::ne) { 2006 { 2007 PreserveJVMState pjvms(this); 2008 if (null_ctl == top()) { 2009 replace_in_map(input, cast); 2010 } 2011 int target_bci = iter().get_dest(); 2012 merge(target_bci); 2013 } 2014 record_for_igvn(eq_region); 2015 set_control(_gvn.transform(eq_region)); 2016 } else { 2017 if (null_ctl == top()) { 2018 replace_in_map(input, cast); 2019 } 2020 set_control(_gvn.transform(ne_region)); 2021 } 2022 } 2023 2024 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) { 2025 ciKlass* left_type = NULL; 2026 ciKlass* right_type = NULL; 2027 ProfilePtrKind left_ptr = ProfileUnknownNull; 2028 ProfilePtrKind right_ptr = ProfileUnknownNull; 2029 bool left_inline_type = true; 2030 bool right_inline_type = true; 2031 2032 // Leverage profiling at acmp 2033 if (UseACmpProfile) { 2034 method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type); 2035 if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) { 2036 left_type = NULL; 2037 right_type = NULL; 2038 left_inline_type = true; 2039 right_inline_type = true; 2040 } 2041 if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) { 2042 left_ptr = ProfileUnknownNull; 2043 right_ptr = ProfileUnknownNull; 2044 } 2045 } 2046 2047 if (UseTypeSpeculation) { 2048 record_profile_for_speculation(left, left_type, left_ptr); 2049 record_profile_for_speculation(right, right_type, right_ptr); 2050 } 2051 2052 if (!EnableValhalla) { 2053 Node* cmp = CmpP(left, right); 2054 cmp = optimize_cmp_with_klass(cmp); 2055 do_if(btest, cmp); 2056 return; 2057 } 2058 2059 // 
Check for equality before potentially allocating 2060 if (left == right) { 2061 do_if(btest, makecon(TypeInt::CC_EQ)); 2062 return; 2063 } 2064 2065 // Allocate inline type operands and re-execute on deoptimization 2066 if (left->is_InlineTypeBase()) { 2067 if (_gvn.type(right)->is_zero_type() || 2068 (right->is_InlineTypeBase() && _gvn.type(right->as_InlineTypeBase()->get_is_init())->is_zero_type())) { 2069 // Null checking a scalarized but nullable inline type. Check the IsInit 2070 // input instead of the oop input to avoid keeping buffer allocations alive. 2071 Node* cmp = CmpI(left->as_InlineTypeBase()->get_is_init(), intcon(0)); 2072 do_if(btest, cmp); 2073 return; 2074 } else if (left->is_InlineType()){ 2075 PreserveReexecuteState preexecs(this); 2076 inc_sp(2); 2077 jvms()->set_should_reexecute(true); 2078 left = left->as_InlineType()->buffer(this)->get_oop(); 2079 } 2080 } 2081 if (right->is_InlineType()) { 2082 PreserveReexecuteState preexecs(this); 2083 inc_sp(2); 2084 jvms()->set_should_reexecute(true); 2085 right = right->as_InlineType()->buffer(this)->get_oop(); 2086 } 2087 2088 // First, do a normal pointer comparison 2089 const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr(); 2090 const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr(); 2091 Node* cmp = CmpP(left, right); 2092 cmp = optimize_cmp_with_klass(cmp); 2093 if (tleft == NULL || !tleft->can_be_inline_type() || 2094 tright == NULL || !tright->can_be_inline_type()) { 2095 // This is sufficient, if one of the operands can't be an inline type 2096 do_if(btest, cmp); 2097 return; 2098 } 2099 Node* eq_region = NULL; 2100 if (btest == BoolTest::eq) { 2101 do_if(btest, cmp, true); 2102 if (stopped()) { 2103 return; 2104 } 2105 } else { 2106 assert(btest == BoolTest::ne, "only eq or ne"); 2107 Node* is_not_equal = NULL; 2108 eq_region = new RegionNode(3); 2109 { 2110 PreserveJVMState pjvms(this); 2111 do_if(btest, cmp, false, &is_not_equal); 2112 if (!stopped()) { 2113 eq_region->init_req(1, control()); 2114 } 2115 } 2116 if (is_not_equal == NULL || is_not_equal->is_top()) { 2117 record_for_igvn(eq_region); 2118 set_control(_gvn.transform(eq_region)); 2119 return; 2120 } 2121 set_control(is_not_equal); 2122 } 2123 2124 // Prefer speculative types if available 2125 if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) { 2126 if (tleft->speculative_type() != NULL) { 2127 left_type = tleft->speculative_type(); 2128 } 2129 if (tright->speculative_type() != NULL) { 2130 right_type = tright->speculative_type(); 2131 } 2132 } 2133 2134 if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) { 2135 ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft); 2136 if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) { 2137 left_ptr = speculative_left_ptr; 2138 } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) { 2139 left_ptr = speculative_left_ptr; 2140 } 2141 } 2142 if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) { 2143 ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright); 2144 if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) { 2145 right_ptr = speculative_right_ptr; 2146 } else if (speculative_right_ptr == ProfileNeverNull && 
!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2147 right_ptr = speculative_right_ptr;
2148 }
2149 }
2150
2151 if (left_ptr == ProfileAlwaysNull) {
2152 // Comparison with null. Assert the input is indeed null and we're done.
2153 acmp_always_null_input(left, tleft, btest, eq_region);
2154 return;
2155 }
2156 if (right_ptr == ProfileAlwaysNull) {
2157 // Comparison with null. Assert the input is indeed null and we're done.
2158 acmp_always_null_input(right, tright, btest, eq_region);
2159 return;
2160 }
2161 if (left_type != NULL && !left_type->is_inlinetype()) {
2162 // Comparison with an object of known type
2163 acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2164 return;
2165 }
2166 if (right_type != NULL && !right_type->is_inlinetype()) {
2167 // Comparison with an object of known type
2168 acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2169 return;
2170 }
2171 if (!left_inline_type) {
2172 // Comparison with an object known not to be an inline type
2173 acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2174 return;
2175 }
2176 if (!right_inline_type) {
2177 // Comparison with an object known not to be an inline type
2178 acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2179 return;
2180 }
2181
2182 // Pointers are not equal, check if the right operand is non-null
2183 Node* ne_region = new RegionNode(6);
2184 Node* null_ctl;
2185 Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2186 ne_region->init_req(1, null_ctl);
2187
2188 // The right operand is non-null, check if it is an inline type
2189 Node* is_value = inline_type_test(not_null_right);
2190 IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2191 Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2192 ne_region->init_req(2, not_value);
2193 set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2194
2195 // The right operand is an inline type, check if the left operand is non-null
2196 Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2197 ne_region->init_req(3, null_ctl);
2198
2199 // Check if both operands are of the same class.
2200 Node* kls_left = load_object_klass(not_null_left);
2201 Node* kls_right = load_object_klass(not_null_right);
2202 Node* kls_cmp = CmpP(kls_left, kls_right);
2203 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2204 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2205 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2206 set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2207 ne_region->init_req(4, kls_ne);
2208
2209 if (stopped()) {
2210 record_for_igvn(ne_region);
2211 set_control(_gvn.transform(ne_region));
2212 if (btest == BoolTest::ne) {
2213 {
2214 PreserveJVMState pjvms(this);
2215 int target_bci = iter().get_dest();
2216 merge(target_bci);
2217 }
2218 record_for_igvn(eq_region);
2219 set_control(_gvn.transform(eq_region));
2220 }
2221 return;
2222 }
2223
2224 // Both operands are inline types of the same class, so we need to perform a
2225 // substitutability test. Delegate to PrimitiveObjectMethods::isSubstitutable().
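// (Conceptually, Valhalla `acmp` treats two inline objects of the same
// class as equal iff their fields are pairwise substitutable, recursing
// into nested inline-type fields; the runtime call emitted below performs
// that field-wise comparison.)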
2226 Node* ne_io_phi = PhiNode::make(ne_region, i_o()); 2227 Node* mem = reset_memory(); 2228 Node* ne_mem_phi = PhiNode::make(ne_region, mem); 2229 2230 Node* eq_io_phi = NULL; 2231 Node* eq_mem_phi = NULL; 2232 if (eq_region != NULL) { 2233 eq_io_phi = PhiNode::make(eq_region, i_o()); 2234 eq_mem_phi = PhiNode::make(eq_region, mem); 2235 } 2236 2237 set_all_memory(mem); 2238 2239 kill_dead_locals(); 2240 ciMethod* subst_method = ciEnv::current()->PrimitiveObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature()); 2241 CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method); 2242 call->set_override_symbolic_info(true); 2243 call->init_req(TypeFunc::Parms, not_null_left); 2244 call->init_req(TypeFunc::Parms+1, not_null_right); 2245 inc_sp(2); 2246 set_edges_for_java_call(call, false, false); 2247 Node* ret = set_results_for_java_call(call, false, true); 2248 dec_sp(2); 2249 2250 // Test the return value of PrimitiveObjectMethods::isSubstitutable() 2251 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1))); 2252 Node* ctl = C->top(); 2253 if (btest == BoolTest::eq) { 2254 PreserveJVMState pjvms(this); 2255 do_if(btest, subst_cmp); 2256 if (!stopped()) { 2257 ctl = control(); 2258 } 2259 } else { 2260 assert(btest == BoolTest::ne, "only eq or ne"); 2261 PreserveJVMState pjvms(this); 2262 do_if(btest, subst_cmp, false, &ctl); 2263 if (!stopped()) { 2264 eq_region->init_req(2, control()); 2265 eq_io_phi->init_req(2, i_o()); 2266 eq_mem_phi->init_req(2, reset_memory()); 2267 } 2268 } 2269 ne_region->init_req(5, ctl); 2270 ne_io_phi->init_req(5, i_o()); 2271 ne_mem_phi->init_req(5, reset_memory()); 2272 2273 record_for_igvn(ne_region); 2274 set_control(_gvn.transform(ne_region)); 2275 set_i_o(_gvn.transform(ne_io_phi)); 2276 set_all_memory(_gvn.transform(ne_mem_phi)); 2277 2278 if (btest == BoolTest::ne) { 2279 { 2280 PreserveJVMState pjvms(this); 2281 int target_bci = iter().get_dest(); 2282 merge(target_bci); 2283 } 2284 2285 record_for_igvn(eq_region); 2286 set_control(_gvn.transform(eq_region)); 2287 set_i_o(_gvn.transform(eq_io_phi)); 2288 set_all_memory(_gvn.transform(eq_mem_phi)); 2289 } 2290 } 2291 2292 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { 2293 // Don't want to speculate on uncommon traps when running with -Xcomp 2294 if (!UseInterpreter) { 2295 return false; 2296 } 2297 return (seems_never_taken(prob) && seems_stable_comparison()); 2298 } 2299 2300 void Parse::maybe_add_predicate_after_if(Block* path) { 2301 if (path->is_SEL_head() && path->preds_parsed() == 0) { 2302 // Add predicates at bci of if dominating the loop so traps can be 2303 // recorded on the if's profile data 2304 int bc_depth = repush_if_args(); 2305 add_empty_predicates(); 2306 dec_sp(bc_depth); 2307 path->set_has_predicates(); 2308 } 2309 } 2310 2311 2312 //----------------------------adjust_map_after_if------------------------------ 2313 // Adjust the JVM state to reflect the result of taking this path. 2314 // Basically, it means inspecting the CmpNode controlling this 2315 // branch, seeing how it constrains a tested value, and then 2316 // deciding if it's worth our while to encode this constraint 2317 // as graph nodes in the current abstract interpretation map. 
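// (For example, along the taken arm of `if (x == null)` the map can replace
// x with the null constant, while along the untaken arm x can be replaced by
// a not-null cast; see sharpen_type_after_if below.)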
2318 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) { 2319 if (!c->is_Cmp()) { 2320 maybe_add_predicate_after_if(path); 2321 return; 2322 } 2323 2324 if (stopped() || btest == BoolTest::illegal) { 2325 return; // nothing to do 2326 } 2327 2328 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 2329 2330 if (path_is_suitable_for_uncommon_trap(prob)) { 2331 repush_if_args(); 2332 uncommon_trap(Deoptimization::Reason_unstable_if, 2333 Deoptimization::Action_reinterpret, 2334 NULL, 2335 (is_fallthrough ? "taken always" : "taken never")); 2336 return; 2337 } 2338 2339 Node* val = c->in(1); 2340 Node* con = c->in(2); 2341 const Type* tcon = _gvn.type(con); 2342 const Type* tval = _gvn.type(val); 2343 bool have_con = tcon->singleton(); 2344 if (tval->singleton()) { 2345 if (!have_con) { 2346 // Swap, so constant is in con. 2347 con = val; 2348 tcon = tval; 2349 val = c->in(2); 2350 tval = _gvn.type(val); 2351 btest = BoolTest(btest).commute(); 2352 have_con = true; 2353 } else { 2354 // Do we have two constants? Then leave well enough alone. 2355 have_con = false; 2356 } 2357 } 2358 if (!have_con) { // remaining adjustments need a con 2359 maybe_add_predicate_after_if(path); 2360 return; 2361 } 2362 2363 sharpen_type_after_if(btest, con, tcon, val, tval); 2364 maybe_add_predicate_after_if(path); 2365 } 2366 2367 2368 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { 2369 Node* ldk; 2370 if (n->is_DecodeNKlass()) { 2371 if (n->in(1)->Opcode() != Op_LoadNKlass) { 2372 return NULL; 2373 } else { 2374 ldk = n->in(1); 2375 } 2376 } else if (n->Opcode() != Op_LoadKlass) { 2377 return NULL; 2378 } else { 2379 ldk = n; 2380 } 2381 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); 2382 2383 Node* adr = ldk->in(MemNode::Address); 2384 intptr_t off = 0; 2385 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); 2386 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? 2387 return NULL; 2388 const TypePtr* tp = gvn->type(obj)->is_ptr(); 2389 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 2390 return NULL; 2391 2392 return obj; 2393 } 2394 2395 void Parse::sharpen_type_after_if(BoolTest::mask btest, 2396 Node* con, const Type* tcon, 2397 Node* val, const Type* tval) { 2398 // Look for opportunities to sharpen the type of a node 2399 // whose klass is compared with a constant klass. 2400 if (btest == BoolTest::eq && tcon->isa_klassptr()) { 2401 Node* obj = extract_obj_from_klass_load(&_gvn, val); 2402 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); 2403 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { 2404 // Found: 2405 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) 2406 // or the narrowOop equivalent. 2407 const Type* obj_type = _gvn.type(obj); 2408 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); 2409 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && 2410 tboth->higher_equal(obj_type)) { 2411 // obj has to be of the exact type Foo if the CmpP succeeds. 
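// (This shape typically arises from source like
// `if (obj.getClass() == Foo.class) ...`; on the taken path obj can then
// be treated as an exact Foo.)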
2412 int obj_in_map = map()->find_edge(obj);
2413 JVMState* jvms = this->jvms();
2414 if (obj_in_map >= 0 &&
2415 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2416 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2417 const Type* tcc = ccast->as_Type()->type();
2418 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2419 // Delay transform() call to allow recovery of pre-cast value
2420 // at the control merge.
2421 _gvn.set_type_bottom(ccast);
2422 record_for_igvn(ccast);
2423 // Here's the payoff.
2424 replace_in_map(obj, ccast);
2425 }
2426 }
2427 }
2428 }
2429
2430 int val_in_map = map()->find_edge(val);
2431 if (val_in_map < 0) return; // replace_in_map would be useless
2432 {
2433 JVMState* jvms = this->jvms();
2434 if (!(jvms->is_loc(val_in_map) ||
2435 jvms->is_stk(val_in_map)))
2436 return; // again, it would be useless
2437 }
2438
2439 // Check for a comparison to a constant, and "know" that the compared
2440 // value is constrained on this path.
2441 assert(tcon->singleton(), "");
2442 ConstraintCastNode* ccast = NULL;
2443 Node* cast = NULL;
2444
2445 switch (btest) {
2446 case BoolTest::eq: // Constant test?
2447 {
2448 const Type* tboth = tcon->join_speculative(tval);
2449 if (tboth == tval) break; // Nothing to gain.
2450 if (tcon->isa_int()) {
2451 ccast = new CastIINode(val, tboth);
2452 } else if (tcon == TypePtr::NULL_PTR) {
2453 // Cast to null, but keep the pointer identity temporarily live.
2454 ccast = new CastPPNode(val, tboth);
2455 } else {
2456 const TypeF* tf = tcon->isa_float_constant();
2457 const TypeD* td = tcon->isa_double_constant();
2458 // Exclude tests vs float/double 0 as these could be
2459 // either +0 or -0. Just because you are equal to +0
2460 // doesn't mean you ARE +0!
2461 // Note, following code also replaces Long and Oop values.
2462 if ((!tf || tf->_f != 0.0) &&
2463 (!td || td->_d != 0.0))
2464 cast = con; // Replace non-constant val by con.
2465 }
2466 }
2467 break;
2468
2469 case BoolTest::ne:
2470 if (tcon == TypePtr::NULL_PTR) {
2471 cast = cast_not_null(val, false);
2472 }
2473 break;
2474
2475 default:
2476 // (At this point we could record int range types with CastII.)
2477 break;
2478 }
2479
2480 if (ccast != NULL) {
2481 const Type* tcc = ccast->as_Type()->type();
2482 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
2483 // Delay transform() call to allow recovery of pre-cast value
2484 // at the control merge.
2485 ccast->set_req(0, control());
2486 _gvn.set_type_bottom(ccast);
2487 record_for_igvn(ccast);
2488 cast = ccast;
2489 }
2490
2491 if (cast != NULL) { // Here's the payoff.
2492 replace_in_map(val, cast);
2493 }
2494 }
2495
2496 /**
2497 * Use speculative type to optimize CmpP node: if comparison is
2498 * against the low level class, cast the object to the speculative
2499 * type if any. CmpP should then go away.
2500 *
2501 * @param c expected CmpP node
2502 * @return result of CmpP on object cast to speculative type
2503 *
2504 */
2505 Node* Parse::optimize_cmp_with_klass(Node* c) {
2506 // If this is transformed by the _gvn to a comparison with the low
2507 // level klass then we may be able to use speculation
2508 if (c->Opcode() == Op_CmpP &&
2509 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2510 c->in(2)->is_Con()) {
2511 Node* load_klass = NULL;
2512 Node* decode = NULL;
2513 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2514 decode = c->in(1);
2515 load_klass = c->in(1)->in(1);
2516 } else {
2517 load_klass = c->in(1);
2518 }
2519 if (load_klass->in(2)->is_AddP()) {
2520 Node* addp = load_klass->in(2);
2521 Node* obj = addp->in(AddPNode::Address);
2522 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2523 if (obj_type->speculative_type_not_null() != NULL) {
2524 ciKlass* k = obj_type->speculative_type();
2525 inc_sp(2);
2526 obj = maybe_cast_profiled_obj(obj, k);
2527 dec_sp(2);
2528 if (obj->is_InlineType()) {
2529 assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2530 obj = obj->as_InlineType()->get_oop();
2531 }
2532 // Make the CmpP use the casted obj
2533 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2534 load_klass = load_klass->clone();
2535 load_klass->set_req(2, addp);
2536 load_klass = _gvn.transform(load_klass);
2537 if (decode != NULL) {
2538 decode = decode->clone();
2539 decode->set_req(1, load_klass);
2540 load_klass = _gvn.transform(decode);
2541 }
2542 c = c->clone();
2543 c->set_req(1, load_klass);
2544 c = _gvn.transform(c);
2545 }
2546 }
2547 }
2548 return c;
2549 }
2550
2551 //------------------------------do_one_bytecode--------------------------------
2552 // Parse this bytecode, and alter the Parser's JVM->Node mapping
2553 void Parse::do_one_bytecode() {
2554 Node *a, *b, *c, *d; // Handy temps
2555 BoolTest::mask btest;
2556 int i;
2557
2558 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
2559
2560 if (C->check_node_count(NodeLimitFudgeFactor * 5,
2561 "out of nodes parsing method")) {
2562 return;
2563 }
2564
2565 #ifdef ASSERT
2566 // for setting breakpoints
2567 if (TraceOptoParse) {
2568 tty->print(" @");
2569 dump_bci(bci());
2570 tty->cr();
2571 }
2572 #endif
2573
2574 switch (bc()) {
2575 case Bytecodes::_nop:
2576 // do nothing
2577 break;
2578 case Bytecodes::_lconst_0:
2579 push_pair(longcon(0));
2580 break;
2581
2582 case Bytecodes::_lconst_1:
2583 push_pair(longcon(1));
2584 break;
2585
2586 case Bytecodes::_fconst_0:
2587 push(zerocon(T_FLOAT));
2588 break;
2589
2590 case Bytecodes::_fconst_1:
2591 push(makecon(TypeF::ONE));
2592 break;
2593
2594 case Bytecodes::_fconst_2:
2595 push(makecon(TypeF::make(2.0f)));
2596 break;
2597
2598 case Bytecodes::_dconst_0:
2599 push_pair(zerocon(T_DOUBLE));
2600 break;
2601
2602 case Bytecodes::_dconst_1:
2603 push_pair(makecon(TypeD::ONE));
2604 break;
2605
2606 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
2607 case Bytecodes::_iconst_0: push(intcon( 0)); break;
2608 case Bytecodes::_iconst_1: push(intcon( 1)); break;
2609 case Bytecodes::_iconst_2: push(intcon( 2)); break;
2610 case Bytecodes::_iconst_3: push(intcon( 3)); break;
2611 case Bytecodes::_iconst_4: push(intcon( 4)); break;
2612 case Bytecodes::_iconst_5: push(intcon( 5)); break;
2613 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break;
2614 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break;
2615 case 
Bytecodes::_aconst_null: push(null()); break; 2616 2617 case Bytecodes::_ldc: 2618 case Bytecodes::_ldc_w: 2619 case Bytecodes::_ldc2_w: { 2620 ciConstant constant = iter().get_constant(); 2621 if (constant.is_loaded()) { 2622 const Type* con_type = Type::make_from_constant(constant); 2623 if (con_type != NULL) { 2624 push_node(con_type->basic_type(), makecon(con_type)); 2625 } 2626 } else { 2627 // If the constant is unresolved or in error state, run this BC in the interpreter. 2628 if (iter().is_in_error()) { 2629 uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unhandled, 2630 Deoptimization::Action_none), 2631 NULL, "constant in error state", true /* must_throw */); 2632 2633 } else { 2634 int index = iter().get_constant_pool_index(); 2635 uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unloaded, 2636 Deoptimization::Action_reinterpret, 2637 index), 2638 NULL, "unresolved constant", false /* must_throw */); 2639 } 2640 } 2641 break; 2642 } 2643 2644 case Bytecodes::_aload_0: 2645 push( local(0) ); 2646 break; 2647 case Bytecodes::_aload_1: 2648 push( local(1) ); 2649 break; 2650 case Bytecodes::_aload_2: 2651 push( local(2) ); 2652 break; 2653 case Bytecodes::_aload_3: 2654 push( local(3) ); 2655 break; 2656 case Bytecodes::_aload: 2657 push( local(iter().get_index()) ); 2658 break; 2659 2660 case Bytecodes::_fload_0: 2661 case Bytecodes::_iload_0: 2662 push( local(0) ); 2663 break; 2664 case Bytecodes::_fload_1: 2665 case Bytecodes::_iload_1: 2666 push( local(1) ); 2667 break; 2668 case Bytecodes::_fload_2: 2669 case Bytecodes::_iload_2: 2670 push( local(2) ); 2671 break; 2672 case Bytecodes::_fload_3: 2673 case Bytecodes::_iload_3: 2674 push( local(3) ); 2675 break; 2676 case Bytecodes::_fload: 2677 case Bytecodes::_iload: 2678 push( local(iter().get_index()) ); 2679 break; 2680 case Bytecodes::_lload_0: 2681 push_pair_local( 0 ); 2682 break; 2683 case Bytecodes::_lload_1: 2684 push_pair_local( 1 ); 2685 break; 2686 case Bytecodes::_lload_2: 2687 push_pair_local( 2 ); 2688 break; 2689 case Bytecodes::_lload_3: 2690 push_pair_local( 3 ); 2691 break; 2692 case Bytecodes::_lload: 2693 push_pair_local( iter().get_index() ); 2694 break; 2695 2696 case Bytecodes::_dload_0: 2697 push_pair_local(0); 2698 break; 2699 case Bytecodes::_dload_1: 2700 push_pair_local(1); 2701 break; 2702 case Bytecodes::_dload_2: 2703 push_pair_local(2); 2704 break; 2705 case Bytecodes::_dload_3: 2706 push_pair_local(3); 2707 break; 2708 case Bytecodes::_dload: 2709 push_pair_local(iter().get_index()); 2710 break; 2711 case Bytecodes::_fstore_0: 2712 case Bytecodes::_istore_0: 2713 case Bytecodes::_astore_0: 2714 set_local( 0, pop() ); 2715 break; 2716 case Bytecodes::_fstore_1: 2717 case Bytecodes::_istore_1: 2718 case Bytecodes::_astore_1: 2719 set_local( 1, pop() ); 2720 break; 2721 case Bytecodes::_fstore_2: 2722 case Bytecodes::_istore_2: 2723 case Bytecodes::_astore_2: 2724 set_local( 2, pop() ); 2725 break; 2726 case Bytecodes::_fstore_3: 2727 case Bytecodes::_istore_3: 2728 case Bytecodes::_astore_3: 2729 set_local( 3, pop() ); 2730 break; 2731 case Bytecodes::_fstore: 2732 case Bytecodes::_istore: 2733 case Bytecodes::_astore: 2734 set_local( iter().get_index(), pop() ); 2735 break; 2736 // long stores 2737 case Bytecodes::_lstore_0: 2738 set_pair_local( 0, pop_pair() ); 2739 break; 2740 case Bytecodes::_lstore_1: 2741 set_pair_local( 1, pop_pair() ); 2742 break; 2743 case Bytecodes::_lstore_2: 2744 set_pair_local( 2, pop_pair() ); 2745 break; 2746 case 
Bytecodes::_lstore_3: 2747 set_pair_local( 3, pop_pair() ); 2748 break; 2749 case Bytecodes::_lstore: 2750 set_pair_local( iter().get_index(), pop_pair() ); 2751 break; 2752 2753 // double stores 2754 case Bytecodes::_dstore_0: 2755 set_pair_local( 0, dprecision_rounding(pop_pair()) ); 2756 break; 2757 case Bytecodes::_dstore_1: 2758 set_pair_local( 1, dprecision_rounding(pop_pair()) ); 2759 break; 2760 case Bytecodes::_dstore_2: 2761 set_pair_local( 2, dprecision_rounding(pop_pair()) ); 2762 break; 2763 case Bytecodes::_dstore_3: 2764 set_pair_local( 3, dprecision_rounding(pop_pair()) ); 2765 break; 2766 case Bytecodes::_dstore: 2767 set_pair_local( iter().get_index(), dprecision_rounding(pop_pair()) ); 2768 break; 2769 2770 case Bytecodes::_pop: dec_sp(1); break; 2771 case Bytecodes::_pop2: dec_sp(2); break; 2772 case Bytecodes::_swap: 2773 a = pop(); 2774 b = pop(); 2775 push(a); 2776 push(b); 2777 break; 2778 case Bytecodes::_dup: 2779 a = pop(); 2780 push(a); 2781 push(a); 2782 break; 2783 case Bytecodes::_dup_x1: 2784 a = pop(); 2785 b = pop(); 2786 push( a ); 2787 push( b ); 2788 push( a ); 2789 break; 2790 case Bytecodes::_dup_x2: 2791 a = pop(); 2792 b = pop(); 2793 c = pop(); 2794 push( a ); 2795 push( c ); 2796 push( b ); 2797 push( a ); 2798 break; 2799 case Bytecodes::_dup2: 2800 a = pop(); 2801 b = pop(); 2802 push( b ); 2803 push( a ); 2804 push( b ); 2805 push( a ); 2806 break; 2807 2808 case Bytecodes::_dup2_x1: 2809 // before: .. c, b, a 2810 // after: .. b, a, c, b, a 2811 // not tested 2812 a = pop(); 2813 b = pop(); 2814 c = pop(); 2815 push( b ); 2816 push( a ); 2817 push( c ); 2818 push( b ); 2819 push( a ); 2820 break; 2821 case Bytecodes::_dup2_x2: 2822 // before: .. d, c, b, a 2823 // after: .. b, a, d, c, b, a 2824 // not tested 2825 a = pop(); 2826 b = pop(); 2827 c = pop(); 2828 d = pop(); 2829 push( b ); 2830 push( a ); 2831 push( d ); 2832 push( c ); 2833 push( b ); 2834 push( a ); 2835 break; 2836 2837 case Bytecodes::_arraylength: { 2838 // Must do null-check with value on expression stack 2839 Node *ary = null_check(peek(), T_ARRAY); 2840 // Compile-time detect of null-exception? 
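// (If the array reference is a compile-time constant null, the null_check()
// above stops this path and the arraylength parses down to an unconditional
// NullPointerException throw.)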
2841 if (stopped()) return; 2842 a = pop(); 2843 push(load_array_length(a)); 2844 break; 2845 } 2846 2847 case Bytecodes::_baload: array_load(T_BYTE); break; 2848 case Bytecodes::_caload: array_load(T_CHAR); break; 2849 case Bytecodes::_iaload: array_load(T_INT); break; 2850 case Bytecodes::_saload: array_load(T_SHORT); break; 2851 case Bytecodes::_faload: array_load(T_FLOAT); break; 2852 case Bytecodes::_aaload: array_load(T_OBJECT); break; 2853 case Bytecodes::_laload: array_load(T_LONG); break; 2854 case Bytecodes::_daload: array_load(T_DOUBLE); break; 2855 case Bytecodes::_bastore: array_store(T_BYTE); break; 2856 case Bytecodes::_castore: array_store(T_CHAR); break; 2857 case Bytecodes::_iastore: array_store(T_INT); break; 2858 case Bytecodes::_sastore: array_store(T_SHORT); break; 2859 case Bytecodes::_fastore: array_store(T_FLOAT); break; 2860 case Bytecodes::_aastore: array_store(T_OBJECT); break; 2861 case Bytecodes::_lastore: array_store(T_LONG); break; 2862 case Bytecodes::_dastore: array_store(T_DOUBLE); break; 2863 2864 case Bytecodes::_getfield: 2865 do_getfield(); 2866 break; 2867 2868 case Bytecodes::_getstatic: 2869 do_getstatic(); 2870 break; 2871 2872 case Bytecodes::_putfield: 2873 do_putfield(); 2874 break; 2875 2876 case Bytecodes::_putstatic: 2877 do_putstatic(); 2878 break; 2879 2880 case Bytecodes::_irem: 2881 // Must keep both values on the expression-stack during null-check 2882 zero_check_int(peek()); 2883 // Compile-time detect of null-exception? 2884 if (stopped()) return; 2885 b = pop(); 2886 a = pop(); 2887 push(_gvn.transform(new ModINode(control(), a, b))); 2888 break; 2889 case Bytecodes::_idiv: 2890 // Must keep both values on the expression-stack during null-check 2891 zero_check_int(peek()); 2892 // Compile-time detect of null-exception? 
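// (Same idea for the divisor: zero_check_int() shares the null-check
// machinery, so a compile-time constant-zero divisor stops this path,
// leaving an unconditional ArithmeticException throw.)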
2893 if (stopped()) return; 2894 b = pop(); 2895 a = pop(); 2896 push( _gvn.transform( new DivINode(control(),a,b) ) ); 2897 break; 2898 case Bytecodes::_imul: 2899 b = pop(); a = pop(); 2900 push( _gvn.transform( new MulINode(a,b) ) ); 2901 break; 2902 case Bytecodes::_iadd: 2903 b = pop(); a = pop(); 2904 push( _gvn.transform( new AddINode(a,b) ) ); 2905 break; 2906 case Bytecodes::_ineg: 2907 a = pop(); 2908 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) ); 2909 break; 2910 case Bytecodes::_isub: 2911 b = pop(); a = pop(); 2912 push( _gvn.transform( new SubINode(a,b) ) ); 2913 break; 2914 case Bytecodes::_iand: 2915 b = pop(); a = pop(); 2916 push( _gvn.transform( new AndINode(a,b) ) ); 2917 break; 2918 case Bytecodes::_ior: 2919 b = pop(); a = pop(); 2920 push( _gvn.transform( new OrINode(a,b) ) ); 2921 break; 2922 case Bytecodes::_ixor: 2923 b = pop(); a = pop(); 2924 push( _gvn.transform( new XorINode(a,b) ) ); 2925 break; 2926 case Bytecodes::_ishl: 2927 b = pop(); a = pop(); 2928 push( _gvn.transform( new LShiftINode(a,b) ) ); 2929 break; 2930 case Bytecodes::_ishr: 2931 b = pop(); a = pop(); 2932 push( _gvn.transform( new RShiftINode(a,b) ) ); 2933 break; 2934 case Bytecodes::_iushr: 2935 b = pop(); a = pop(); 2936 push( _gvn.transform( new URShiftINode(a,b) ) ); 2937 break; 2938 2939 case Bytecodes::_fneg: 2940 a = pop(); 2941 b = _gvn.transform(new NegFNode (a)); 2942 push(b); 2943 break; 2944 2945 case Bytecodes::_fsub: 2946 b = pop(); 2947 a = pop(); 2948 c = _gvn.transform( new SubFNode(a,b) ); 2949 d = precision_rounding(c); 2950 push( d ); 2951 break; 2952 2953 case Bytecodes::_fadd: 2954 b = pop(); 2955 a = pop(); 2956 c = _gvn.transform( new AddFNode(a,b) ); 2957 d = precision_rounding(c); 2958 push( d ); 2959 break; 2960 2961 case Bytecodes::_fmul: 2962 b = pop(); 2963 a = pop(); 2964 c = _gvn.transform( new MulFNode(a,b) ); 2965 d = precision_rounding(c); 2966 push( d ); 2967 break; 2968 2969 case Bytecodes::_fdiv: 2970 b = pop(); 2971 a = pop(); 2972 c = _gvn.transform( new DivFNode(0,a,b) ); 2973 d = precision_rounding(c); 2974 push( d ); 2975 break; 2976 2977 case Bytecodes::_frem: 2978 if (Matcher::has_match_rule(Op_ModF)) { 2979 // Generate a ModF node. 2980 b = pop(); 2981 a = pop(); 2982 c = _gvn.transform( new ModFNode(0,a,b) ); 2983 d = precision_rounding(c); 2984 push( d ); 2985 } 2986 else { 2987 // Generate a call. 2988 modf(); 2989 } 2990 break; 2991 2992 case Bytecodes::_fcmpl: 2993 b = pop(); 2994 a = pop(); 2995 c = _gvn.transform( new CmpF3Node( a, b)); 2996 push(c); 2997 break; 2998 case Bytecodes::_fcmpg: 2999 b = pop(); 3000 a = pop(); 3001 3002 // Same as fcmpl but need to flip the unordered case. Swap the inputs, 3003 // which negates the result sign except for unordered. Flip the unordered 3004 // as well by using CmpF3 which implements unordered-lesser instead of 3005 // unordered-greater semantics. Finally, commute the result bits. Result 3006 // is same as using a CmpF3Greater except we did it with CmpF3 alone. 
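// (Worked check: with a = 1.0f, b = NaN, fcmpg must yield +1. CmpF3(b, a)
// treats unordered as less and yields -1; negation gives +1. For a = 1.0f,
// b = 2.0f: CmpF3(b, a) = +1, negated to -1, matching fcmpg's a < b result.)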
3007 c = _gvn.transform( new CmpF3Node( b, a));
3008 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3009 push(c);
3010 break;
3011
3012 case Bytecodes::_f2i:
3013 a = pop();
3014 push(_gvn.transform(new ConvF2INode(a)));
3015 break;
3016
3017 case Bytecodes::_d2i:
3018 a = pop_pair();
3019 b = _gvn.transform(new ConvD2INode(a));
3020 push( b );
3021 break;
3022
3023 case Bytecodes::_f2d:
3024 a = pop();
3025 b = _gvn.transform( new ConvF2DNode(a));
3026 push_pair( b );
3027 break;
3028
3029 case Bytecodes::_d2f:
3030 a = pop_pair();
3031 b = _gvn.transform( new ConvD2FNode(a));
3032 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
3033 //b = _gvn.transform(new RoundFloatNode(0, b) );
3034 push( b );
3035 break;
3036
3037 case Bytecodes::_l2f:
3038 if (Matcher::convL2FSupported()) {
3039 a = pop_pair();
3040 b = _gvn.transform( new ConvL2FNode(a));
3041 // For x86_32.ad, FILD doesn't restrict precision to 24 or 53 bits.
3042 // Rather than storing the result into an FP register then pushing
3043 // out to memory to round, the machine instruction that implements
3044 // ConvL2F is responsible for rounding.
3045 // c = precision_rounding(b);
3046 push(b);
3047 } else {
3048 l2f();
3049 }
3050 break;
3051
3052 case Bytecodes::_l2d:
3053 a = pop_pair();
3054 b = _gvn.transform( new ConvL2DNode(a));
3055 // For x86_32.ad, rounding is always necessary (see _l2f above).
3056 // c = dprecision_rounding(b);
3057 push_pair(b);
3058 break;
3059
3060 case Bytecodes::_f2l:
3061 a = pop();
3062 b = _gvn.transform( new ConvF2LNode(a));
3063 push_pair(b);
3064 break;
3065
3066 case Bytecodes::_d2l:
3067 a = pop_pair();
3068 b = _gvn.transform( new ConvD2LNode(a));
3069 push_pair(b);
3070 break;
3071
3072 case Bytecodes::_dsub:
3073 b = pop_pair();
3074 a = pop_pair();
3075 c = _gvn.transform( new SubDNode(a,b) );
3076 d = dprecision_rounding(c);
3077 push_pair( d );
3078 break;
3079
3080 case Bytecodes::_dadd:
3081 b = pop_pair();
3082 a = pop_pair();
3083 c = _gvn.transform( new AddDNode(a,b) );
3084 d = dprecision_rounding(c);
3085 push_pair( d );
3086 break;
3087
3088 case Bytecodes::_dmul:
3089 b = pop_pair();
3090 a = pop_pair();
3091 c = _gvn.transform( new MulDNode(a,b) );
3092 d = dprecision_rounding(c);
3093 push_pair( d );
3094 break;
3095
3096 case Bytecodes::_ddiv:
3097 b = pop_pair();
3098 a = pop_pair();
3099 c = _gvn.transform( new DivDNode(0,a,b) );
3100 d = dprecision_rounding(c);
3101 push_pair( d );
3102 break;
3103
3104 case Bytecodes::_dneg:
3105 a = pop_pair();
3106 b = _gvn.transform(new NegDNode (a));
3107 push_pair(b);
3108 break;
3109
3110 case Bytecodes::_drem:
3111 if (Matcher::has_match_rule(Op_ModD)) {
3112 // Generate a ModD node.
3113 b = pop_pair();
3114 a = pop_pair();
3115 // a % b
3116
3117 c = _gvn.transform( new ModDNode(0,a,b) );
3118 d = dprecision_rounding(c);
3119 push_pair( d );
3120 }
3121 else {
3122 // Generate a call.
3123 modd();
3124 }
3125 break;
3126
3127 case Bytecodes::_dcmpl:
3128 b = pop_pair();
3129 a = pop_pair();
3130 c = _gvn.transform( new CmpD3Node( a, b));
3131 push(c);
3132 break;
3133
3134 case Bytecodes::_dcmpg:
3135 b = pop_pair();
3136 a = pop_pair();
3137 // Same as dcmpl but need to flip the unordered case.
3138 // Commute the inputs, which negates the result sign except for unordered.
3139 // Flip the unordered as well by using CmpD3 which implements
3140 // unordered-lesser instead of unordered-greater semantics.
3141 // Finally, negate the result bits. 
Result is same as using a 3142 // CmpD3Greater except we did it with CmpD3 alone. 3143 c = _gvn.transform( new CmpD3Node( b, a)); 3144 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 3145 push(c); 3146 break; 3147 3148 3149 // Note for longs -> lo word is on TOS, hi word is on TOS - 1 3150 case Bytecodes::_land: 3151 b = pop_pair(); 3152 a = pop_pair(); 3153 c = _gvn.transform( new AndLNode(a,b) ); 3154 push_pair(c); 3155 break; 3156 case Bytecodes::_lor: 3157 b = pop_pair(); 3158 a = pop_pair(); 3159 c = _gvn.transform( new OrLNode(a,b) ); 3160 push_pair(c); 3161 break; 3162 case Bytecodes::_lxor: 3163 b = pop_pair(); 3164 a = pop_pair(); 3165 c = _gvn.transform( new XorLNode(a,b) ); 3166 push_pair(c); 3167 break; 3168 3169 case Bytecodes::_lshl: 3170 b = pop(); // the shift count 3171 a = pop_pair(); // value to be shifted 3172 c = _gvn.transform( new LShiftLNode(a,b) ); 3173 push_pair(c); 3174 break; 3175 case Bytecodes::_lshr: 3176 b = pop(); // the shift count 3177 a = pop_pair(); // value to be shifted 3178 c = _gvn.transform( new RShiftLNode(a,b) ); 3179 push_pair(c); 3180 break; 3181 case Bytecodes::_lushr: 3182 b = pop(); // the shift count 3183 a = pop_pair(); // value to be shifted 3184 c = _gvn.transform( new URShiftLNode(a,b) ); 3185 push_pair(c); 3186 break; 3187 case Bytecodes::_lmul: 3188 b = pop_pair(); 3189 a = pop_pair(); 3190 c = _gvn.transform( new MulLNode(a,b) ); 3191 push_pair(c); 3192 break; 3193 3194 case Bytecodes::_lrem: 3195 // Must keep both values on the expression-stack during null-check 3196 assert(peek(0) == top(), "long word order"); 3197 zero_check_long(peek(1)); 3198 // Compile-time detect of null-exception? 3199 if (stopped()) return; 3200 b = pop_pair(); 3201 a = pop_pair(); 3202 c = _gvn.transform( new ModLNode(control(),a,b) ); 3203 push_pair(c); 3204 break; 3205 3206 case Bytecodes::_ldiv: 3207 // Must keep both values on the expression-stack during null-check 3208 assert(peek(0) == top(), "long word order"); 3209 zero_check_long(peek(1)); 3210 // Compile-time detect of null-exception? 3211 if (stopped()) return; 3212 b = pop_pair(); 3213 a = pop_pair(); 3214 c = _gvn.transform( new DivLNode(control(),a,b) ); 3215 push_pair(c); 3216 break; 3217 3218 case Bytecodes::_ladd: 3219 b = pop_pair(); 3220 a = pop_pair(); 3221 c = _gvn.transform( new AddLNode(a,b) ); 3222 push_pair(c); 3223 break; 3224 case Bytecodes::_lsub: 3225 b = pop_pair(); 3226 a = pop_pair(); 3227 c = _gvn.transform( new SubLNode(a,b) ); 3228 push_pair(c); 3229 break; 3230 case Bytecodes::_lcmp: 3231 // Safepoints are now inserted _before_ branches. The long-compare 3232 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a 3233 // slew of control flow. These are usually followed by a CmpI vs zero and 3234 // a branch; this pattern then optimizes to the obvious long-compare and 3235 // branch. However, if the branch is backwards there's a Safepoint 3236 // inserted. The inserted Safepoint captures the JVM state at the 3237 // pre-branch point, i.e. it captures the 3-way value. Thus if a 3238 // long-compare is used to control a loop the debug info will force 3239 // computation of the 3-way value, even though the generated code uses a 3240 // long-compare and branch. We try to rectify the situation by inserting 3241 // a SafePoint here and have it dominate and kill the safepoint added at a 3242 // following backwards branch. At this point the JVM state merely holds 2 3243 // longs but not the 3-way value. 
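// (Typical bytecode shape: `while (i > 0)` with a long i compiles to
// lcmp; ifle. The switch below peeks at that following if-bytecode and
// adds the Safepoint before the compare, so the captured JVM state holds
// the two long inputs rather than the materialized 3-way result.)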
3244 switch (iter().next_bc()) {
3245 case Bytecodes::_ifgt:
3246 case Bytecodes::_iflt:
3247 case Bytecodes::_ifge:
3248 case Bytecodes::_ifle:
3249 case Bytecodes::_ifne:
3250 case Bytecodes::_ifeq:
3251 // If this is a backwards branch in the bytecodes, add Safepoint
3252 maybe_add_safepoint(iter().next_get_dest());
3253 default:
3254 break;
3255 }
3256 b = pop_pair();
3257 a = pop_pair();
3258 c = _gvn.transform( new CmpL3Node( a, b ));
3259 push(c);
3260 break;
3261
3262 case Bytecodes::_lneg:
3263 a = pop_pair();
3264 b = _gvn.transform( new SubLNode(longcon(0),a));
3265 push_pair(b);
3266 break;
3267 case Bytecodes::_l2i:
3268 a = pop_pair();
3269 push( _gvn.transform( new ConvL2INode(a)));
3270 break;
3271 case Bytecodes::_i2l:
3272 a = pop();
3273 b = _gvn.transform( new ConvI2LNode(a));
3274 push_pair(b);
3275 break;
3276 case Bytecodes::_i2b:
3277 // Sign extend
3278 a = pop();
3279 a = Compile::narrow_value(T_BYTE, a, NULL, &_gvn, true);
3280 push(a);
3281 break;
3282 case Bytecodes::_i2s:
3283 a = pop();
3284 a = Compile::narrow_value(T_SHORT, a, NULL, &_gvn, true);
3285 push(a);
3286 break;
3287 case Bytecodes::_i2c:
3288 a = pop();
3289 a = Compile::narrow_value(T_CHAR, a, NULL, &_gvn, true);
3290 push(a);
3291 break;
3292
3293 case Bytecodes::_i2f:
3294 a = pop();
3295 b = _gvn.transform( new ConvI2FNode(a) );
3296 c = precision_rounding(b);
3297 push(c);
3298 break;
3299
3300 case Bytecodes::_i2d:
3301 a = pop();
3302 b = _gvn.transform( new ConvI2DNode(a));
3303 push_pair(b);
3304 break;
3305
3306 case Bytecodes::_iinc: // Increment local
3307 i = iter().get_index(); // Get local index
3308 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3309 break;
3310
3311 // Exit points of synchronized methods must have an unlock node
3312 case Bytecodes::_return:
3313 return_current(NULL);
3314 break;
3315
3316 case Bytecodes::_ireturn:
3317 case Bytecodes::_areturn:
3318 case Bytecodes::_freturn:
3319 return_current(pop());
3320 break;
3321 case Bytecodes::_lreturn:
3322 return_current(pop_pair());
3323 break;
3324 case Bytecodes::_dreturn:
3325 return_current(pop_pair());
3326 break;
3327
3328 case Bytecodes::_athrow:
3329 // A null exception oop results in a NullPointerException instead
3330 null_check(peek());
3331 if (stopped()) return;
3332 // Hook the thrown exception directly to subsequent handlers.
3333 if (BailoutToInterpreterForThrows) {
3334 // Keep method interpreted from now on.
3335 uncommon_trap(Deoptimization::Reason_unhandled,
3336 Deoptimization::Action_make_not_compilable);
3337 return;
3338 }
3339 if (env()->jvmti_can_post_on_exceptions()) {
3340 // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3341 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3342 }
3343 // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3344 add_exception_state(make_exception_state(peek()));
3345 break;
3346
3347 case Bytecodes::_goto: // fall through
3348 case Bytecodes::_goto_w: {
3349 int target_bci = (bc() == Bytecodes::_goto) ? 
  case Bytecodes::_i2b:
    // Sign extend
    a = pop();
    a = Compile::narrow_value(T_BYTE, a, NULL, &_gvn, true);
    push(a);
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = Compile::narrow_value(T_SHORT, a, NULL, &_gvn, true);
    push(a);
    break;
  case Bytecodes::_i2c:
    a = pop();
    a = Compile::narrow_value(T_CHAR, a, NULL, &_gvn, true);
    push(a);
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new ConvI2FNode(a) );
    c = precision_rounding(b);
    push(c); // push the (possibly) rounded result, not the raw conversion
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // Throwing a null exception oop results in a NullPointerException
    null_check(peek());
    if (stopped()) return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1) break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature()) break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    if (b->is_InlineType()) {
      // Null checking a scalarized but nullable inline type.  Check the
      // IsInit input instead of the oop input to avoid keeping buffer
      // allocations alive.
      c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
    } else {
      if (!_gvn.type(b)->speculative_maybe_null() &&
          !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
        inc_sp(1);
        Node* null_ctl = top();
        b = null_check_oop(b, &null_ctl, true, true, true);
        assert(null_ctl->is_top(), "no null control here");
        dec_sp(1);
      } else if (_gvn.type(b)->speculative_always_null() &&
                 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
        inc_sp(1);
        b = null_assert(b);
        dec_sp(1);
      }
      c = _gvn.transform( new CmpPNode(b, a) );
    }
    do_ifnull(btest, c);
    break;
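  // With inline types, acmp is not always a plain pointer comparison: two
  // separately buffered instances of the same inline type value must compare
  // equal, so do_acmp() may need to emit a substitutability test instead of
  // a simple CmpP.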
  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    do_acmp(btest, b, a);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_newarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;
  case Bytecodes::_aconst_init:
    do_aconst_init();
    break;
  case Bytecodes::_withfield:
    do_withfield();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;

  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint was set concurrently with this compilation
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  if (C->should_print_igv(1)) {
    IdealGraphPrinter* printer = C->igv_printer();
    char buffer[256];
    jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}