/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType elem_type) {
  const Type* elem = Type::TOP;
  Node* adr = array_addressing(elem_type, 0, &elem);
  if (stopped())  return;     // guaranteed null or range check
  dec_sp(2);                  // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered);
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC && (elem_type == T_OBJECT || elem_type == T_ARRAY)) {
    ld = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, ld);
  }
#endif
  push(ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType elem_type) {
  const Type* elem = Type::TOP;
  Node* adr = array_addressing(elem_type, 1, &elem);
  if (stopped())  return;     // guaranteed null or range check
  Node* val = pop();
  dec_sp(2);                  // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  if (elem == TypeInt::BOOL) {
    elem_type = T_BOOLEAN;
  }
  store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type));
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
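// Illustrative sketch (not the code below): for an int[] the address computed
// here is roughly
//   adr = ary + arrayOopDesc::base_offset_in_bytes(T_INT) + ((intptr_t)idx << LogBytesPerInt)
// i.e. the array base, plus the object header, plus the scaled index; the
// exact offsets depend on the platform and on compressed-oops settings.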
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node *idx   = peek(0+vals);   // Get from stack without popping
  Node *ary   = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)  C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass * arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new (C) CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new (C) BoolNode(chk, btest) );
    }
    // Branch to failure if out of bounds
    { BuildCutout unless(this, tst, PROB_MAX);
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
  Node   *cmp = _gvn.transform( new (C) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform( new (C) BoolNode( cmp, mask));
  IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region = new (C) RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}


//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new (C) IfTrueNode (iff) );
    set_control( iftrue );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iffalse = _gvn.transform( new (C) IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new (C) IfFalseNode (iff) );
    set_control( iffalse );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iftrue = _gvn.transform( new (C) IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
  // False branch, use existing map and control()
  profile_switch_case(prof_table_index);
  merge_new_path(dest_bci);
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
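// (Method-data table indexes and switch case indexes are non-negative, so -1
// cannot collide with a real index.)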
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int  _dest;
  int  _table_index;            // index into method data table

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  int  table_index() const     { return _table_index; }
  bool is_singleton() const    { return _lo == _hi; }

  void setRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo; _hi = hi; _dest = dest; _table_index = table_index;
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
      _hi = hi;
      return true;
    }
    return false;
  }

  void set(jint value, int dest, int table_index) {
    setRange(value, value, dest, table_index);
  }
  bool adjoin(jint value, int dest, int table_index) {
    return adjoinRange(value, value, dest, table_index);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d", lo(), dest());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d", hi(), dest());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d", lo(), dest());
    else
      tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
  }
};


//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();

  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ?
                       j : NullTableIndex;
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < len+2, "not too many ranges");

  // Safepoint in case a backward branch was observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch---------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();         // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
  {
    for( int j = 0; j < len; j++ ) {
      table[j+j+0] = iter().get_int_table(2+j+j);
      table[j+j+1] = iter().get_dest_table(2+j+j+1);
    }
    qsort( table, len, 2*sizeof(table[0]), jint_cmp );
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for( int j = 0; j < len; j++ ) {
    jint match_int   = table[j+j+0];
    int  dest        = table[j+j+1];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
    if( match_int != next_lo ) {
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
    }
    if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = table[2*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if( highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < rnum, "not too many ranges");

  // Safepoint in case a backward branch was observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  // Don't make jump table if profiling
  if (method_data_update())  return false;

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set.  We'll call this the default target
  // even though we can't be sure that it is the true "default".
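  // Worked example (illustrative): for ranges {min_jint..-1}=>D, {0}=>A,
  // {1}=>B, {2..max_jint}=>D, the two huge end ranges share destination D,
  // so total_outlier_size is nearly 2^32, a guard branch to D is emitted
  // below, and lo/hi are trimmed to the dense middle {0}..{1}.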

  bool needs_guard = false;
  int default_dest;
  int64 total_outlier_size = 0;
  int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
  int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest())  lo++;
    if (default_dest == hi->dest())  hi--;
  }

  // Find the total number of cases and ranges
  int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
    return false;
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new (C) SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp  = _gvn.transform( new (C) CmpUNode(key_val, size) );
    Node*   tst  = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ge) );
    IfNode* iff  = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
    jump_if_true_fork(iff, default_dest, NullTableIndex);
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.
#ifdef _LP64
  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeInt* ikeytype = TypeInt::make(0, num_cases-1, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
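  // (If the conversion floated above the guard, an out-of-range key could be
  // widened and used to index the table before the guard rejected it.)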
  key_val = C->constrained_convI2L(&_gvn, key_val, ikeytype, control());
#endif

  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new (C) MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Node* jtn = _gvn.transform( new (C) JumpNode(control(), key_val, num_cases) );

  // These are the switch destinations hanging off the jumpnode
  int i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64 j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index());
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    // Ensure that the block's successors are a (duplicate-free) set.
    int successors_counted = 0;  // block occurrences in [hi..lo]
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);

      // Check that the set of successors is the same in both places.
      int successors_found = 0;
      for (SwitchRange* p = lo; p <= hi; p++) {
        if (p->dest() == target->start())  successors_found++;
      }
      assert(successors_found > 0, "successor must be known");
      successors_counted += successors_found;
    }
    assert(successors_counted == (hi-lo)+1, "no unexpected successors");
#endif

    // Maybe prune the inputs, based on the type of key_val.
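    // For example, if key_val is known to be int:[0..9], ranges entirely
    // outside [0..9] are skipped below and the two boundary ranges are
    // clipped, so the decision tree only covers reachable cases.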
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val)  lo++;
    if (lo->lo() < min_val)  lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
    while (hi->lo() > max_val)  hi--;
    if (hi->hi() > max_val)  hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index());
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

    int nr = hi - lo + 1;

    SwitchRange* mid = lo + nr/2;
    // if there is an easy choice, pivot at a singleton:
    if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

    assert(lo < mid && mid <= hi, "good pivot choice");
    assert(nr != 2 || mid == hi,   "should pick higher of 2");
    assert(nr != 3 || mid == hi-1, "should pick middle of 3");

    Node *test_val = _gvn.intcon(mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
      if (eq_test_only) {
        assert(mid == hi-1, "");
      }

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le);
        Node   *iftrue  = _gvn.transform( new (C) IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new (C) IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
      } else {
        Node *iftrue  = _gvn.transform( new (C) IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new (C) IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::do_irem() {
  // Must keep both values on the expression-stack during the zero check
  zero_check_int(peek());
  // Compile-time detect of divide-by-zero?
  if (stopped())  return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
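        // Worked example: for "a % 8" the mask is 7.  Non-negative dividends
        // take the fast path a & 7; negative dividends compute -((-a) & 7),
        // which keeps the sign of the dividend as the JLS requires
        // (e.g. -5 % 8 == -5).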
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
        Node *iff = _gvn.transform( new (C) IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new (C) IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new (C) SubINode(zero, a) );
        Node *andn= _gvn.transform( new (C) AndINode(neg, mask) );
        Node *negn= _gvn.transform( new (C) SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new (C) AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new (C) ModINode(control(),a,b) ) );
}

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on the stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int true_cnt  = profile->true_count();

    // How the counts map to taken/not_taken depends on the actual test
    // operation (::eq or ::ne).  No need to scale the counts because profile
    // injection was designed to feed exact counts into the VM.
    taken     = (btest == BoolTest::eq) ? false_cnt : true_cnt;
    not_taken = (btest == BoolTest::eq) ? true_cnt  : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}

//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken and set the "cnt" field.  Returns a -1.0
// if we need to use static prediction for some reason.
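// For example, taken=90 and not_taken=10 yield prob = 0.9 and
// cnt = 100/FreqCountInvocations, while taken=0 pins prob below PROB_MIN so
// that seems_never_taken() can distinguish a true zero count from a merely
// small probability.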
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt = COUNT_UNKNOWN;

  int     taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that the individual counters are non-negative first, since
  // an overflowed (negative) counter can still make the sum look valid.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)  prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
    char prob_str_buf[30];
    if (prob_str == NULL) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
}

//-----------------------------branch_prediction--------------------------------
float Parse::branch_prediction(float& cnt,
                               BoolTest::mask btest,
                               int target_bci,
                               Node* test) {
  float prob = dynamic_branch_prediction(cnt, btest, test);
  // If prob is unknown, switch to static prediction
  if (prob != PROB_UNKNOWN)  return prob;

  prob = PROB_FAIR;                   // Set default value
  if (btest == BoolTest::eq)          // Exactly equal test?
    prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
  else if (btest == BoolTest::ne)
    prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent

  // If this is a conditional test guarding a backwards branch,
  // assume it's a loop-back edge.  Make it a likely taken branch.
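  // (E.g. the conditional branch at the bottom of a while loop jumps back
  // to a smaller bci, so target_bci < bci() identifies it as a back edge.)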
  if (target_bci < bci()) {
    if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
      // Since it's an OSR, we probably have profile data, but since
      // branch_prediction returned PROB_UNKNOWN, the counts are too small.
      // Let's make a special check here for completely zero counts.
      ciMethodData* methodData = method()->method_data();
      if (!methodData->is_empty()) {
        ciProfileData* data = methodData->bci_to_data(bci());
        // Only stop for truly zero counts, which mean an unknown part
        // of the OSR-ed method, and we want to deopt to gather more stats.
        // If you have ANY counts, then this loop is simply 'cold' relative
        // to the OSR loop.
        if (data == NULL ||
            (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
          // This is the only way to return PROB_UNKNOWN:
          return PROB_UNKNOWN;
        }
      }
    }
    prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
  }

  assert(prob != PROB_UNKNOWN, "must have some guess at this point");
  return prob;
}

// The magic constants are chosen so as to match the output of
// branch_prediction() when the profile reports a zero taken count.
// It is important to distinguish zero counts unambiguously, because
// some branches (e.g., _213_javac.Assembler.eliminate) validly produce
// very small but nonzero probabilities, which if confused with zero
// counts would keep the program recompiling indefinitely.
bool Parse::seems_never_taken(float prob) const {
  return prob < PROB_MIN;
}

// True if the comparison seems to be the kind that will not change its
// statistics from true to false.  See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classified as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion.  If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path.
bool Parse::seems_stable_comparison() const {
  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
    return false;
  }
  return true;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
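// For example, "ifnull" consumed one stack word (bc_depth == 1) and
// "if_icmpeq" consumed two (bc_depth == 2); inc_sp(bc_depth) makes those
// operands visible again so a subsequent uncommon trap reexecutes the
// branch bytecode with the correct expression stack.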
inline int Parse::repush_if_args() {
#ifndef PRODUCT
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
#endif
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  inc_sp(bc_depth);
  return bc_depth;
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken.
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  explicit_null_checks_inserted++;

  // Generate real control flow
  Node *tst = _gvn.transform( new (C) BoolNode( c, btest ) );

  // Sanity check the probability value
  assert(prob > 0.0f, "Bad probability in Parser");
  // Need xform to put node in hash table
  IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
  assert(iff->_prob > 0.0f, "Optimizer made bad probability in parser");
  // True branch
  { PreserveJVMState pjvms(this);
    Node* iftrue = _gvn.transform( new (C) IfTrueNode (iff) );
    set_control(iftrue);

    if (stopped()) {            // Path is dead?
      explicit_null_checks_elided++;
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {                    // Path is live.
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  // False branch
  Node* iffalse = _gvn.transform( new (C) IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    explicit_null_checks_elided++;
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else  {                     // Path is live.
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
                        next_block, branch_block);
  }
}

//------------------------------------do_if-------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken.
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f, "Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest         = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new (C) BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  if (tst->is_Bool()) {
    // Refresh c from the transformed bool node, since it may be
    // simpler than the original c.  Also re-canonicalize btest.
    // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
    // That can arise from statements like: if (x instanceof C) ...
    if (tst != tst0) {
      // Canonicalize one more time since transform can change it.
      btest = tst->as_Bool()->_test._test;
      if (!BoolTest(btest).is_canonical()) {
        // Reverse edges one more time...
        tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
        btest = tst->as_Bool()->_test._test;
        assert(BoolTest(btest).is_canonical(), "sanity");
        taken_if_true = !taken_if_true;
      }
      c = tst->in(1);
    }
    BoolTest::mask neg_btest = BoolTest(btest).negate();
    taken_btest   = taken_if_true ?     btest : neg_btest;
    untaken_btest = taken_if_true ? neg_btest :     btest;
  }

  // Generate real control flow
  float true_prob = (taken_if_true ?
                     prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f, "Optimizer made bad probability in parser");
  Node* taken_branch   = new (C) IfTrueNode(iff);
  Node* untaken_branch = new (C) IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped()) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(untaken_btest, c, untaken_prob,
                        next_block, branch_block);
  }
}

bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
  // Don't want to speculate on uncommon traps when running with -Xcomp
  if (!UseInterpreter) {
    return false;
  }
  return (seems_never_taken(prob) && seems_stable_comparison());
}

//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                                Block* path, Block* other_path) {
  if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
    return;                             // nothing to do

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (path_is_suitable_for_uncommon_trap(prob)) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  (is_fallthrough ? "taken always" : "taken never"));
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants?  Then leave well enough alone.
      have_con = false;
    }
  }
  if (!have_con)                        // remaining adjustments need a con
    return;

  sharpen_type_after_if(btest, con, tcon, val, tval);
}


static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
  Node* ldk;
  if (n->is_DecodeNKlass()) {
    if (n->in(1)->Opcode() != Op_LoadNKlass) {
      return NULL;
    } else {
      ldk = n->in(1);
    }
  } else if (n->Opcode() != Op_LoadKlass) {
    return NULL;
  } else {
    ldk = n;
  }
  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");

  Node* adr = ldk->in(MemNode::Address);
  intptr_t off = 0;
  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
    return NULL;
  const TypePtr* tp = gvn->type(obj)->is_ptr();
  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
    return NULL;

  return obj;
}

void Parse::sharpen_type_after_if(BoolTest::mask btest,
                                  Node* con, const Type* tcon,
                                  Node* val, const Type* tval) {
  // Look for opportunities to sharpen the type of a node
  // whose klass is compared with a constant klass.
  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
    Node* obj = extract_obj_from_klass_load(&_gvn, val);
    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
      // Found:
      //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
      // or the narrowOop equivalent.
      const Type* obj_type = _gvn.type(obj);
      const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
      if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
          tboth->higher_equal(obj_type)) {
        // obj has to be of the exact type Foo if the CmpP succeeds.
        int obj_in_map = map()->find_edge(obj);
        JVMState* jvms = this->jvms();
        if (obj_in_map >= 0 &&
            (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
          TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
          const Type* tcc = ccast->as_Type()->type();
          assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
          // Delay transform() call to allow recovery of pre-cast value
          // at the control merge.
          _gvn.set_type_bottom(ccast);
          record_for_igvn(ccast);
          // Here's the payoff.
          replace_in_map(obj, ccast);
        }
      }
    }
  }

  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = NULL;
  Node* cast = NULL;

  switch (btest) {
  case BoolTest::eq:                    // Constant test?
    {
      const Type* tboth = tcon->join_speculative(tval);
      if (tboth == tval)  break;        // Nothing to gain.
      if (tcon->isa_int()) {
        ccast = new (C) CastIINode(val, tboth);
      } else if (tcon == TypePtr::NULL_PTR) {
        // Cast to null, but keep the pointer identity temporarily live.
        ccast = new (C) CastPPNode(val, tboth);
      } else {
        const TypeF* tf = tcon->isa_float_constant();
        const TypeD* td = tcon->isa_double_constant();
        // Exclude tests vs float/double 0 as these could be
        // either +0 or -0.  Just because you are equal to +0
        // doesn't mean you ARE +0!
        // Note, following code also replaces Long and Oop values.
        if ((!tf || tf->_f != 0.0) &&
            (!td || td->_d != 0.0))
          cast = con;                   // Replace non-constant val by con.
      }
    }
    break;

  case BoolTest::ne:
    if (tcon == TypePtr::NULL_PTR) {
      cast = cast_not_null(val, false);
    }
    break;

  default:
    // (At this point we could record int range types with CastII.)
    break;
  }

  if (ccast != NULL) {
    const Type* tcc = ccast->as_Type()->type();
    assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
    // Delay transform() call to allow recovery of pre-cast value
    // at the control merge.
    ccast->set_req(0, control());
    _gvn.set_type_bottom(ccast);
    record_for_igvn(ccast);
    cast = ccast;
  }

  if (cast != NULL) {                   // Here's the payoff.
    replace_in_map(val, cast);
  }
}

/**
 * Use speculative type to optimize CmpP node: if comparison is
 * against the low level class, cast the object to the speculative
 * type if any. CmpP should then go away.
 *
 * @param c  expected CmpP node
 * @return   result of CmpP on object casted to speculative type
 *
 */
Node* Parse::optimize_cmp_with_klass(Node* c) {
  // If this is transformed by the _gvn to a comparison with the low
  // level klass then we may be able to use speculation
  if (c->Opcode() == Op_CmpP &&
      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
      c->in(2)->is_Con()) {
    Node* load_klass = NULL;
    Node* decode = NULL;
    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
      decode = c->in(1);
      load_klass = c->in(1)->in(1);
    } else {
      load_klass = c->in(1);
    }
    if (load_klass->in(2)->is_AddP()) {
      Node* addp = load_klass->in(2);
      Node* obj = addp->in(AddPNode::Address);
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      if (obj_type->speculative_type() != NULL) {
        ciKlass* k = obj_type->speculative_type();
        inc_sp(2);
        obj = maybe_cast_profiled_obj(obj, k);
        dec_sp(2);
        // Make the CmpP use the casted obj
        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
        load_klass = load_klass->clone();
        load_klass->set_req(2, addp);
        load_klass = _gvn.transform(load_klass);
        if (decode != NULL) {
          decode = decode->clone();
          decode->set_req(1, load_klass);
          load_klass = _gvn.transform(decode);
        }
        c = c->clone();
        c->set_req(1, load_klass);
        c = _gvn.transform(c);
      }
    }
  }
  return c;
}

//------------------------------do_one_bytecode---------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
void Parse::do_one_bytecode() {
  Node *a, *b, *c, *d;          // Handy temps
  BoolTest::mask btest;
  int i;

  assert(!has_exceptions(), "bytecode entry state must be clear of throws");

  if (C->check_node_count(NodeLimitFudgeFactor * 5,
                          "out of nodes parsing method")) {
    return;
  }

#ifdef ASSERT
  // for setting breakpoints
  if (TraceOptoParse) {
    tty->print(" @");
    dump_bci(bci());
    tty->cr();
  }
#endif

  switch (bc()) {
  case Bytecodes::_nop:
    // do nothing
    break;
  case Bytecodes::_lconst_0:
    push_pair(longcon(0));
    break;

  case Bytecodes::_lconst_1:
    push_pair(longcon(1));
    break;

  case Bytecodes::_fconst_0:
    push(zerocon(T_FLOAT));
    break;

  case Bytecodes::_fconst_1:
    push(makecon(TypeF::ONE));
    break;

  case Bytecodes::_fconst_2:
    push(makecon(TypeF::make(2.0f)));
    break;

  case Bytecodes::_dconst_0:
    push_pair(zerocon(T_DOUBLE));
    break;

  case Bytecodes::_dconst_1:
    push_pair(makecon(TypeD::ONE));
    break;

  case Bytecodes::_iconst_m1:push(intcon(-1)); break;
  case Bytecodes::_iconst_0: push(intcon( 0)); break;
  case Bytecodes::_iconst_1: push(intcon( 1)); break;
  case Bytecodes::_iconst_2: push(intcon( 2)); break;
  case Bytecodes::_iconst_3: push(intcon( 3)); break;
  case Bytecodes::_iconst_4: push(intcon( 4)); break;
  case Bytecodes::_iconst_5: push(intcon( 5)); break;
  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
  case Bytecodes::_aconst_null: push(null());  break;
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    // If the constant is unresolved, run this BC once in the interpreter.
    {
      ciConstant constant = iter().get_constant();
      if (constant.basic_type() == T_OBJECT &&
          !constant.as_object()->is_loaded()) {
        int index = iter().get_constant_pool_index();
        constantTag tag = iter().get_constant_pool_tag(index);
        uncommon_trap(Deoptimization::make_trap_request
                      (Deoptimization::Reason_unloaded,
                       Deoptimization::Action_reinterpret,
                       index),
                      NULL, tag.internal_name());
        break;
      }
      assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
             "must be java_mirror of klass");
      bool pushed = push_constant(constant, true);
      guarantee(pushed, "must be possible to push this constant");
    }

    break;

  case Bytecodes::_aload_0:
    push( local(0) );
    break;
  case Bytecodes::_aload_1:
    push( local(1) );
    break;
  case Bytecodes::_aload_2:
    push( local(2) );
    break;
  case Bytecodes::_aload_3:
    push( local(3) );
    break;
  case Bytecodes::_aload:
    push( local(iter().get_index()) );
    break;

  case Bytecodes::_fload_0:
  case Bytecodes::_iload_0:
    push( local(0) );
    break;
  case Bytecodes::_fload_1:
  case Bytecodes::_iload_1:
    push( local(1) );
    break;
  case Bytecodes::_fload_2:
  case Bytecodes::_iload_2:
    push( local(2) );
    break;
  case Bytecodes::_fload_3:
  case Bytecodes::_iload_3:
    push( local(3) );
    break;
  case Bytecodes::_fload:
  case Bytecodes::_iload:
    push( local(iter().get_index()) );
    break;
  case Bytecodes::_lload_0:
    push_pair_local( 0 );
    break;
  case Bytecodes::_lload_1:
    push_pair_local( 1 );
    break;
  case Bytecodes::_lload_2:
    push_pair_local( 2 );
    break;
  case Bytecodes::_lload_3:
    push_pair_local( 3 );
    break;
  case Bytecodes::_lload:
    push_pair_local( iter().get_index() );
    break;

  case Bytecodes::_dload_0:
    push_pair_local(0);
    break;
  case Bytecodes::_dload_1:
    push_pair_local(1);
    break;
  case Bytecodes::_dload_2:
    push_pair_local(2);
    break;
  case Bytecodes::_dload_3:
    push_pair_local(3);
    break;
  case Bytecodes::_dload:
    push_pair_local(iter().get_index());
    break;
  case Bytecodes::_fstore_0:
  case Bytecodes::_istore_0:
  case Bytecodes::_astore_0:
    set_local( 0, pop() );
    break;
  case Bytecodes::_fstore_1:
  case Bytecodes::_istore_1:
  case Bytecodes::_astore_1:
    set_local( 1, pop() );
    break;
  case Bytecodes::_fstore_2:
  case Bytecodes::_istore_2:
  case Bytecodes::_astore_2:
    set_local( 2, pop() );
    break;
  case Bytecodes::_fstore_3:
  case Bytecodes::_istore_3:
  case Bytecodes::_astore_3:
    set_local( 3, pop() );
    break;
  case Bytecodes::_fstore:
  case Bytecodes::_istore:
  case Bytecodes::_astore:
    set_local( iter().get_index(), pop() );
    break;
  // long stores
  case Bytecodes::_lstore_0:
    set_pair_local( 0, pop_pair() );
    break;
  case Bytecodes::_lstore_1:
    set_pair_local( 1, pop_pair() );
    break;
  case Bytecodes::_lstore_2:
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  dec_sp(1);   break;
  case Bytecodes::_pop2: dec_sp(2);   break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?
    if (stopped()) return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload: array_load(T_BYTE);   break;
  case Bytecodes::_caload: array_load(T_CHAR);   break;
  case Bytecodes::_iaload: array_load(T_INT);    break;
  case Bytecodes::_saload: array_load(T_SHORT);  break;
  case Bytecodes::_faload: array_load(T_FLOAT);  break;
  case Bytecodes::_aaload: array_load(T_OBJECT); break;
  case Bytecodes::_laload: {
    a = array_addressing(T_LONG, 0);
    if (stopped()) return;     // guaranteed null or range check
    dec_sp(2);                 // Pop array and index
    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered));
    break;
  }
  case Bytecodes::_daload: {
    a = array_addressing(T_DOUBLE, 0);
    if (stopped()) return;     // guaranteed null or range check
    dec_sp(2);                 // Pop array and index
    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered));
    break;
  }
  case Bytecodes::_bastore: array_store(T_BYTE);  break;
  case Bytecodes::_castore: array_store(T_CHAR);  break;
  case Bytecodes::_iastore: array_store(T_INT);   break;
  case Bytecodes::_sastore: array_store(T_SHORT); break;
  case Bytecodes::_fastore: array_store(T_FLOAT); break;
  case Bytecodes::_aastore: {
    d = array_addressing(T_OBJECT, 1);
    if (stopped()) return;     // guaranteed null or range check
    array_store_check();
    c = pop();                 // Oop to store
    b = pop();                 // index (already used)
    a = pop();                 // the array itself
    const TypeOopPtr* elemtype = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
    const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT, MemNode::release);
    break;
  }
  case Bytecodes::_lastore: {
    a = array_addressing(T_LONG, 2);
    if (stopped()) return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                 // Pop array and index
    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered);
    break;
  }
  case Bytecodes::_dastore: {
    a = array_addressing(T_DOUBLE, 2);
    if (stopped()) return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                 // Pop array and index
    c = dstore_rounding(c);
    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered);
    break;
  }
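
  // Note on the second argument of array_addressing() in the cases above:
  // it is the number of stack slots still parked above array+index when
  // the element address is computed, i.e. 0 for the loads, 1 for aastore
  // (the oop to store), and 2 for lastore/dastore (a two-slot value).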

  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression-stack during the zero-check
    zero_check_int(peek());
    // Compile-time detect of division by zero?
    if (stopped()) return;
    b = pop();
    a = pop();
    push( _gvn.transform( new (C) DivINode(control(),a,b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) MulINode(a,b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) AddINode(a,b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new (C) SubINode(_gvn.intcon(0),a)) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) SubINode(a,b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) AndINode(a,b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) OrINode(a,b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) XorINode(a,b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) LShiftINode(a,b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) RShiftINode(a,b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) URShiftINode(a,b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new (C) NegFNode (a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) SubFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) AddFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) MulFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) DivFNode(0,a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new (C) ModFNode(0,a,b) );
      d = precision_rounding(c);
      push( d );
    }
    else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) CmpF3Node( a, b));
    push(c);
    break;
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, commute the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
    c = _gvn.transform( new (C) CmpF3Node( b, a));
    c = _gvn.transform( new (C) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;
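
  // Worked example of the flip trick: for a = NaN, b = 1.0f (fcmpg must
  // produce +1), CmpF3(b, a) is unordered and yields -1, so 0 - (-1) gives
  // the required +1.  For an ordered pair a = 1.0f, b = 2.0f (fcmpg must
  // produce -1), CmpF3(b, a) yields +1 and the negation restores -1.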

  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new (C) ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new (C) ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new (C) ConvF2DNode(a));
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvD2FNode(a));
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new (C) RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new (C) ConvL2FNode(a));
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2F is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvL2DNode(a));
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;

  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new (C) ConvF2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvD2LNode(a));
    push_pair(b);
    break;
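
  // Reminder on the float/double-to-integral cases above: Java semantics
  // round toward zero, map NaN to 0, and saturate out-of-range values
  // (e.g. (long)1e30f == Long.MAX_VALUE).  The Conv*Nodes are assumed to
  // carry exactly those semantics, which is why no extra guards are
  // emitted here.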

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) SubDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AddDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) MulDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) DivDNode(0,a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new (C) NegDNode (a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b

      c = _gvn.transform( new (C) ModDNode(0,a,b) );
      d = dprecision_rounding(c);
      push_pair( d );
    }
    else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) CmpD3Node( a, b));
    push(c);
    break;

  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.  Result is same as using a
    // CmpD3Greater except we did it with CmpD3 alone.
    c = _gvn.transform( new (C) CmpD3Node( b, a));
    c = _gvn.transform( new (C) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;


  // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AndLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) OrLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) XorLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) LShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) RShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) URShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) MulLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during the zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped()) return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) ModLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during the zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped()) return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) DivLNode(control(),a,b) );
    push_pair(c);
    break;
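
  // The asserts above rely on how this parser models a two-slot value:
  // the real Node sits in the lower slot and the upper slot holds the
  // dummy top() sentinel, so peek(0) == top() and the long itself is
  // fetched with peek(1).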

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AddLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) SubLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
    if( UseLoopSafepoints ) {
      switch( iter().next_bc() ) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) CmpL3Node( a, b ));
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new (C) SubLNode(longcon(0),a));
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new (C) ConvL2INode(a)));
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new (C) ConvI2LNode(a));
    push_pair(b);
    break;
  case Bytecodes::_i2b:
    // Sign extend
    a = pop();
    a = _gvn.transform( new (C) LShiftINode(a,_gvn.intcon(24)) );
    a = _gvn.transform( new (C) RShiftINode(a,_gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = _gvn.transform( new (C) LShiftINode(a,_gvn.intcon(16)) );
    a = _gvn.transform( new (C) RShiftINode(a,_gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
    a = pop();
    push( _gvn.transform( new (C) AndINode(a,_gvn.intcon(0xFFFF)) ) );
    break;
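
  // Worked example for the i2b narrowing above: with a = 511 (0x1FF),
  // 0x1FF << 24 truncates to 0xFF000000 and the arithmetic >> 24 smears
  // the sign bit back down, giving -1, exactly (byte)511 in Java.  i2c
  // zero-extends instead, hence the single 0xFFFF mask.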

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new (C) ConvI2FNode(a) );
    c = precision_rounding(b);
    push(c);
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new (C) ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new (C) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // A null exception oop throws a NullPointerException
    null_check(peek());
    if (stopped()) return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1) break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature()) break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert( data->is_JumpData(), "" );
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }
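
  // A sketch of what maybe_add_safepoint() is expected to do here (the
  // same call guards every conditional branch below): insert a safepoint
  // poll only when UseLoopSafepoints is on and target_bci <= bci(), i.e.
  // on backward branches, so loop back-edges get a poll while forward
  // branches stay safepoint-free.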

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    c = _gvn.transform( new (C) CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;

  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C) CmpPNode(b, a) );
    c = optimize_cmp_with_klass(c);
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new (C) CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C) CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;


  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter *printer = IdealGraphPrinter::printer();
  if (printer) {
    char buffer[256];
    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(C, buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}