/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciSymbols.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif

class BlockListBuilder {
 private:
  Compilation* _compilation;
  IRScope*     _scope;

  BlockList    _blocks;                // internal list of all blocks
  BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
  GrowableArray<BlockList> _bci2block_successors; // mapping from bcis to their block successors while we don't have a BlockEnd yet

  // fields used by mark_loops
  ResourceBitMap _active;              // for iteration of control flow graph
  ResourceBitMap _visited;             // for iteration of control flow graph
  GrowableArray<ResourceBitMap> _loop_map; // caches the information whether a block is contained in a loop
  int            _next_loop_index;     // next free loop number
  int            _next_block_number;   // for reverse postorder numbering of blocks
  int            _block_id_start;

  int bit_number(int block_id) const   { return block_id - _block_id_start; }
  // accessors
  Compilation*  compilation() const              { return _compilation; }
  IRScope*      scope() const                    { return _scope; }
  ciMethod*     method() const                   { return scope()->method(); }
  XHandlers*    xhandlers() const                { return scope()->xhandlers(); }

  // unified bailout support
  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
  bool          bailed_out() const               { return compilation()->bailed_out(); }

  // helper functions
  BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
  void handle_exceptions(BlockBegin* current, int cur_bci);
  void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
  void store_one(BlockBegin* current, int local);
  void store_two(BlockBegin* current, int local);
  void set_entries(int osr_bci);
  void set_leaders();

  void make_loop_header(BlockBegin* block);
  void mark_loops();
  BitMap& mark_loops(BlockBegin* b, bool in_subroutine);

  // debugging
#ifndef PRODUCT
  void print();
#endif

  int number_of_successors(BlockBegin* block);
  BlockBegin* successor_at(BlockBegin* block, int i);
  void add_successor(BlockBegin* block, BlockBegin* sux);
  bool is_successor(BlockBegin* block, BlockBegin* sux);

 public:
  // creation
  BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);

  // accessors for GraphBuilder
  BlockList*    bci2block() const                { return _bci2block; }
};


// Implementation of BlockListBuilder

BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
 : _compilation(compilation)
 , _scope(scope)
 , _blocks(16)
 , _bci2block(new BlockList(scope->method()->code_size(), nullptr))
 , _bci2block_successors(scope->method()->code_size())
 , _active()         // size not known yet
 , _visited()        // size not known yet
 , _loop_map()       // size not known yet
 , _next_loop_index(0)
 , _next_block_number(0)
 , _block_id_start(0)
{
  set_entries(osr_bci);
  set_leaders();
  CHECK_BAILOUT();

  mark_loops();
  NOT_PRODUCT(if (PrintInitialBlockList) print());

  // _bci2block still contains blocks with _end == null and > 0 sux in _bci2block_successors.

#ifndef PRODUCT
  if (PrintCFGToFile) {
    stringStream title;
    title.print("BlockListBuilder ");
    scope->method()->print_name(&title);
    CFGPrinter::print_cfg(_bci2block, title.freeze(), false, false);
  }
#endif
}


void BlockListBuilder::set_entries(int osr_bci) {
  // generate start blocks
  BlockBegin* std_entry = make_block_at(0, nullptr);
  if (scope()->caller() == nullptr) {
    std_entry->set(BlockBegin::std_entry_flag);
  }
  if (osr_bci != -1) {
    BlockBegin* osr_entry = make_block_at(osr_bci, nullptr);
    osr_entry->set(BlockBegin::osr_entry_flag);
  }

  // generate exception entry blocks
  XHandlers* list = xhandlers();
  const int n = list->length();
  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);
    BlockBegin* entry = make_block_at(h->handler_bci(), nullptr);
    entry->set(BlockBegin::exception_entry_flag);
    h->set_entry_block(entry);
  }
}


BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
  assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");

  BlockBegin* block = _bci2block->at(cur_bci);
  if (block == nullptr) {
    block = new BlockBegin(cur_bci);
    block->init_stores_to_locals(method()->max_locals());
    _bci2block->at_put(cur_bci, block);
    _bci2block_successors.at_put_grow(cur_bci, BlockList());
    _blocks.append(block);

    assert(predecessor == nullptr || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
  }

  if (predecessor != nullptr) {
    if (block->is_set(BlockBegin::exception_entry_flag)) {
      BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
    }

    add_successor(predecessor, block);
    block->increment_total_preds();
  }

  return block;
}
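
// For illustration (a hypothetical bytecode shape; javac does not normally
// emit this): if a goto branches directly to a handler's entry bci,
//
//     try { ... } catch (Exception e) { L: ... }
//     ...
//     goto L    // normal control flow into an exception handler entry
//
// the handler entry would be reached by both normal and exceptional control
// flow, and make_block_at() above bails out rather than trying to split the
// block.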


inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
  current->stores_to_locals().set_bit(local);
}
inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
  store_one(current, local);
  store_one(current, local + 1);
}


void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
  // Draws edges from a block to its exception handlers
  XHandlers* list = xhandlers();
  const int n = list->length();

  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);

    if (h->covers(cur_bci)) {
      BlockBegin* entry = h->entry_block();
      assert(entry != nullptr && entry == _bci2block->at(h->handler_bci()), "entry must be set");
      assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");

      // add each exception handler only once
      if (!is_successor(current, entry)) {
        add_successor(current, entry);
        entry->increment_total_preds();
      }

      // stop when reaching catch-all
      if (h->catch_type() == 0) break;
    }
  }
}

void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
  if (next_bci < method()->code_size()) {
    // start a new block after the jsr bytecode and link this block into the CFG
    make_block_at(next_bci, current);
  }

  // start a new block at the subroutine entry and mark it with a special flag
  BlockBegin* sr_block = make_block_at(sr_bci, current);
  if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
    sr_block->set(BlockBegin::subroutine_entry_flag);
  }
}


void BlockListBuilder::set_leaders() {
  bool has_xhandlers = xhandlers()->has_handlers();
  BlockBegin* current = nullptr;

  // The information about which bcis start a new block simplifies the analysis.
  // Without it, backward branches could jump to a bci where no block was created
  // during bytecode iteration. This would require the creation of a new block at the
  // branch target and a modification of the successor lists.
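  //
  // For illustration (hypothetical Java source and the blocks it yields):
  //
  //     int i = 0;              // bci 0: leader of the entry block
  //     do { i++; }             // loop-body bci: leader, because it is the
  //     while (i < n);          //   target of the backward branch below
  //     return i;               // bci after the branch: leader (fall-through
  //                             //   successor of the conditional branch)
  //
  // bci_block_start, precomputed by the MethodLivenessAnalyzer, already marks
  // the loop-body bci as a block start, so the backward branch finds its
  // target block even though that bci was visited before the branch itself.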
  const BitMap& bci_block_start = method()->bci_block_start();

  int end_bci = method()->code_size();

  ciBytecodeStream s(method());
  while (s.next() != ciBytecodeStream::EOBC()) {
    int cur_bci = s.cur_bci();

    if (bci_block_start.at(cur_bci)) {
      current = make_block_at(cur_bci, current);
    }
    assert(current != nullptr, "must have current block");

    if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
      handle_exceptions(current, cur_bci);
    }

    switch (s.cur_bc()) {
      // track stores to local variables for selective creation of phi functions
      case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
      case Bytecodes::_istore:   store_one(current, s.get_index()); break;
      case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
      case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_astore:   store_one(current, s.get_index()); break;
      case Bytecodes::_istore_0: store_one(current, 0); break;
      case Bytecodes::_istore_1: store_one(current, 1); break;
      case Bytecodes::_istore_2: store_one(current, 2); break;
      case Bytecodes::_istore_3: store_one(current, 3); break;
      case Bytecodes::_lstore_0: store_two(current, 0); break;
      case Bytecodes::_lstore_1: store_two(current, 1); break;
      case Bytecodes::_lstore_2: store_two(current, 2); break;
      case Bytecodes::_lstore_3: store_two(current, 3); break;
      case Bytecodes::_fstore_0: store_one(current, 0); break;
      case Bytecodes::_fstore_1: store_one(current, 1); break;
      case Bytecodes::_fstore_2: store_one(current, 2); break;
      case Bytecodes::_fstore_3: store_one(current, 3); break;
      case Bytecodes::_dstore_0: store_two(current, 0); break;
      case Bytecodes::_dstore_1: store_two(current, 1); break;
      case Bytecodes::_dstore_2: store_two(current, 2); break;
      case Bytecodes::_dstore_3: store_two(current, 3); break;
      case Bytecodes::_astore_0: store_one(current, 0); break;
      case Bytecodes::_astore_1: store_one(current, 1); break;
      case Bytecodes::_astore_2: store_one(current, 2); break;
      case Bytecodes::_astore_3: store_one(current, 3); break;

      // track bytecodes that affect the control flow
      case Bytecodes::_athrow:  // fall through
      case Bytecodes::_ret:     // fall through
      case Bytecodes::_ireturn: // fall through
      case Bytecodes::_lreturn: // fall through
      case Bytecodes::_freturn: // fall through
      case Bytecodes::_dreturn: // fall through
      case Bytecodes::_areturn: // fall through
      case Bytecodes::_return:
        current = nullptr;
        break;

      case Bytecodes::_ifeq:      // fall through
      case Bytecodes::_ifne:      // fall through
      case Bytecodes::_iflt:      // fall through
      case Bytecodes::_ifge:      // fall through
      case Bytecodes::_ifgt:      // fall through
      case Bytecodes::_ifle:      // fall through
      case Bytecodes::_if_icmpeq: // fall through
      case Bytecodes::_if_icmpne: // fall through
      case Bytecodes::_if_icmplt: // fall through
      case Bytecodes::_if_icmpge: // fall through
      case Bytecodes::_if_icmpgt: // fall through
      case Bytecodes::_if_icmple: // fall through
      case Bytecodes::_if_acmpeq: // fall through
      case Bytecodes::_if_acmpne: // fall through
      case Bytecodes::_ifnull:    // fall through
      case Bytecodes::_ifnonnull:
        if (s.next_bci() < end_bci) {
          make_block_at(s.next_bci(), current);
        }
        make_block_at(s.get_dest(), current);
        current = nullptr;
        break;

      case Bytecodes::_goto:
        make_block_at(s.get_dest(), current);
        current = nullptr;
        break;

      case Bytecodes::_goto_w:
        make_block_at(s.get_far_dest(), current);
        current = nullptr;
        break;

      case Bytecodes::_jsr:
        handle_jsr(current, s.get_dest(), s.next_bci());
        current = nullptr;
        break;

      case Bytecodes::_jsr_w:
        handle_jsr(current, s.get_far_dest(), s.next_bci());
        current = nullptr;
        break;

      case Bytecodes::_tableswitch: {
        // set block for each case
        Bytecode_tableswitch sw(&s);
        int l = sw.length();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.dest_offset_at(i), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = nullptr;
        break;
      }

      case Bytecodes::_lookupswitch: {
        // set block for each case
        Bytecode_lookupswitch sw(&s);
        int l = sw.number_of_pairs();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.pair_at(i).offset(), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = nullptr;
        break;
      }

      default:
        break;
    }
  }
}
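
// For illustration (hypothetical Java source): the per-block stores_to_locals
// bitmaps recorded above let mark_loops() request phi functions only for
// locals that are actually stored to inside a loop. In
//
//     int s = 0;
//     for (int i = 0; i < n; i++) { s += i; }
//
// both s and i are stored in the loop body, so their locals need phis at the
// loop header, while a local that is only read inside the loop does not.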

void BlockListBuilder::mark_loops() {
  ResourceMark rm;

  const int number_of_blocks = _blocks.length();
  _active.initialize(number_of_blocks);
  _visited.initialize(number_of_blocks);
  _loop_map = GrowableArray<ResourceBitMap>(number_of_blocks, number_of_blocks, ResourceBitMap());
  for (int i = 0; i < number_of_blocks; i++) {
    _loop_map.at(i).initialize(number_of_blocks);
  }
  _next_loop_index = 0;
  _next_block_number = _blocks.length();

  // The loop detection algorithm works as follows:
  // - We maintain the _loop_map, where for each block we have a bitmap indicating which loops contain this block.
  // - The CFG is recursively traversed (depth-first) and if we detect a loop, we assign the loop a unique number that is stored
  //   in the bitmap associated with the loop header block. Until we return back through that loop header the bitmap contains
  //   only a single bit corresponding to the loop number.
  // - The bit is then propagated to all the blocks in the loop after we exit them (post-order). There could of course be
  //   multiple bits in case of nested loops.
  // - When we exit the loop header we remove that single bit and assign the real loop state for it.
  // - Now, the tricky part here is how we detect irreducible loops. In the algorithm above the loop state bits
  //   are propagated to the predecessors. If we encounter an irreducible loop (a loop with multiple heads) we would see
  //   a node with some loop bit set that would then propagate back and never be cleared, because we would
  //   never go back through the original loop header. Therefore if there are any irreducible loops the bits in the states
  //   for these loops are going to propagate back to the root.
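  //
  // A small worked example (hypothetical CFG): suppose B1 -> B2 -> B1 is a
  // simple loop entered from B0. The DFS visits B0, B1, B2 and then sees the
  // back edge B2 -> B1 while B1 is still active, so B1 becomes a loop header
  // and gets loop bit 0 in its _loop_map entry. Unwinding, B2 unions in B1's
  // state {0}; back at B1 the header bit 0 is removed from the propagated
  // state, so nothing leaks to B0. If a second entry edge jumped straight
  // into B2 (an irreducible loop), bit 0 would reach the root instead, which
  // is exactly the condition tested below.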
  BlockBegin* start = _bci2block->at(0);
  _block_id_start = start->block_id();
  BitMap& loop_state = mark_loops(start, false);
  if (!loop_state.is_empty()) {
    compilation()->set_has_irreducible_loops(true);
  }
  assert(_next_block_number >= 0, "invalid block numbers");

  // Remove dangling Resource pointers before the ResourceMark goes out-of-scope.
  _active.resize(0);
  _visited.resize(0);
  _loop_map.clear();
}

void BlockListBuilder::make_loop_header(BlockBegin* block) {
  int block_id = block->block_id();
  int block_bit = bit_number(block_id);
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    // exception edges may look like loops but don't mark them as such
    // since it screws up block ordering.
    return;
  }
  if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
    block->set(BlockBegin::parser_loop_header_flag);

    assert(_loop_map.at(block_bit).is_empty(), "must not be set yet");
    assert(0 <= _next_loop_index && _next_loop_index < _loop_map.length(), "_next_loop_index is too large");
    _loop_map.at(block_bit).set_bit(_next_loop_index++);
  } else {
    // block already marked as loop header
    assert(_loop_map.at(block_bit).count_one_bits() == 1, "exactly one bit must be set");
  }
}

BitMap& BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
  int block_id = block->block_id();
  int block_bit = bit_number(block_id);
  if (_visited.at(block_bit)) {
    if (_active.at(block_bit)) {
      // reached block via backward branch
      make_loop_header(block);
    }
    // return cached loop information for this block
    return _loop_map.at(block_bit);
  }

  if (block->is_set(BlockBegin::subroutine_entry_flag)) {
    in_subroutine = true;
  }

  // set active and visited bits before successors are processed
  _visited.set_bit(block_bit);
  _active.set_bit(block_bit);

  ResourceMark rm;
  ResourceBitMap loop_state(_loop_map.length());
  for (int i = number_of_successors(block) - 1; i >= 0; i--) {
    BlockBegin* sux = successor_at(block, i);
    // recursively process all successors
    loop_state.set_union(mark_loops(sux, in_subroutine));
  }

  // clear active-bit after all successors are processed
  _active.clear_bit(block_bit);

  // reverse-post-order numbering of all blocks
  block->set_depth_first_number(_next_block_number);
  _next_block_number--;

  if (!loop_state.is_empty() || in_subroutine) {
    // block is contained in at least one loop, so phi functions are necessary
    // phi functions are also necessary for all locals stored in a subroutine
    scope()->requires_phi_function().set_union(block->stores_to_locals());
  }

  if (block->is_set(BlockBegin::parser_loop_header_flag)) {
    BitMap& header_loop_state = _loop_map.at(block_bit);
    assert(header_loop_state.count_one_bits() == 1, "exactly one bit must be set");
    // remove the bit with the loop number from the state (the header is outside of the loop)
    loop_state.set_difference(header_loop_state);
  }

  // cache and return loop information for this block
  _loop_map.at(block_bit).set_from(loop_state);
  return _loop_map.at(block_bit);
}

inline int BlockListBuilder::number_of_successors(BlockBegin* block)
{
  assert(_bci2block_successors.length() > block->bci(), "sux must exist");
  return _bci2block_successors.at(block->bci()).length();
}

inline BlockBegin* BlockListBuilder::successor_at(BlockBegin* block, int i)
{
  assert(_bci2block_successors.length() > block->bci(), "sux must exist");
  return _bci2block_successors.at(block->bci()).at(i);
}

inline void BlockListBuilder::add_successor(BlockBegin* block, BlockBegin* sux)
{
  assert(_bci2block_successors.length() > block->bci(), "sux must exist");
  _bci2block_successors.at(block->bci()).append(sux);
}

inline bool BlockListBuilder::is_successor(BlockBegin* block, BlockBegin* sux) {
  assert(_bci2block_successors.length() > block->bci(), "sux must exist");
  return _bci2block_successors.at(block->bci()).contains(sux);
}

#ifndef PRODUCT

static int compare_depth_first(BlockBegin** a, BlockBegin** b) {
  return (*a)->depth_first_number() - (*b)->depth_first_number();
}

void BlockListBuilder::print() {
  tty->print("----- initial block list of BlockListBuilder for method ");
  method()->print_short_name();
  tty->cr();

  // better readability if blocks are sorted in processing order
  _blocks.sort(compare_depth_first);

  for (int i = 0; i < _blocks.length(); i++) {
    BlockBegin* cur = _blocks.at(i);
    tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());

    tty->print(cur->is_set(BlockBegin::std_entry_flag)          ? " std" : "    ");
    tty->print(cur->is_set(BlockBegin::osr_entry_flag)          ? " osr" : "    ");
    tty->print(cur->is_set(BlockBegin::exception_entry_flag)    ? " ex"  : "   ");
    tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)   ? " sr"  : "   ");
    tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh"  : "   ");

    if (number_of_successors(cur) > 0) {
      tty->print("    sux: ");
      for (int j = 0; j < number_of_successors(cur); j++) {
        BlockBegin* sux = successor_at(cur, j);
        tty->print("B%d ", sux->block_id());
      }
    }
    tty->cr();
  }
}

#endif


// A simple growable array of Values indexed by ciFields
class FieldBuffer: public CompilationResourceObj {
 private:
  GrowableArray<Value> _values;

 public:
  FieldBuffer() {}

  void kill() {
    _values.trunc_to(0);
  }

  Value at(ciField* field) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset_in_bytes();
    if (offset < _values.length()) {
      return _values.at(offset);
    } else {
      return nullptr;
    }
  }

  void at_put(ciField* field, Value value) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset_in_bytes();
    _values.at_put_grow(offset, value, nullptr);
  }

};


// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces.  The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V.  This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V.  Loads of O.F can
// simply use V.  Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields.  Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant.  Once newly allocated objects are stored into
// another object or they are passed out of the current compile, they
// are treated like generic memory.
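//
// For illustration (hypothetical Java source), assuming EliminateFieldAccess
// is enabled:
//
//     Point p = new Point();   // p is tracked in _newobjects
//     p.x = 0;                 // store of the default value to a fresh
//                              //   object: eliminated as redundant
//     p.y = 5;                 // recorded in p's FieldBuffer
//     int t = p.y;             // load forwarded: t uses the value 5
//     q.next = p;              // p escapes: from now on p is treated
//                              //   like generic memory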

class MemoryBuffer: public CompilationResourceObj {
 private:
  FieldBuffer                 _values;
  GrowableArray<Value>        _objects;
  GrowableArray<Value>        _newobjects;
  GrowableArray<FieldBuffer*> _fields;

 public:
  MemoryBuffer() {}

  StoreField* store(StoreField* st) {
    if (!EliminateFieldAccess) {
      return st;
    }

    Value object = st->obj();
    Value value = st->value();
    ciField* field = st->field();
    if (field->holder()->is_loaded()) {
      int offset = field->offset_in_bytes();
      int index = _newobjects.find(object);
      if (index != -1) {
        // newly allocated object with no other stores performed on this field
        FieldBuffer* buf = _fields.at(index);
        if (buf->at(field) == nullptr && is_default_value(value)) {
#ifndef PRODUCT
          if (PrintIRDuringConstruction && Verbose) {
            tty->print_cr("Eliminated store for object %d:", index);
            st->print_line();
          }
#endif
          return nullptr;
        } else {
          buf->at_put(field, value);
        }
      } else {
        _objects.at_put_grow(offset, object, nullptr);
        _values.at_put(field, value);
      }

      store_value(value);
    } else {
      // if we held onto field names we could alias based on names but
      // we don't know what's being stored to so kill it all.
      kill();
    }
    return st;
  }


  // return true if this value corresponds to the default value of a field.
  bool is_default_value(Value value) {
    Constant* con = value->as_Constant();
    if (con) {
      switch (con->type()->tag()) {
        case intTag:    return con->type()->as_IntConstant()->value() == 0;
        case longTag:   return con->type()->as_LongConstant()->value() == 0;
        case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
        case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
        case objectTag: return con->type() == objectNull;
        default: ShouldNotReachHere();
      }
    }
    return false;
  }


  // return either the actual value of a load or the load itself
  Value load(LoadField* load) {
    if (!EliminateFieldAccess) {
      return load;
    }

    if (strict_fp_requires_explicit_rounding && load->type()->is_float_kind()) {
#ifdef IA32
      if (UseSSE < 2) {
        // can't skip load since value might get rounded as a side effect
        return load;
      }
#else
      Unimplemented();
#endif // IA32
    }

    ciField* field = load->field();
    Value object   = load->obj();
    if (field->holder()->is_loaded() && !field->is_volatile()) {
      int offset = field->offset_in_bytes();
      Value result = nullptr;
      int index = _newobjects.find(object);
      if (index != -1) {
        result = _fields.at(index)->at(field);
      } else if (_objects.at_grow(offset, nullptr) == object) {
        result = _values.at(field);
      }
      if (result != nullptr) {
#ifndef PRODUCT
        if (PrintIRDuringConstruction && Verbose) {
          tty->print_cr("Eliminated load: ");
          load->print_line();
        }
#endif
        assert(result->type()->tag() == load->type()->tag(), "wrong types");
        return result;
      }
    }
    return load;
  }

  // Record this newly allocated object
  void new_instance(NewInstance* object) {
    int index = _newobjects.length();
    _newobjects.append(object);
    if (_fields.at_grow(index, nullptr) == nullptr) {
      _fields.at_put(index, new FieldBuffer());
    } else {
      _fields.at(index)->kill();
    }
  }

  void store_value(Value value) {
    int index = _newobjects.find(value);
    if (index != -1) {
      // stored a newly allocated object into another object.
      // Assume we've lost track of it as a separate slice of memory.
      // We could do better by keeping track of whether individual
      // fields could alias each other.
      _newobjects.remove_at(index);
      // pull out the field info and append it at the end of the list
      // of field infos so it can be reused later.
      _fields.append(_fields.at(index));
      _fields.remove_at(index);
    }
  }

  void kill() {
    _newobjects.trunc_to(0);
    _objects.trunc_to(0);
    _values.kill();
  }
};


// Implementation of GraphBuilder's ScopeData

GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
  : _parent(parent)
  , _bci2block(nullptr)
  , _scope(nullptr)
  , _has_handler(false)
  , _stream(nullptr)
  , _work_list(nullptr)
  , _caller_stack_size(-1)
  , _continuation(nullptr)
  , _parsing_jsr(false)
  , _jsr_xhandlers(nullptr)
  , _num_returns(0)
  , _cleanup_block(nullptr)
  , _cleanup_return_prev(nullptr)
  , _cleanup_state(nullptr)
  , _ignore_return(false)
{
  if (parent != nullptr) {
    _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
  } else {
    _max_inline_size = C1MaxInlineSize;
  }
  if (_max_inline_size < C1MaxTrivialSize) {
    _max_inline_size = C1MaxTrivialSize;
  }
}


void GraphBuilder::kill_all() {
  if (UseLocalValueNumbering) {
    vmap()->kill_all();
  }
  _memory->kill();
}


BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
  if (parsing_jsr()) {
    // It is necessary to clone all blocks associated with a
    // subroutine, including those for exception handlers in the scope
    // of the method containing the jsr (because those exception
    // handlers may contain ret instructions in some cases).
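    //
    // For illustration (hypothetical Java source compiled by an old javac
    // that still emits jsr/ret):
    //
    //     try { work(); } finally { cleanup(); }
    //
    // The finally block is a subroutine reached both from the normal path
    // and from the exception path. If its block is shared with the enclosing
    // method's CFG, the clone below gives the jsr parse its own copy, so
    // states merged while parsing the subroutine don't corrupt the blocks of
    // the normal control flow.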
    BlockBegin* block = bci2block()->at(bci);
    if (block != nullptr && block == parent()->bci2block()->at(bci)) {
      BlockBegin* new_block = new BlockBegin(block->bci());
      if (PrintInitialBlockList) {
        tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
                      block->block_id(), block->bci(), new_block->block_id());
      }
      // copy data from cloned block
      new_block->set_depth_first_number(block->depth_first_number());
      if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
      // Preserve certain flags for assertion checking
      if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
      if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);

      // copy was_visited_flag to allow early detection of bailouts
      // if a block that is used in a jsr has already been visited before,
      // it is shared between the normal control flow and a subroutine
      // BlockBegin::try_merge returns false when the flag is set, this leads
      // to a compilation bailout
      if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);

      bci2block()->at_put(bci, new_block);
      block = new_block;
    }
    return block;
  } else {
    return bci2block()->at(bci);
  }
}


XHandlers* GraphBuilder::ScopeData::xhandlers() const {
  if (_jsr_xhandlers == nullptr) {
    assert(!parsing_jsr(), "");
    return scope()->xhandlers();
  }
  assert(parsing_jsr(), "");
  return _jsr_xhandlers;
}


void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
  _scope = scope;
  bool parent_has_handler = false;
  if (parent() != nullptr) {
    parent_has_handler = parent()->has_handler();
  }
  _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
}


void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
                                                      Instruction* return_prev,
                                                      ValueStack* return_state) {
  _cleanup_block       = block;
  _cleanup_return_prev = return_prev;
  _cleanup_state       = return_state;
}


void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
  if (_work_list == nullptr) {
    _work_list = new BlockList();
  }

  if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
    // Do not start parsing the continuation block while in a
    // sub-scope
    if (parsing_jsr()) {
      if (block == jsr_continuation()) {
        return;
      }
    } else {
      if (block == continuation()) {
        return;
      }
    }
    block->set(BlockBegin::is_on_work_list_flag);
    _work_list->push(block);

    sort_top_into_worklist(_work_list, block);
  }
}


void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
  assert(worklist->top() == top, "");
  // sort blocks descending into the work list
  const int dfn = top->depth_first_number();
  assert(dfn != -1, "unknown depth first number");
  int i = worklist->length()-2;
  while (i >= 0) {
    BlockBegin* b = worklist->at(i);
    if (b->depth_first_number() < dfn) {
      worklist->at_put(i+1, b);
    } else {
      break;
    }
    i--;
  }
  if (i >= -1) worklist->at_put(i + 1, top);
}
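
// For illustration (hypothetical depth-first numbers): pushing a block with
// dfn 6 onto a worklist holding [7, 5, 4] shifts the smaller entries up and
// yields [7, 6, 5, 4]. The list is kept sorted by descending dfn, so pop()
// always returns the unparsed block with the smallest depth-first number,
// i.e. blocks come off the list in reverse-postorder.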

BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
  if (is_work_list_empty()) {
    return nullptr;
  }
  return _work_list->pop();
}


bool GraphBuilder::ScopeData::is_work_list_empty() const {
  return (_work_list == nullptr || _work_list->length() == 0);
}


void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
  assert(parsing_jsr(), "");
  // clone all the exception handlers from the scope
  XHandlers* handlers = new XHandlers(scope()->xhandlers());
  const int n = handlers->length();
  for (int i = 0; i < n; i++) {
    // The XHandlers need to be adjusted to dispatch to the cloned
    // handler block instead of the default one but the synthetic
    // unlocker needs to be handled specially.  The synthetic unlocker
    // should be left alone since there can be only one and all code
    // should dispatch to the same one.
    XHandler* h = handlers->handler_at(i);
    assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
    h->set_entry_block(block_at(h->handler_bci()));
  }
  _jsr_xhandlers = handlers;
}


int GraphBuilder::ScopeData::num_returns() {
  if (parsing_jsr()) {
    return parent()->num_returns();
  }
  return _num_returns;
}


void GraphBuilder::ScopeData::incr_num_returns() {
  if (parsing_jsr()) {
    parent()->incr_num_returns();
  } else {
    ++_num_returns;
  }
}


// Implementation of GraphBuilder

#define INLINE_BAILOUT(msg)        { inline_bailout(msg); return false; }


void GraphBuilder::load_constant() {
  ciConstant con = stream()->get_constant();
  if (con.is_valid()) {
    ValueType* t = illegalType;
    ValueStack* patch_state = nullptr;
    switch (con.basic_type()) {
      case T_BOOLEAN: t = new IntConstant    (con.as_boolean()); break;
      case T_BYTE   : t = new IntConstant    (con.as_byte   ()); break;
      case T_CHAR   : t = new IntConstant    (con.as_char   ()); break;
      case T_SHORT  : t = new IntConstant    (con.as_short  ()); break;
      case T_INT    : t = new IntConstant    (con.as_int    ()); break;
      case T_LONG   : t = new LongConstant   (con.as_long   ()); break;
      case T_FLOAT  : t = new FloatConstant  (con.as_float  ()); break;
      case T_DOUBLE : t = new DoubleConstant (con.as_double ()); break;
      case T_ARRAY  : // fall-through
      case T_OBJECT : {
        ciObject* obj = con.as_object();
        if (!obj->is_loaded() || (PatchALot && !stream()->is_string_constant())) {
          // A Class, MethodType, MethodHandle, Dynamic, or String.
          patch_state = copy_state_before();
          t = new ObjectConstant(obj);
        } else {
          // Might be a Class, MethodType, MethodHandle, or Dynamic constant
          // result, which might turn out to be an array.
          if (obj->is_null_object()) {
            t = objectNull;
          } else if (obj->is_array()) {
            t = new ArrayConstant(obj->as_array());
          } else {
            t = new InstanceConstant(obj->as_instance());
          }
        }
        break;
      }
      default: ShouldNotReachHere();
    }
    Value x;
    if (patch_state != nullptr) {
      // Arbitrary memory effects from running BSM or class loading (using custom loader) during linkage.
      bool kills_memory = stream()->is_dynamic_constant() ||
                          (!stream()->is_string_constant() && !method()->holder()->has_trusted_loader());
      x = new Constant(t, patch_state, kills_memory);
    } else {
      x = new Constant(t);
    }

    // Unbox the value at runtime, if needed.
    // ConstantDynamic entry can be of a primitive type, but it is cached in boxed form.
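    // For illustration (hypothetical constant pool entry): an ldc of a
    // ConstantDynamic with descriptor I is materialized by the runtime as a
    // java.lang.Integer. The code below appends a LoadField of the box's
    // value field, so the rest of the graph sees a plain int, as required by
    // the bytecode's stack effect.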
    if (patch_state != nullptr) {
      int cp_index = stream()->get_constant_pool_index();
      BasicType type = stream()->get_basic_type_for_constant_at(cp_index);
      if (is_java_primitive(type)) {
        ciInstanceKlass* box_klass = ciEnv::current()->get_box_klass_for_primitive_type(type);
        assert(box_klass->is_loaded(), "sanity");
        int offset = java_lang_boxing_object::value_offset(type);
        ciField* value_field = box_klass->get_field_by_offset(offset, false /*is_static*/);
        x = new LoadField(append(x), offset, value_field, false /*is_static*/, patch_state, false /*needs_patching*/);
        t = as_ValueType(type);
      } else {
        assert(is_reference_type(type), "not a reference: %s", type2name(type));
      }
    }

    push(t, append(x));
  } else {
    BAILOUT("could not resolve a constant");
  }
}


void GraphBuilder::load_local(ValueType* type, int index) {
  Value x = state()->local_at(index);
  assert(x != nullptr && !x->type()->is_illegal(), "access of illegal local variable");
  push(type, x);
}


void GraphBuilder::store_local(ValueType* type, int index) {
  Value x = pop(type);
  store_local(state(), x, index);
}


void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
  if (parsing_jsr()) {
    // We need to do additional tracking of the location of the return
    // address for jsrs since we don't handle arbitrary jsr/ret
    // constructs. Here we are figuring out in which circumstances we
    // need to bail out.
    if (x->type()->is_address()) {
      scope_data()->set_jsr_return_address_local(index);

      // Also check parent jsrs (if any) at this time to see whether
      // they are using this local. We don't handle skipping over a
      // ret.
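      //
      // For illustration (hypothetical jsr/ret bytecode): if an outer
      // subroutine saved its return address with astore_1 and a nested
      // subroutine then executes its own astore_1, the outer return address
      // is gone and its ret can no longer be parsed; the loop below detects
      // this and bails out.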
      for (ScopeData* cur_scope_data = scope_data()->parent();
           cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
           cur_scope_data = cur_scope_data->parent()) {
        if (cur_scope_data->jsr_return_address_local() == index) {
          BAILOUT("subroutine overwrites return address from previous subroutine");
        }
      }
    } else if (index == scope_data()->jsr_return_address_local()) {
      scope_data()->set_jsr_return_address_local(-1);
    }
  }

  state->store_local(index, round_fp(x));
}


void GraphBuilder::load_indexed(BasicType type) {
  // In case of in-block code motion in range check elimination
  ValueStack* state_before = nullptr;
  int array_idx = state()->stack_size() - 2;
  if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
    // Save the entire state and re-execute on deopt when accessing flat arrays
    state_before = copy_state_before();
    state_before->set_should_reexecute(true);
  } else {
    state_before = copy_state_indexed_access();
  }
  compilation()->set_has_access_indexed(true);
  Value index = ipop();
  Value array = apop();
  Value length = nullptr;
  if (CSEArrayLength ||
      (array->as_Constant() != nullptr) ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
      (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }

  bool need_membar = false;
  LoadIndexed* load_indexed = nullptr;
  Instruction* result = nullptr;
  if (array->is_loaded_flat_array()) {
    // TODO 8350865 This is currently dead code
    ciType* array_type = array->declared_type();
    ciInlineKlass* elem_klass = array_type->as_flat_array_klass()->element_klass()->as_inline_klass();

    bool can_delay_access = false;
    ciBytecodeStream s(method());
    s.force_bci(bci());
    s.next();
    if (s.cur_bc() == Bytecodes::_getfield) {
      bool will_link;
      ciField* next_field = s.get_field(will_link);
      bool next_needs_patching = !next_field->holder()->is_initialized() ||
                                 !next_field->will_link(method(), Bytecodes::_getfield) ||
                                 PatchALot;
      can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching;
    }
    if (can_delay_access) {
      // potentially optimizable array access, storing information for delayed decision
      LoadIndexed* li = new LoadIndexed(array, index, length, type, state_before);
      DelayedLoadIndexed* dli = new DelayedLoadIndexed(li, state_before);
      li->set_delayed(dli);
      set_pending_load_indexed(dli);
      return; // Nothing else to do for now
    } else {
      if (elem_klass->is_empty()) {
        // No need to create a new instance, the default instance will be used instead
        load_indexed = new LoadIndexed(array, index, length, type, state_before);
        apush(append(load_indexed));
      } else {
        NewInstance* new_instance = new NewInstance(elem_klass, state_before, false, true);
        _memory->new_instance(new_instance);
        apush(append_split(new_instance));
        load_indexed = new LoadIndexed(array, index, length, type, state_before);
        load_indexed->set_vt(new_instance);
        // The LoadIndexed node will initialise this instance by copying from
        // the flat field. Ensure these stores are visible before any
        // subsequent store that publishes this reference.
        need_membar = true;
      }
    }
  } else {
    load_indexed = new LoadIndexed(array, index, length, type, state_before);
    if (profile_array_accesses() && is_reference_type(type)) {
      compilation()->set_would_profile(true);
      load_indexed->set_should_profile(true);
      load_indexed->set_profiled_method(method());
      load_indexed->set_profiled_bci(bci());
    }
  }
  result = append(load_indexed);
  if (need_membar) {
    append(new MemBar(lir_membar_storestore));
  }
  assert(!load_indexed->should_profile() || load_indexed == result, "should not be optimized out");
  if (!array->is_loaded_flat_array()) {
    push(as_ValueType(type), result);
  }
}
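
// For illustration of the StoreStore barrier above (hypothetical Java
// source with Valhalla flat arrays; note this path is currently marked as
// dead code above):
//
//     MyValue v = flatArray[i];   // copies the flat element into a fresh
//                                 //   heap instance
//     shared.field = v;           // publishes that instance
//
// Without the barrier, the publishing store could become visible to another
// thread before the field-copying stores, exposing a partially initialized
// value object.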


void GraphBuilder::store_indexed(BasicType type) {
  // In case of in-block code motion in range check elimination
  ValueStack* state_before = nullptr;
  int array_idx = state()->stack_size() - 3;
  if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
    // Save the entire state and re-execute on deopt when accessing flat arrays
    state_before = copy_state_before();
    state_before->set_should_reexecute(true);
  } else {
    state_before = copy_state_indexed_access();
  }
  compilation()->set_has_access_indexed(true);
  Value value = pop(as_ValueType(type));
  Value index = ipop();
  Value array = apop();
  Value length = nullptr;
  if (CSEArrayLength ||
      (array->as_Constant() != nullptr) ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
      (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }
  ciType* array_type = array->declared_type();
  bool check_boolean = false;
  if (array_type != nullptr) {
    if (array_type->is_loaded() &&
        array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
      assert(type == T_BYTE, "boolean store uses bastore");
      Value mask = append(new Constant(new IntConstant(1)));
      value = append(new LogicOp(Bytecodes::_iand, value, mask));
    }
  } else if (type == T_BYTE) {
    check_boolean = true;
  }

  StoreIndexed* store_indexed = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
  if (profile_array_accesses() && is_reference_type(type) && !array->is_loaded_flat_array()) {
    compilation()->set_would_profile(true);
    store_indexed->set_should_profile(true);
    store_indexed->set_profiled_method(method());
    store_indexed->set_profiled_bci(bci());
  }
  Instruction* result = append(store_indexed);
  assert(!store_indexed->should_profile() || store_indexed == result, "should not be optimized out");
  _memory->store_value(value);
}
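
// For illustration of the boolean masking above (hypothetical Java source):
// bastore is used for both byte[] and boolean[] stores. For
//
//     boolean[] b = ...; b[0] = v;
//
// the statically known boolean[] element type causes the stored value to be
// normalized with v & 1, so only 0 or 1 ever lands in the array. When the
// array's declared type is unknown, check_boolean defers that decision to
// the runtime type of the array.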

void GraphBuilder::stack_op(Bytecodes::Code code) {
  switch (code) {
    case Bytecodes::_pop:
      { Value w = state()->raw_pop();
      }
      break;
    case Bytecodes::_pop2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
      }
      break;
    case Bytecodes::_dup:
      { Value w = state()->raw_pop();
        state()->raw_push(w);
        state()->raw_push(w);
      }
      break;
    case Bytecodes::_dup_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        Value w4 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w4);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_swap:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}


void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
  Value y = pop(type);
  Value x = pop(type);
  Value res = new ArithmeticOp(code, x, y, state_before);
  // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  res = append(res);
  res = round_fp(res);
  push(type, res);
}


void GraphBuilder::negate_op(ValueType* type) {
  push(type, append(new NegateOp(pop(type))));
}


void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  Value s = ipop();
  Value x = pop(type);
  // try to simplify
  // Note: This code should go into the canonicalizer as soon as it
  // can handle canonicalized forms that contain more than one node.
  if (CanonicalizeNodes && code == Bytecodes::_iushr) {
    // pattern: x >>> s
    IntConstant* s1 = s->type()->as_IntConstant();
    if (s1 != nullptr) {
      // pattern: x >>> s1, with s1 constant
      ShiftOp* l = x->as_ShiftOp();
      if (l != nullptr && l->op() == Bytecodes::_ishl) {
        // pattern: (a << b) >>> s1
        IntConstant* s0 = l->y()->type()->as_IntConstant();
        if (s0 != nullptr) {
          // pattern: (a << s0) >>> s1
          const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
          const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
          if (s0c == s1c) {
            if (s0c == 0) {
              // pattern: (a << 0) >>> 0 => simplify to: a
              ipush(l->x());
            } else {
              // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
              assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
              const int m = checked_cast<int>(right_n_bits(BitsPerInt - s0c));
              Value s = append(new Constant(new IntConstant(m)));
              ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
            }
            return;
          }
        }
      }
    }
  }
  // could not simplify
  push(type, append(new ShiftOp(code, x, s)));
}
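
// A worked instance of the simplification above: for (a << 3) >>> 3 both
// shift amounts are s0c == s1c == 3, so the pair of shifts is replaced by
// a & right_n_bits(32 - 3), i.e. a & 0x1FFFFFFF, which clears the top three
// bits just as the shifts would.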


void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  Value y = pop(type);
  Value x = pop(type);
  push(type, append(new LogicOp(code, x, y)));
}


void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  ipush(append(new CompareOp(code, x, y, state_before)));
}


void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
  push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
}


void GraphBuilder::increment() {
  int index = stream()->get_index();
  int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  load_local(intType, index);
  ipush(append(new Constant(new IntConstant(delta))));
  arithmetic_op(intType, Bytecodes::_iadd);
  store_local(intType, index);
}


void GraphBuilder::_goto(int from_bci, int to_bci) {
  Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
  if (is_profiling()) {
    compilation()->set_would_profile(true);
    x->set_profiled_bci(bci());
    if (profile_branches()) {
      x->set_profiled_method(method());
      x->set_should_profile(true);
    }
  }
  append(x);
}


void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  BlockBegin* tsux = block_at(stream()->get_dest());
  BlockBegin* fsux = block_at(stream()->next_bci());
  bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();

  bool subst_check = false;
  if (EnableValhalla && (stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne)) {
    ValueType* left_vt = x->type();
    ValueType* right_vt = y->type();
    if (left_vt->is_object()) {
      assert(right_vt->is_object(), "must be");
      ciKlass* left_klass = x->as_loaded_klass_or_null();
      ciKlass* right_klass = y->as_loaded_klass_or_null();

      if (left_klass == nullptr || right_klass == nullptr) {
        // The klass is still unloaded, or came from a Phi node. Go slow case;
        subst_check = true;
      } else if (left_klass->can_be_inline_klass() || right_klass->can_be_inline_klass()) {
        // Either operand may be a value object, but we're not sure. Go slow case;
        subst_check = true;
      } else {
        // No need to do substitutability check
      }
    }
  }
  if ((stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne) &&
      is_profiling() && profile_branches()) {
    compilation()->set_would_profile(true);
    append(new ProfileACmpTypes(method(), bci(), x, y));
  }

  // In case of loop invariant code motion or predicate insertion
  // before the body of a loop the state is needed
  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic() || subst_check) ? state_before : nullptr, is_bb, subst_check));

  assert(i->as_Goto() == nullptr ||
         (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) ||
         (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())),
         "safepoint state of Goto returned by canonicalizer incorrect");

  if (is_profiling()) {
    If* if_node = i->as_If();
    if (if_node != nullptr) {
      // Note that we'd collect profile data in this method if we wanted it.
      compilation()->set_would_profile(true);
      // At level 2 we need the proper bci to count backedges
      if_node->set_profiled_bci(bci());
      if (profile_branches()) {
        // Successors can be rotated by the canonicalizer, check for this case.
        if_node->set_profiled_method(method());
        if_node->set_should_profile(true);
        if (if_node->tsux() == fsux) {
          if_node->set_swapped(true);
        }
      }
      return;
    }

    // Check if this If was reduced to Goto.
    Goto *goto_node = i->as_Goto();
    if (goto_node != nullptr) {
      compilation()->set_would_profile(true);
      goto_node->set_profiled_bci(bci());
      if (profile_branches()) {
        goto_node->set_profiled_method(method());
        goto_node->set_should_profile(true);
        // Find out which successor is used.
        if (goto_node->default_sux() == tsux) {
          goto_node->set_direction(Goto::taken);
        } else if (goto_node->default_sux() == fsux) {
          goto_node->set_direction(Goto::not_taken);
        } else {
          ShouldNotReachHere();
        }
      }
      return;
    }
  }
}


void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(intZero));
  ValueStack* state_before = copy_state_before();
  Value x = ipop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(objectNull));
  ValueStack* state_before = copy_state_before();
  Value x = apop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  if_node(x, cond, y, state_before);
}


void GraphBuilder::jsr(int dest) {
  // We only handle well-formed jsrs (those which are "block-structured").
  // If the bytecodes are strange (jumping out of a jsr block) then we
  // might end up trying to re-parse a block containing a jsr which
  // has already been activated. Watch for this case and bail out.
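  //
  // For illustration (hypothetical jsr bytecode): a subroutine at L that
  // jsr's back to L, directly or through a chain of subroutines, would make
  // the parser re-enter a subroutine that is still being parsed; the scan of
  // the active ScopeData chain below catches this recursion and bails out.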
  if (next_bci() >= method()->code_size()) {
    // This can happen if the subroutine does not terminate with a ret,
    // effectively turning the jsr into a goto.
    BAILOUT("too-complicated jsr/ret structure");
  }
  for (ScopeData* cur_scope_data = scope_data();
       cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
       cur_scope_data = cur_scope_data->parent()) {
    if (cur_scope_data->jsr_entry_bci() == dest) {
      BAILOUT("too-complicated jsr/ret structure");
    }
  }

  push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  if (!try_inline_jsr(dest)) {
    return; // bailed out while parsing and inlining subroutine
  }
}


void GraphBuilder::ret(int local_index) {
  if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");

  if (local_index != scope_data()->jsr_return_address_local()) {
    BAILOUT("can not handle complicated jsr/ret constructs");
  }

  // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  append(new Goto(scope_data()->jsr_continuation(), false));
}


void GraphBuilder::table_switch() {
  Bytecode_tableswitch sw(stream());
  const int l = sw.length();
  if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it
    // can handle canonicalized forms that contain more than one node.
    Value key = append(new Constant(new IntConstant(sw.low_key())));
    BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
    BlockBegin* fsux = block_at(bci() + sw.default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(is_bb);
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors
    BlockList* sux = new BlockList(l + 1, nullptr);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
      if (sw.dest_offset_at(i) < 0) has_bb = true;
    }
    // add default successor
    if (sw.default_offset() < 0) has_bb = true;
    sux->at_put(i, block_at(bci() + sw.default_offset()));
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(has_bb);
    Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT
    if (res->as_Goto()) {
      for (i = 0; i < l; i++) {
        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
          assert(res->as_Goto()->is_safepoint() == (sw.dest_offset_at(i) < 0), "safepoint state of Goto returned by canonicalizer incorrect");
        }
      }
    }
#endif
  }
}
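
// For illustration (hypothetical Java source): a switch with a single case,
// such as
//
//     switch (x) { case 5: a(); break; default: b(); }
//
// compiles to a tableswitch with one entry, which the code above lowers to
// the equivalent of "if (x == 5) a(); else b();" rather than emitting a
// one-entry switch node.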
void GraphBuilder::lookup_switch() {
  Bytecode_lookupswitch sw(stream());
  const int l = sw.number_of_pairs();
  if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it
    // can handle canonicalized forms that contain more than one node.
    // simplify to If
    LookupswitchPair pair = sw.pair_at(0);
    Value key = append(new Constant(new IntConstant(pair.match())));
    BlockBegin* tsux = block_at(bci() + pair.offset());
    BlockBegin* fsux = block_at(bci() + sw.default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(is_bb);
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors & keys
    BlockList* sux = new BlockList(l + 1, nullptr);
    intArray* keys = new intArray(l, l, 0);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      LookupswitchPair pair = sw.pair_at(i);
      if (pair.offset() < 0) has_bb = true;
      sux->at_put(i, block_at(bci() + pair.offset()));
      keys->at_put(i, pair.match());
    }
    // add default successor
    if (sw.default_offset() < 0) has_bb = true;
    sux->at_put(i, block_at(bci() + sw.default_offset()));
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(has_bb);
    Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
#ifdef ASSERT
    if (res->as_Goto()) {
      for (i = 0; i < l; i++) {
        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
          assert(res->as_Goto()->is_safepoint() == (sw.pair_at(i).offset() < 0), "safepoint state of Goto returned by canonicalizer incorrect");
        }
      }
    }
#endif
  }
}
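
// Background note (summarizing the JVM spec, not part of the original
// source): tableswitch stores a dense jump table indexed by (key - low_key),
// while lookupswitch stores sorted (match, offset) pairs for sparse keys.
// Both are parsed here into a single HIR node with one successor per target
// plus the default.
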
void GraphBuilder::call_register_finalizer() {
  // If the receiver requires finalization then emit code to perform
  // the registration on return.

  // Gather some type information about the receiver
  Value receiver = state()->local_at(0);
  assert(receiver != nullptr, "must have a receiver");
  ciType* declared_type = receiver->declared_type();
  ciType* exact_type = receiver->exact_type();
  if (exact_type == nullptr &&
      receiver->as_Local() &&
      receiver->as_Local()->java_index() == 0) {
    ciInstanceKlass* ik = compilation()->method()->holder();
    if (ik->is_final()) {
      exact_type = ik;
    } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
      // test class is leaf class
      compilation()->dependency_recorder()->assert_leaf_type(ik);
      exact_type = ik;
    } else {
      declared_type = ik;
    }
  }

  // see if we know statically that registration isn't required
  bool needs_check = true;
  if (exact_type != nullptr) {
    needs_check = exact_type->as_instance_klass()->has_finalizer();
  } else if (declared_type != nullptr) {
    ciInstanceKlass* ik = declared_type->as_instance_klass();
    if (!Dependencies::has_finalizable_subclass(ik)) {
      compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
      needs_check = false;
    }
  }

  if (needs_check) {
    // Perform the registration of finalizable objects.
    ValueStack* state_before = copy_state_for_exception();
    load_local(objectType, 0);
    append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
                               state()->pop_arguments(1),
                               true, state_before, true));
  }
}
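
// Illustrative background (not part of the original source): the check above
// exists because a class such as
//   class Resource { @Override protected void finalize() { close(); } }
// must have its instances registered with the runtime when Object.<init>
// returns, so the GC can later enqueue them for finalization. If neither the
// exact nor the declared receiver type can have a finalizable subclass, the
// registration call is provably dead and is omitted.
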
void GraphBuilder::method_return(Value x, bool ignore_return) {
  if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }

  // The conditions for a memory barrier are described in Parse::do_exits().
  bool need_mem_bar = false;
  if (method()->is_object_constructor() &&
      (scope()->wrote_final() || scope()->wrote_stable() ||
       (AlwaysSafeConstructors && scope()->wrote_fields()) ||
       (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) {
    need_mem_bar = true;
  }

  BasicType bt = method()->return_type()->basic_type();
  switch (bt) {
    case T_BYTE:
    {
      Value shift = append(new Constant(new IntConstant(24)));
      x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
      x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
      break;
    }
    case T_SHORT:
    {
      Value shift = append(new Constant(new IntConstant(16)));
      x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
      x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
      break;
    }
    case T_CHAR:
    {
      Value mask = append(new Constant(new IntConstant(0xFFFF)));
      x = append(new LogicOp(Bytecodes::_iand, x, mask));
      break;
    }
    case T_BOOLEAN:
    {
      Value mask = append(new Constant(new IntConstant(1)));
      x = append(new LogicOp(Bytecodes::_iand, x, mask));
      break;
    }
    default:
      break;
  }

  // Check to see whether we are inlining. If so, Return
  // instructions become Gotos to the continuation point.
  if (continuation() != nullptr) {

    int invoke_bci = state()->caller_state()->bci();

    if (x != nullptr && !ignore_return) {
      ciMethod* caller = state()->scope()->caller()->method();
      Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci);
      if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) {
        ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type();
        if (declared_ret_type->is_klass() && x->exact_type() == nullptr &&
            x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) {
          x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before()));
        }
      }
    }

    assert(!method()->is_synchronized() || InlineSynchronizedMethods, "cannot inline synchronized methods yet");

    if (compilation()->env()->dtrace_method_probes()) {
      // Report exit from inline methods
      Values* args = new Values(1);
      args->push(append(new Constant(new MethodConstant(method()))));
      append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
    }

    // If the inlined method is synchronized, the monitor must be
    // released before we jump to the continuation block.
    if (method()->is_synchronized()) {
      assert(state()->locks_size() == 1, "receiver must be locked here");
      monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
    }

    if (need_mem_bar) {
      append(new MemBar(lir_membar_storestore));
    }

    // State at end of inlined method is the state of the caller
    // without the method parameters on stack, including the
    // return value, if any, of the inlined method on operand stack.
    set_state(state()->caller_state()->copy_for_parsing());
    if (x != nullptr) {
      if (!ignore_return) {
        state()->push(x->type(), x);
      }
      if (profile_return() && x->type()->is_object_kind()) {
        ciMethod* caller = state()->scope()->method();
        profile_return_type(x, method(), caller, invoke_bci);
      }
    }
    Goto* goto_callee = new Goto(continuation(), false);

    // See whether this is the first return; if so, store off some
    // of the state for later examination
    if (num_returns() == 0) {
      set_inline_cleanup_info();
    }

    // The current bci() is in the wrong scope, so use the bci() of
    // the continuation point.
    append_with_bci(goto_callee, scope_data()->continuation()->bci());
    incr_num_returns();
    return;
  }

  state()->truncate_stack(0);
  if (method()->is_synchronized()) {
    // perform the unlocking before exiting the method
    Value receiver;
    if (!method()->is_static()) {
      receiver = _initial_state->local_at(0);
    } else {
      receiver = append(new Constant(new ClassConstant(method()->holder())));
    }
    append_split(new MonitorExit(receiver, state()->unlock()));
  }

  if (need_mem_bar) {
    append(new MemBar(lir_membar_storestore));
  }

  assert(!ignore_return, "Ignoring return value works only for inlining");
  append(new Return(x));
}

Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) {
  if (!field_value.is_valid()) return nullptr;

  BasicType field_type = field_value.basic_type();
  ValueType* value = as_ValueType(field_value);

  // Attach dimension info to stable arrays.
  if (FoldStableValues &&
      field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
    ciArray* array = field_value.as_object()->as_array();
    jint dimension = field->type()->as_array_klass()->dimension();
    value = new StableArrayConstant(array, dimension);
  }

  switch (field_type) {
    case T_ARRAY:
    case T_OBJECT:
      if (field_value.as_object()->should_be_constant()) {
        return new Constant(value);
      }
      return nullptr; // Not a constant.
    default:
      return new Constant(value);
  }
}
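
// Background note (an assumption based on HotSpot's @Stable convention, not
// part of the original source): a @Stable field is trusted to be written at
// most once, so a non-default value may be constant-folded. For arrays, the
// dimension recorded in StableArrayConstant says how many levels deep that
// trust extends, e.g. a stable `int[][]` permits folding reads of both the
// outer and the inner arrays once their elements are non-default.
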
void GraphBuilder::copy_inline_content(ciInlineKlass* vk, Value src, int src_off, Value dest, int dest_off, ValueStack* state_before, ciField* enclosing_field) {
  for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
    ciField* field = vk->declared_nonstatic_field_at(i);
    int offset = field->offset_in_bytes() - vk->payload_offset();
    if (field->is_flat()) {
      bool needs_atomic_access = !field->is_null_free() || field->is_volatile();
      assert(!needs_atomic_access, "Atomic access in non-atomic container");
      copy_inline_content(field->type()->as_inline_klass(), src, src_off + offset, dest, dest_off + offset, state_before, enclosing_field);
      if (!field->is_null_free()) {
        // Nullable, copy the null marker using Unsafe because null markers are not real fields
        int null_marker_offset = field->null_marker_offset() - vk->payload_offset();
        Value offset = append(new Constant(new LongConstant(src_off + null_marker_offset)));
        Value nm = append(new UnsafeGet(T_BOOLEAN, src, offset, false));
        offset = append(new Constant(new LongConstant(dest_off + null_marker_offset)));
        append(new UnsafePut(T_BOOLEAN, dest, offset, nm, false));
      }
    } else {
      Value value = append(new LoadField(src, src_off + offset, field, false, state_before, false));
      StoreField* store = new StoreField(dest, dest_off + offset, field, value, false, state_before, false);
      store->set_enclosing_field(enclosing_field);
      append(store);
    }
  }
}

void GraphBuilder::access_field(Bytecodes::Code code) {
  bool will_link;
  ciField* field = stream()->get_field(will_link);
  ciInstanceKlass* holder = field->holder();
  BasicType field_type = field->type()->basic_type();
  ValueType* type = as_ValueType(field_type);

  // call will_link again to determine if the field is valid.
  const bool needs_patching = !holder->is_loaded() ||
                              !field->will_link(method(), code) ||
                              (!field->is_flat() && PatchALot);

  ValueStack* state_before = nullptr;
  if (!holder->is_initialized() || needs_patching) {
    // save state before instruction for debug info when
    // deoptimization happens during patching
    state_before = copy_state_before();
  }

  Value obj = nullptr;
  if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
    if (state_before != nullptr) {
      // build a patching constant
      obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
    } else {
      obj = new Constant(new InstanceConstant(holder->java_mirror()));
    }
  }

  if (code == Bytecodes::_putfield) {
    scope()->set_wrote_fields();
    if (field->is_volatile()) {
      scope()->set_wrote_volatile();
    }
    if (field->is_final()) {
      scope()->set_wrote_final();
    }
    if (field->is_stable()) {
      scope()->set_wrote_stable();
    }
  }
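  // Note (added for clarity, not part of the original source): when
  // needs_patching is set the field could not be resolved at compile time,
  // so no valid offset exists yet; -1 serves as a placeholder and the real
  // offset is filled in by the runtime patching stub on first execution.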
  int offset = !needs_patching ? field->offset_in_bytes() : -1;
  switch (code) {
    case Bytecodes::_getstatic: {
      // check for compile-time constants, i.e., initialized static final fields
      Value constant = nullptr;
      if (field->is_static_constant() && !PatchALot) {
        ciConstant field_value = field->constant_value();
        assert(!field->is_stable() || !field_value.is_null_or_zero(),
               "stable static w/ default value shouldn't be a constant");
        constant = make_constant(field_value, field);
      } else if (field->is_null_free() && field->type()->as_instance_klass()->is_initialized() &&
                 field->type()->as_inline_klass()->is_empty()) {
        // Loading from a field of an empty, null-free inline type. Just return the default instance.
        constant = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
      }
      if (constant != nullptr) {
        push(type, append(constant));
      } else {
        if (state_before == nullptr) {
          state_before = copy_state_for_exception();
        }
        LoadField* load_field = new LoadField(append(obj), offset, field, true,
                                              state_before, needs_patching);
        push(type, append(load_field));
      }
      break;
    }
    case Bytecodes::_putstatic: {
      Value val = pop(type);
      if (state_before == nullptr) {
        state_before = copy_state_for_exception();
      }
      if (field_type == T_BOOLEAN) {
        Value mask = append(new Constant(new IntConstant(1)));
        val = append(new LogicOp(Bytecodes::_iand, val, mask));
      }
      if (field->is_null_free()) {
        null_check(val);
      }
      if (field->is_null_free() && field->type()->is_loaded() && field->type()->as_inline_klass()->is_empty()) {
        // Storing to a field of an empty, null-free inline type. Ignore.
        break;
      }
      append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
      break;
    }
    case Bytecodes::_getfield: {
      // Check for compile-time constants, i.e., trusted final non-static fields.
      Value constant = nullptr;
      if (state_before == nullptr && field->is_flat()) {
        // Save the entire state and re-execute on deopt when accessing flat fields
        assert(Interpreter::bytecode_should_reexecute(code), "should reexecute");
        state_before = copy_state_before();
      }
      if (!has_pending_field_access() && !has_pending_load_indexed()) {
        obj = apop();
        ObjectType* obj_type = obj->type()->as_ObjectType();
        if (field->is_null_free() && field->type()->as_instance_klass()->is_initialized()
            && field->type()->as_inline_klass()->is_empty()) {
          // Loading from a field of an empty, null-free inline type. Just return the default instance.
          null_check(obj);
          constant = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
        } else if (field->is_constant() && !field->is_flat() && obj_type->is_constant() && !PatchALot) {
          ciObject* const_oop = obj_type->constant_value();
          if (!const_oop->is_null_object() && const_oop->is_loaded()) {
            ciConstant field_value = field->constant_value_of(const_oop);
            if (field_value.is_valid()) {
              if (field->is_null_free() && field_value.is_null_or_zero()) {
                // Non-flat inline type field. Replace null by the default value.
                constant = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance()));
              } else {
                constant = make_constant(field_value, field);
              }
              // For CallSite objects add a dependency for invalidation of the optimization.
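              // (Background, not part of the original source: a
              // ConstantCallSite's target never changes, so it can be folded
              // outright; for mutable call sites the recorded dependency lets
              // the JIT discard this code if CallSite.setTarget() ever
              // installs a new target.)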
              if (field->is_call_site_target()) {
                ciCallSite* call_site = const_oop->as_call_site();
                if (!call_site->is_fully_initialized_constant_call_site()) {
                  ciMethodHandle* target = field_value.as_object()->as_method_handle();
                  dependency_recorder()->assert_call_site_target_value(call_site, target);
                }
              }
            }
          }
        }
      }
      if (constant != nullptr) {
        push(type, append(constant));
      } else {
        if (state_before == nullptr) {
          state_before = copy_state_for_exception();
        }
        if (!field->is_flat()) {
          if (has_pending_field_access()) {
            assert(!needs_patching, "Can't patch delayed field access");
            obj = pending_field_access()->obj();
            offset += pending_field_access()->offset() - field->holder()->as_inline_klass()->payload_offset();
            field = pending_field_access()->holder()->get_field_by_offset(offset, false);
            assert(field != nullptr, "field not found");
            set_pending_field_access(nullptr);
          } else if (has_pending_load_indexed()) {
            assert(!needs_patching, "Can't patch delayed field access");
            pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
            LoadIndexed* li = pending_load_indexed()->load_instr();
            li->set_type(type);
            push(type, append(li));
            set_pending_load_indexed(nullptr);
            break;
          }
          LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
          Value replacement = !needs_patching ? _memory->load(load) : load;
          if (replacement != load) {
            assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
            // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
            // conversion. Emit an explicit conversion here to get the correct field value after the write.
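            // Illustrative example (not part of the original source): if the
            // 32-bit value 0x180 reaches a putfield of a byte field, the field
            // ends up holding (byte)0x80, i.e. -128 after sign extension. The
            // remembered 32-bit store value therefore cannot be reused as-is,
            // and an i2b/i2c/i2s Convert node is emitted below.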
            switch (field_type) {
              case T_BOOLEAN:
              case T_BYTE:
                replacement = append(new Convert(Bytecodes::_i2b, replacement, type));
                break;
              case T_CHAR:
                replacement = append(new Convert(Bytecodes::_i2c, replacement, type));
                break;
              case T_SHORT:
                replacement = append(new Convert(Bytecodes::_i2s, replacement, type));
                break;
              default:
                break;
            }
            push(type, replacement);
          } else {
            push(type, append(load));
          }
        } else {
          // Flat field
          assert(!needs_patching, "Can't patch flat inline type field access");
          ciInlineKlass* inline_klass = field->type()->as_inline_klass();
          bool is_naturally_atomic = inline_klass->nof_declared_nonstatic_fields() <= 1;
          bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
          if (needs_atomic_access) {
            assert(!has_pending_field_access(), "Pending field accesses are not supported");
            LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
            push(type, append(load));
          } else {
            assert(field->is_null_free(), "must be null-free");
            // Look at the next bytecode to check if we can delay the field access
            bool can_delay_access = false;
            ciBytecodeStream s(method());
            s.force_bci(bci());
            s.next();
            if (s.cur_bc() == Bytecodes::_getfield && !needs_patching) {
              ciField* next_field = s.get_field(will_link);
              bool next_needs_patching = !next_field->holder()->is_loaded() ||
                                         !next_field->will_link(method(), Bytecodes::_getfield) ||
                                         PatchALot;
              // We can't update the offset for atomic accesses
              bool next_needs_atomic_access = !next_field->is_null_free() || next_field->is_volatile();
              can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching && !next_needs_atomic_access;
            }
            if (can_delay_access) {
              if (has_pending_load_indexed()) {
                pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
              } else if (has_pending_field_access()) {
                pending_field_access()->inc_offset(offset - field->holder()->as_inline_klass()->payload_offset());
              } else {
                null_check(obj);
                DelayedFieldAccess* dfa = new DelayedFieldAccess(obj, field->holder(), field->offset_in_bytes(), state_before);
                set_pending_field_access(dfa);
              }
            } else {
              scope()->set_wrote_final();
              scope()->set_wrote_fields();
              bool need_membar = false;
              if (field->is_null_free() && inline_klass->is_initialized() && inline_klass->is_empty()) {
                apush(append(new Constant(new InstanceConstant(inline_klass->default_instance()))));
                if (has_pending_field_access()) {
                  set_pending_field_access(nullptr);
                } else if (has_pending_load_indexed()) {
                  set_pending_load_indexed(nullptr);
                }
              } else if (has_pending_load_indexed()) {
                assert(!needs_patching, "Can't patch delayed field access");
                pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
                NewInstance* vt = new NewInstance(inline_klass, pending_load_indexed()->state_before(), false, true);
                _memory->new_instance(vt);
                pending_load_indexed()->load_instr()->set_vt(vt);
                apush(append_split(vt));
                append(pending_load_indexed()->load_instr());
                set_pending_load_indexed(nullptr);
                need_membar = true;
              } else {
                if (has_pending_field_access()) {
                  state_before = pending_field_access()->state_before();
                }
                NewInstance* new_instance = new NewInstance(inline_klass, state_before, false, true);
                _memory->new_instance(new_instance);
                apush(append_split(new_instance));
                if (has_pending_field_access()) {
                  copy_inline_content(inline_klass, pending_field_access()->obj(),
                                      pending_field_access()->offset() + field->offset_in_bytes() - field->holder()->as_inline_klass()->payload_offset(),
                                      new_instance, inline_klass->payload_offset(), state_before);
                  set_pending_field_access(nullptr);
                } else {
                  copy_inline_content(inline_klass, obj, field->offset_in_bytes(), new_instance, inline_klass->payload_offset(), state_before);
                }
                need_membar = true;
              }
              if (need_membar) {
                // If we allocated a new instance ensure the stores to copy the
                // field contents are visible before any subsequent store that
                // publishes this reference.
                append(new MemBar(lir_membar_storestore));
              }
            }
          }
        }
      }
      break;
    }
    case Bytecodes::_putfield: {
      Value val = pop(type);
      obj = apop();
      if (state_before == nullptr) {
        state_before = copy_state_for_exception();
      }
      if (field_type == T_BOOLEAN) {
        Value mask = append(new Constant(new IntConstant(1)));
        val = append(new LogicOp(Bytecodes::_iand, val, mask));
      }
      if (field->is_null_free() && field->type()->is_loaded() && field->type()->as_inline_klass()->is_empty()) {
        // Storing to a field of an empty, null-free inline type. Ignore.
        null_check(obj);
        null_check(val);
      } else if (!field->is_flat()) {
        if (field->is_null_free()) {
          null_check(val);
        }
        StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
        if (!needs_patching) store = _memory->store(store);
        if (store != nullptr) {
          append(store);
        }
      } else {
        // Flat field
        assert(!needs_patching, "Can't patch flat inline type field access");
        ciInlineKlass* inline_klass = field->type()->as_inline_klass();
        bool is_naturally_atomic = inline_klass->nof_declared_nonstatic_fields() <= 1;
        bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
        if (needs_atomic_access) {
          if (field->is_null_free()) {
            null_check(val);
          }
          append(new StoreField(obj, offset, field, val, false, state_before, needs_patching));
        } else {
          assert(field->is_null_free(), "must be null-free");
          copy_inline_content(inline_klass, val, inline_klass->payload_offset(), obj, offset, state_before, field);
        }
      }
      break;
    }
    default:
      ShouldNotReachHere();
      break;
  }
}

Dependencies* GraphBuilder::dependency_recorder() const {
  assert(DeoptC1, "need debug information");
  return compilation()->dependency_recorder();
}
// How many arguments do we want to profile?
Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
  int n = 0;
  bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
  start = has_receiver ? 1 : 0;
  if (profile_arguments()) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
    }
  }
  // If we are inlining then we need to collect arguments to profile parameters for the target
  if (profile_parameters() && target != nullptr) {
    if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) {
      // The receiver is profiled on method entry so it's included in
      // the number of parameters but here we're only interested in
      // actual arguments.
      n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start);
    }
  }
  if (n > 0) {
    return new Values(n);
  }
  return nullptr;
}

void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) {
#ifdef ASSERT
  bool ignored_will_link;
  ciSignature* declared_signature = nullptr;
  ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
  assert(expected == obj_args->capacity() || real_target->is_method_handle_intrinsic(), "missed on arg?");
#endif
}

// Collect arguments that we want to profile in a list
Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) {
  int start = 0;
  Values* obj_args = args_list_for_profiling(target, start, may_have_receiver);
  if (obj_args == nullptr) {
    return nullptr;
  }
  int s = obj_args->capacity();
  // if called through method handle invoke, some arguments may have been popped
  for (int i = start, j = 0; j < s && i < args->length(); i++) {
    if (args->at(i)->type()->is_object_kind()) {
      obj_args->push(args->at(i));
      j++;
    }
  }
  check_args_for_profiling(obj_args, s);
  return obj_args;
}

void GraphBuilder::invoke(Bytecodes::Code code) {
  bool will_link;
  ciSignature* declared_signature = nullptr;
  ciMethod* target = stream()->get_method(will_link, &declared_signature);
  ciKlass* holder = stream()->get_declared_method_holder();
  const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
  assert(declared_signature != nullptr, "cannot be null");
  assert(will_link == target->is_loaded(), "");
  JFR_ONLY(Jfr::on_resolution(this, holder, target); CHECK_BAILOUT();)

  ciInstanceKlass* klass = target->holder();
  assert(!target->is_loaded() || klass->is_loaded(), "loaded target must imply loaded klass");

  // check if CHA possible: if so, change the code to invoke_special
  ciInstanceKlass* calling_klass = method()->holder();
  ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  ciInstanceKlass* actual_recv = callee_holder;

  CompileLog* log = compilation()->log();
  if (log != nullptr)
    log->elem("call method='%d' instr='%s'",
              log->identify(target),
              Bytecodes::name(code));

  // Some methods are obviously bindable without any type checks so
  // convert them directly to an invokespecial or invokestatic.
  if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
    switch (bc_raw) {
      case Bytecodes::_invokeinterface:
        // convert to invokespecial if the target is the private interface method.
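        // (Background, not part of the original source: since the nestmates
        // change in JEP 181, private interface methods can be invoked with
        // invokeinterface even though they never require itable dispatch, so
        // rewriting the call to invokespecial is safe here.)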
        if (target->is_private()) {
          assert(holder->is_interface(), "How did we get a non-interface method here!");
          code = Bytecodes::_invokespecial;
        }
        break;
      case Bytecodes::_invokevirtual:
        code = Bytecodes::_invokespecial;
        break;
      case Bytecodes::_invokehandle:
        code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
        break;
      default:
        break;
    }
  } else {
    if (bc_raw == Bytecodes::_invokehandle) {
      assert(!will_link, "should come here only for unlinked call");
      code = Bytecodes::_invokespecial;
    }
  }

  if (code == Bytecodes::_invokespecial) {
    // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
    ciKlass* receiver_constraint = nullptr;

    if (bc_raw == Bytecodes::_invokeinterface) {
      receiver_constraint = holder;
    } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_constructor() && calling_klass->is_interface()) {
      receiver_constraint = calling_klass;
    }

    if (receiver_constraint != nullptr) {
      int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
      Value receiver = state()->stack_at(index);
      CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before());
      // go to uncommon_trap when checkcast fails
      c->set_invokespecial_receiver_check();
      state()->stack_at_put(index, append_split(c));
    }
  }

  // Push appendix argument (MethodType, CallSite, etc.), if one.
  bool patch_for_appendix = false;
  int patching_appendix_arg = 0;
  if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) {
    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
    apush(arg);
    patch_for_appendix = true;
    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
  } else if (stream()->has_appendix()) {
    ciObject* appendix = stream()->get_appendix();
    Value arg = append(new Constant(new ObjectConstant(appendix)));
    apush(arg);
  }

  ciMethod* cha_monomorphic_target = nullptr;
  ciMethod* exact_target = nullptr;
  Value better_receiver = nullptr;
  if (UseCHA && DeoptC1 && target->is_loaded() &&
      !(// %%% FIXME: Are both of these relevant?
        target->is_method_handle_intrinsic() ||
        target->is_compiled_lambda_form()) &&
      !patch_for_appendix) {
    Value receiver = nullptr;
    ciInstanceKlass* receiver_klass = nullptr;
    bool type_is_exact = false;
    // try to find a precise receiver type
    if (will_link && !target->is_static()) {
      int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
      receiver = state()->stack_at(index);
      ciType* type = receiver->exact_type();
      if (type != nullptr && type->is_loaded()) {
        assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface");
        // Detects non-interface instances, primitive arrays, and some object arrays.
        // Array receivers can only call Object methods, so we should be able to allow
        // all object arrays here too, even those with unloaded types.
        receiver_klass = (ciInstanceKlass*) type;
        type_is_exact = true;
      }
      if (type == nullptr) {
        type = receiver->declared_type();
        if (type != nullptr && type->is_loaded() &&
            type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
          receiver_klass = (ciInstanceKlass*) type;
          if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
            // Insert a dependency on this type since
            // find_monomorphic_target may assume it's already done.
            dependency_recorder()->assert_leaf_type(receiver_klass);
            type_is_exact = true;
          }
        }
      }
    }
    if (receiver_klass != nullptr && type_is_exact &&
        receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
      // If we have the exact receiver type we can bind directly to
      // the method to call.
      exact_target = target->resolve_invoke(calling_klass, receiver_klass);
      if (exact_target != nullptr) {
        target = exact_target;
        code = Bytecodes::_invokespecial;
      }
    }
    if (receiver_klass != nullptr &&
        receiver_klass->is_subtype_of(actual_recv) &&
        actual_recv->is_initialized()) {
      actual_recv = receiver_klass;
    }

    if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
        (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
      // Use CHA on the receiver to select a more precise method.
      cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
    } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != nullptr) {
      assert(callee_holder->is_interface(), "invokeinterface to non interface?");
      // If there is only one implementor of this interface then we
      // may be able to bind this invoke directly to the implementing
      // klass but we need both a dependence on the single interface
      // and on the method we bind to. Additionally since all we know
      // about the receiver type is that it's supposed to implement the
      // interface we have to insert a check that it's the class we
      // expect. Interface types are not checked by the verifier so
      // they are roughly equivalent to Object.
      // The number of implementors for declared_interface is less than
      // or equal to the number of implementors for target->holder(), so
      // if the number of implementors of target->holder() == 1 then
      // the number of implementors for decl_interface is 0 or 1. If
      // it's 0 then no class implements decl_interface and there's
      // no point in inlining.
      ciInstanceKlass* declared_interface = callee_holder;
      ciInstanceKlass* singleton = declared_interface->unique_implementor();
      if (singleton != nullptr) {
        assert(singleton != declared_interface, "not a unique implementor");
        cha_monomorphic_target = target->find_monomorphic_target(calling_klass, declared_interface, singleton);
        if (cha_monomorphic_target != nullptr) {
          ciInstanceKlass* holder = cha_monomorphic_target->holder();
          ciInstanceKlass* constraint = (holder->is_subtype_of(singleton) ? holder : singleton); // avoid upcasts
          if (holder != compilation()->env()->Object_klass() &&
              (!type_is_exact || receiver_klass->is_subtype_of(constraint))) {
            actual_recv = declared_interface;

            // Insert a check that it's really the expected class.
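            // (Illustrative, not part of the original source: if interface I
            // currently has C as its only implementor, `i.m()` is compiled as
            // if it were `((C) i).m()` bound statically to C::m, guarded by
            // the assert_unique_implementor dependency below so that loading
            // a second implementor later deoptimizes this code.)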
            CheckCast* c = new CheckCast(constraint, receiver, copy_state_for_exception());
            c->set_incompatible_class_change_check();
            c->set_direct_compare(constraint->is_final());
            // pass the result of the checkcast so that the compiler has
            // more accurate type info in the inlinee
            better_receiver = append_split(c);

            dependency_recorder()->assert_unique_implementor(declared_interface, singleton);
          } else {
            cha_monomorphic_target = nullptr;
          }
        }
      }
    }
  }

  if (cha_monomorphic_target != nullptr) {
    assert(!target->can_be_statically_bound() || target == cha_monomorphic_target, "");
    assert(!cha_monomorphic_target->is_abstract(), "");
    if (!cha_monomorphic_target->can_be_statically_bound(actual_recv)) {
      // If we inlined because CHA revealed only a single target method,
      // then we are dependent on that target method not getting overridden
      // by dynamic class loading. Be sure to test the "static" receiver
      // dest_method here, as opposed to the actual receiver, which may
      // falsely lead us to believe that the receiver is final or private.
      dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target, callee_holder, target);
    }
    code = Bytecodes::_invokespecial;
  }

  // check if we could do inlining
  if (!PatchALot && Inline && target->is_loaded() && !patch_for_appendix &&
      callee_holder->is_loaded()) { // the effect of symbolic reference resolution

    // callee is known => check if we have static binding
    if ((code == Bytecodes::_invokestatic && klass->is_initialized()) || // invokestatic involves an initialization barrier on declaring class
        code == Bytecodes::_invokespecial ||
        (code == Bytecodes::_invokevirtual && target->is_final_method()) ||
        code == Bytecodes::_invokedynamic) {
      // static binding => check if callee is ok
      ciMethod* inline_target = (cha_monomorphic_target != nullptr) ? cha_monomorphic_target : target;
      bool holder_known = (cha_monomorphic_target != nullptr) || (exact_target != nullptr);
      bool success = try_inline(inline_target, holder_known, false /* ignore_return */, code, better_receiver);

      CHECK_BAILOUT();
      clear_inline_bailout();

      if (success) {
        // Register dependence if JVMTI has either breakpoint
        // setting or hotswapping of methods capabilities since they may
        // cause deoptimization.
        if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) {
          dependency_recorder()->assert_evol_method(inline_target);
        }
        return;
      }
    } else {
      print_inlining(target, "no static binding", /*success*/ false);
    }
  } else {
    print_inlining(target, "not inlineable", /*success*/ false);
  }

  // If we attempted an inline which did not succeed because of a
  // bailout during construction of the callee graph, the entire
  // compilation has to be aborted. This is fairly rare and currently
  // seems to only occur for jasm-generated classes which contain
  // jsr/ret pairs which are not associated with finally clauses and
  // do not have exception handlers in the containing method, and are
  // therefore not caught early enough to abort the inlining without
  // corrupting the graph. (We currently bail out with a non-empty
  // stack at a ret in these situations.)
  CHECK_BAILOUT();

  // inlining not successful => standard invoke
  ValueType* result_type = as_ValueType(declared_signature->return_type());
  ValueStack* state_before = copy_state_exhandling();

  // The bytecode (code) might change in this method so we are checking this very late.
  const bool has_receiver =
    code == Bytecodes::_invokespecial ||
    code == Bytecodes::_invokevirtual ||
    code == Bytecodes::_invokeinterface;
  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
  Value recv = has_receiver ? apop() : nullptr;

  // A null check is required here (when there is a receiver) for any of the following cases
  // - invokespecial always needs a null check.
  // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized
  //   and require null checking. If the target is loaded, a null check is emitted here.
  //   If the target isn't loaded, the null check must happen after the call resolution. We achieve that
  //   by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
  //   (The JVM specification requires that LinkageError must be thrown before a NPE. An unloaded target may
  //   potentially fail, and can't have the null check before the resolution.)
  // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the same
  //   reason as above, so calls with a receiver to unloaded targets can't be profiled.)
  //
  // Normal invokevirtual will perform the null check during lookup

  bool need_null_check = (code == Bytecodes::_invokespecial) ||
                         (target->is_loaded() && (target->is_final_method() || (is_profiling() && profile_calls())));

  if (need_null_check) {
    if (recv != nullptr) {
      null_check(recv);
    }

    if (is_profiling()) {
      // Note that we'd collect profile data in this method if we wanted it.
      compilation()->set_would_profile(true);

      if (profile_calls()) {
        assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both cannot be set");
        ciKlass* target_klass = nullptr;
        if (cha_monomorphic_target != nullptr) {
          target_klass = cha_monomorphic_target->holder();
        } else if (exact_target != nullptr) {
          target_klass = exact_target->holder();
        }
        profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false);
      }
    }
  }

  Invoke* result = new Invoke(code, result_type, recv, args, target, state_before);
  // push result
  append_split(result);

  if (result_type != voidType) {
    push(result_type, round_fp(result));
  }
  if (profile_return() && result_type->is_object_kind()) {
    profile_return_type(result, target);
  }
}


void GraphBuilder::new_instance(int klass_index) {
  ValueStack* state_before = copy_state_exhandling();
  ciKlass* klass = stream()->get_klass();
  assert(klass->is_instance_klass(), "must be an instance klass");
  if (!stream()->is_unresolved_klass() && klass->is_inlinetype() &&
      klass->as_inline_klass()->is_initialized() && klass->as_inline_klass()->is_empty()) {
    ciInlineKlass* vk = klass->as_inline_klass();
    apush(append(new Constant(new InstanceConstant(vk->default_instance()))));
  } else {
    NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass(), false);
    _memory->new_instance(new_instance);
    apush(append_split(new_instance));
  }
}

void GraphBuilder::new_type_array() {
  ValueStack* state_before = copy_state_exhandling();
  apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
}


void GraphBuilder::new_object_array() {
  ciKlass* klass = stream()->get_klass();
  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  NewArray* n = new NewObjectArray(klass, ipop(), state_before);
  apush(append_split(n));
}


bool GraphBuilder::direct_compare(ciKlass* k) {
  if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
    ciInstanceKlass* ik = k->as_instance_klass();
    if (ik->is_final()) {
      return true;
    } else {
      if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
        // test class is leaf class
        dependency_recorder()->assert_leaf_type(ik);
        return true;
      }
    }
  }
  return false;
}
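
// Note (added for clarity, not part of the original source): a "direct
// compare" means the type check can be a single pointer comparison of the
// receiver's klass against k, instead of a subtype walk. That is sound when
// k is final, or when CHA shows k is currently a leaf and a dependency is
// recorded so that loading a subclass later invalidates this code.
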
void GraphBuilder::check_cast(int klass_index) {
  ciKlass* klass = stream()->get_klass();
  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
  CheckCast* c = new CheckCast(klass, apop(), state_before);
  apush(append_split(c));
  c->set_direct_compare(direct_compare(klass));

  if (is_profiling()) {
    // Note that we'd collect profile data in this method if we wanted it.
    compilation()->set_would_profile(true);

    if (profile_checkcasts()) {
      c->set_profiled_method(method());
      c->set_profiled_bci(bci());
      c->set_should_profile(true);
    }
  }
}


void GraphBuilder::instance_of(int klass_index) {
  ciKlass* klass = stream()->get_klass();
  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
  InstanceOf* i = new InstanceOf(klass, apop(), state_before);
  ipush(append_split(i));
  i->set_direct_compare(direct_compare(klass));

  if (is_profiling()) {
    // Note that we'd collect profile data in this method if we wanted it.
    compilation()->set_would_profile(true);

    if (profile_checkcasts()) {
      i->set_profiled_method(method());
      i->set_profiled_bci(bci());
      i->set_should_profile(true);
    }
  }
}


void GraphBuilder::monitorenter(Value x, int bci) {
  bool maybe_inlinetype = false;
  if (bci == InvocationEntryBci) {
    // Called by GraphBuilder::inline_sync_entry.
#ifdef ASSERT
    ciType* obj_type = x->declared_type();
    assert(obj_type == nullptr || !obj_type->is_inlinetype(), "inline types cannot have synchronized methods");
#endif
  } else {
    // We are compiling a monitorenter bytecode
    if (EnableValhalla) {
      ciType* obj_type = x->declared_type();
      if (obj_type == nullptr || obj_type->as_klass()->can_be_inline_klass()) {
        // If we're (possibly) locking on an inline type, check for markWord::always_locked_pattern
        // and throw IMSE. (obj_type is null for Phi nodes, so let's just be conservative).
        maybe_inlinetype = true;
      }
    }
  }

  // save state before locking in case of deoptimization after a NullPointerException
  ValueStack* state_before = copy_state_for_exception_with_bci(bci);
  append_with_bci(new MonitorEnter(x, state()->lock(x), state_before, maybe_inlinetype), bci);
  kill_all();
}


void GraphBuilder::monitorexit(Value x, int bci) {
  append_with_bci(new MonitorExit(x, state()->unlock()), bci);
  kill_all();
}
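
// Note (added for clarity, not part of the original source): both monitor
// operations end with kill_all() because monitorenter/monitorexit have
// acquire/release semantics; cached field loads and the local value map must
// not be reused across them, since another thread may have written the fields
// while the lock was not held.
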
void GraphBuilder::new_multi_array(int dimensions) {
  ciKlass* klass = stream()->get_klass();
  ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();

  Values* dims = new Values(dimensions, dimensions, nullptr);
  // fill in all dimensions
  int i = dimensions;
  while (i-- > 0) dims->at_put(i, ipop());
  // create array
  NewArray* n = new NewMultiArray(klass, dims, state_before);
  apush(append_split(n));
}


void GraphBuilder::throw_op(int bci) {
  // We require that the debug info for a Throw be the "state before"
  // the Throw (i.e., exception oop is still on TOS)
  ValueStack* state_before = copy_state_before_with_bci(bci);
  Throw* t = new Throw(apop(), state_before);
  // operand stack not needed after a throw
  state()->truncate_stack(0);
  append_with_bci(t, bci);
}


Value GraphBuilder::round_fp(Value fp_value) {
  if (strict_fp_requires_explicit_rounding) {
#ifdef IA32
    // no rounding needed if SSE2 is used
    if (UseSSE < 2) {
      // Must currently insert rounding node for doubleword values that
      // are results of expressions (i.e., not loads from memory or
      // constants)
      if (fp_value->type()->tag() == doubleTag &&
          fp_value->as_Constant() == nullptr &&
          fp_value->as_Local() == nullptr &&       // method parameters need no rounding
          fp_value->as_RoundFP() == nullptr) {
        return append(new RoundFP(fp_value));
      }
    }
#else
    Unimplemented();
#endif // IA32
  }
  return fp_value;
}
Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
  Canonicalizer canon(compilation(), instr, bci);
  Instruction* i1 = canon.canonical();
  if (i1->is_linked() || !i1->can_be_linked()) {
    // Canonicalizer returned an instruction which was already
    // appended so simply return it.
    return i1;
  }

  if (UseLocalValueNumbering) {
    // Lookup the instruction in the ValueMap and add it to the map if
    // it's not found.
    Instruction* i2 = vmap()->find_insert(i1);
    if (i2 != i1) {
      // found an entry in the value map, so just return it.
      assert(i2->is_linked(), "should already be linked");
      return i2;
    }
    ValueNumberingEffects vne(vmap());
    i1->visit(&vne);
  }

  // i1 was not eliminated => append it
  assert(i1->next() == nullptr, "shouldn't already be linked");
  _last = _last->set_next(i1, canon.bci());

  if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) {
    // set the bailout state but complete normal processing. We
    // might do a little more work before noticing the bailout so we
    // want processing to continue normally until it's noticed.
    bailout("Method and/or inlining is too large");
  }

#ifndef PRODUCT
  if (PrintIRDuringConstruction) {
    InstructionPrinter ip;
    ip.print_line(i1);
    if (Verbose) {
      state()->print();
    }
  }
#endif

  // save state after modification of operand stack for StateSplit instructions
  StateSplit* s = i1->as_StateSplit();
  if (s != nullptr) {
    if (EliminateFieldAccess) {
      Intrinsic* intrinsic = s->as_Intrinsic();
      if (s->as_Invoke() != nullptr || (intrinsic && !intrinsic->preserves_state())) {
        _memory->kill();
      }
    }
    s->set_state(state()->copy(ValueStack::StateAfter, canon.bci()));
  }

  // set up exception handlers for this instruction if necessary
  if (i1->can_trap()) {
    i1->set_exception_handlers(handle_exception(i1));
    assert(i1->exception_state() != nullptr || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
  }
  return i1;
}


Instruction* GraphBuilder::append(Instruction* instr) {
  assert(instr->as_StateSplit() == nullptr || instr->as_BlockEnd() != nullptr, "wrong append used");
  return append_with_bci(instr, bci());
}


Instruction* GraphBuilder::append_split(StateSplit* instr) {
  return append_with_bci(instr, bci());
}


void GraphBuilder::null_check(Value value) {
  if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) {
    return;
  } else {
    Constant* con = value->as_Constant();
    if (con) {
      ObjectType* c = con->type()->as_ObjectType();
      if (c && c->is_loaded()) {
        ObjectConstant* oc = c->as_ObjectConstant();
        if (!oc || !oc->value()->is_null_object()) {
          return;
        }
      }
    }
    if (value->is_null_free()) return;
  }
  append(new NullCheck(value, copy_state_for_exception()));
}
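
// Note (added for clarity, not part of the original source): the early
// returns above elide the NullCheck when the value is provably non-null: a
// freshly allocated object or array, a loaded non-null constant, or a value
// the type system marks null-free. Only otherwise is an explicit NullCheck
// node appended, carrying the exception state captured by
// copy_state_for_exception().
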
XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
  if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) {
    assert(instruction->exception_state() == nullptr
           || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
           || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()),
           "exception_state should be of exception kind");
    return new XHandlers();
  }

  XHandlers* exception_handlers = new XHandlers();
  ScopeData* cur_scope_data = scope_data();
  ValueStack* cur_state = instruction->state_before();
  ValueStack* prev_state = nullptr;
  int scope_count = 0;

  assert(cur_state != nullptr, "state_before must be set");
  do {
    int cur_bci = cur_state->bci();
    assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
    assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci()
           || has_pending_field_access() || has_pending_load_indexed(), "invalid bci");

    // join with all potential exception handlers
    XHandlers* list = cur_scope_data->xhandlers();
    const int n = list->length();
    for (int i = 0; i < n; i++) {
      XHandler* h = list->handler_at(i);
      if (h->covers(cur_bci)) {
        // h is a potential exception handler => join it
        compilation()->set_has_exception_handlers(true);

        BlockBegin* entry = h->entry_block();
        if (entry == block()) {
          // It's acceptable for an exception handler to cover itself
          // but we don't handle that in the parser currently. It's
          // very rare so we bailout instead of trying to handle it.
          BAILOUT_("exception handler covers itself", exception_handlers);
        }
        assert(entry->bci() == h->handler_bci(), "must match");
        assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");

        // previously this was a BAILOUT, but this is not necessary
        // now because asynchronous exceptions are not handled this way.
        assert(entry->state() == nullptr || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");

        // xhandlers start with an empty expression stack
        if (cur_state->stack_size() != 0) {
          // locals are preserved
          // stack will be truncated
          cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
        }
        if (instruction->exception_state() == nullptr) {
          instruction->set_exception_state(cur_state);
        }

        // Note: Usually this join must work. However, very
        // complicated jsr-ret structures where we don't ret from
        // the subroutine can cause the objects on the monitor
        // stacks to not match because blocks can be parsed twice.
        // The only test case we've seen so far which exhibits this
        // problem is caught by the infinite recursion test in
        // GraphBuilder::jsr() if the join doesn't work.
        if (!entry->try_merge(cur_state, compilation()->has_irreducible_loops())) {
          BAILOUT_("error while joining with exception handler, probably due to complicated jsr/rets", exception_handlers);
        }

        // add current state for correct handling of phi functions at begin of xhandler
        int phi_operand = entry->add_exception_state(cur_state);

        // add entry to the list of xhandlers of this block
        _block->add_exception_handler(entry);

        // add back-edge from xhandler entry to this block
        if (!entry->is_predecessor(_block)) {
          entry->add_predecessor(_block);
        }

        // clone XHandler because phi_operand and scope_count cannot be shared
        XHandler* new_xhandler = new XHandler(h);
        new_xhandler->set_phi_operand(phi_operand);
        new_xhandler->set_scope_count(scope_count);
        exception_handlers->append(new_xhandler);

        // fill in exception handler subgraph lazily
        assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
        cur_scope_data->add_to_work_list(entry);

        // stop when reaching catchall
        if (h->catch_type() == 0) {
          return exception_handlers;
        }
      }
    }

    if (exception_handlers->length() == 0) {
      // This scope and all callees do not handle exceptions, so the local
      // variables of this scope are not needed. However, the scope itself is
      // required for a correct exception stack trace -> clear out the locals.
      // Stack and locals are invalidated but not truncated in caller state.
      if (prev_state != nullptr) {
        assert(instruction->exception_state() != nullptr, "missed set?");
        ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(true /* caller */);
        cur_state = cur_state->copy(exc_kind, cur_state->bci());
        // reset caller exception state
        prev_state->set_caller_state(cur_state);
      } else {
        assert(instruction->exception_state() == nullptr, "already set");
        // set instruction exception state
        // truncate stack
        ValueStack::Kind exc_kind = ValueStack::empty_exception_kind();
        cur_state = cur_state->copy(exc_kind, cur_state->bci());
        instruction->set_exception_state(cur_state);
      }
    }

    // Set up iteration for next time.
    // If parsing a jsr, do not grab exception handlers from the
    // parent scopes for this method (already got them, and they
    // needed to be cloned)

    while (cur_scope_data->parsing_jsr()) {
      cur_scope_data = cur_scope_data->parent();
    }

    assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
    assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");

    prev_state = cur_state;
    cur_state = cur_state->caller_state();
    cur_scope_data = cur_scope_data->parent();
    scope_count++;
  } while (cur_scope_data != nullptr);

  return exception_handlers;
}


// Helper class for simplifying Phis.
class PhiSimplifier : public BlockClosure {
 private:
  bool _has_substitutions;
  Value simplify(Value v);

 public:
  PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
    start->iterate_preorder(this);
    if (_has_substitutions) {
      SubstitutionResolver sr(start);
    }
  }
  void block_do(BlockBegin* b);
  bool has_substitutions() const { return _has_substitutions; }
};
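
// Illustrative example (not part of the original source): for
//   int y = f();
//   while (c) { use(y); }   // y is never reassigned inside the loop
// the loop header initially gets y' = Phi(y, y'), i.e. the x = [y, x]
// shape handled below, and the whole phi collapses to plain y.
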
nullptr, "illegal phi function"); 2991 _has_substitutions = true; 2992 phi->clear(Phi::visited); 2993 phi->set_subst(subst); 2994 2995 #ifndef PRODUCT 2996 if (PrintPhiFunctions) { 2997 tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id()); 2998 } 2999 #endif 3000 3001 return subst; 3002 } 3003 } 3004 3005 3006 void PhiSimplifier::block_do(BlockBegin* b) { 3007 for_each_phi_fun(b, phi, 3008 simplify(phi); 3009 ); 3010 3011 #ifdef ASSERT 3012 for_each_phi_fun(b, phi, 3013 assert(phi->operand_count() != 1 || phi->subst() != phi || phi->is_illegal(), "missed trivial simplification"); 3014 ); 3015 3016 ValueStack* state = b->state()->caller_state(); 3017 for_each_state_value(state, value, 3018 Phi* phi = value->as_Phi(); 3019 assert(phi == nullptr || phi->block() != b, "must not have phi function to simplify in caller state"); 3020 ); 3021 #endif 3022 } 3023 3024 // This method is called after all blocks are filled with HIR instructions 3025 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x] 3026 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) { 3027 PhiSimplifier simplifier(start); 3028 } 3029 3030 3031 void GraphBuilder::connect_to_end(BlockBegin* beg) { 3032 // setup iteration 3033 kill_all(); 3034 _block = beg; 3035 _state = beg->state()->copy_for_parsing(); 3036 _last = beg; 3037 iterate_bytecodes_for_block(beg->bci()); 3038 } 3039 3040 3041 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) { 3042 #ifndef PRODUCT 3043 if (PrintIRDuringConstruction) { 3044 tty->cr(); 3045 InstructionPrinter ip; 3046 ip.print_instr(_block); tty->cr(); 3047 ip.print_stack(_block->state()); tty->cr(); 3048 ip.print_inline_level(_block); 3049 ip.print_head(); 3050 tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size()); 3051 } 3052 #endif 3053 _skip_block = false; 3054 assert(state() != nullptr, "ValueStack missing!"); 3055 CompileLog* log = compilation()->log(); 3056 ciBytecodeStream s(method()); 3057 s.reset_to_bci(bci); 3058 int prev_bci = bci; 3059 scope_data()->set_stream(&s); 3060 // iterate 3061 Bytecodes::Code code = Bytecodes::_illegal; 3062 bool push_exception = false; 3063 3064 if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == nullptr) { 3065 // first thing in the exception entry block should be the exception object. 
3066 push_exception = true; 3067 } 3068 3069 bool ignore_return = scope_data()->ignore_return(); 3070 3071 while (!bailed_out() && last()->as_BlockEnd() == nullptr && 3072 (code = stream()->next()) != ciBytecodeStream::EOBC() && 3073 (block_at(s.cur_bci()) == nullptr || block_at(s.cur_bci()) == block())) { 3074 assert(state()->kind() == ValueStack::Parsing, "invalid state kind"); 3075 3076 if (log != nullptr) 3077 log->set_context("bc code='%d' bci='%d'", (int)code, s.cur_bci()); 3078 3079 // Check for active jsr during OSR compilation 3080 if (compilation()->is_osr_compile() 3081 && scope()->is_top_scope() 3082 && parsing_jsr() 3083 && s.cur_bci() == compilation()->osr_bci()) { 3084 bailout("OSR not supported while a jsr is active"); 3085 } 3086 3087 if (push_exception) { 3088 apush(append(new ExceptionObject())); 3089 push_exception = false; 3090 } 3091 3092 // handle bytecode 3093 switch (code) { 3094 case Bytecodes::_nop : /* nothing to do */ break; 3095 case Bytecodes::_aconst_null : apush(append(new Constant(objectNull ))); break; 3096 case Bytecodes::_iconst_m1 : ipush(append(new Constant(new IntConstant (-1)))); break; 3097 case Bytecodes::_iconst_0 : ipush(append(new Constant(intZero ))); break; 3098 case Bytecodes::_iconst_1 : ipush(append(new Constant(intOne ))); break; 3099 case Bytecodes::_iconst_2 : ipush(append(new Constant(new IntConstant ( 2)))); break; 3100 case Bytecodes::_iconst_3 : ipush(append(new Constant(new IntConstant ( 3)))); break; 3101 case Bytecodes::_iconst_4 : ipush(append(new Constant(new IntConstant ( 4)))); break; 3102 case Bytecodes::_iconst_5 : ipush(append(new Constant(new IntConstant ( 5)))); break; 3103 case Bytecodes::_lconst_0 : lpush(append(new Constant(new LongConstant ( 0)))); break; 3104 case Bytecodes::_lconst_1 : lpush(append(new Constant(new LongConstant ( 1)))); break; 3105 case Bytecodes::_fconst_0 : fpush(append(new Constant(new FloatConstant ( 0)))); break; 3106 case Bytecodes::_fconst_1 : fpush(append(new Constant(new FloatConstant ( 1)))); break; 3107 case Bytecodes::_fconst_2 : fpush(append(new Constant(new FloatConstant ( 2)))); break; 3108 case Bytecodes::_dconst_0 : dpush(append(new Constant(new DoubleConstant( 0)))); break; 3109 case Bytecodes::_dconst_1 : dpush(append(new Constant(new DoubleConstant( 1)))); break; 3110 case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break; 3111 case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break; 3112 case Bytecodes::_ldc : // fall through 3113 case Bytecodes::_ldc_w : // fall through 3114 case Bytecodes::_ldc2_w : load_constant(); break; 3115 case Bytecodes::_iload : load_local(intType , s.get_index()); break; 3116 case Bytecodes::_lload : load_local(longType , s.get_index()); break; 3117 case Bytecodes::_fload : load_local(floatType , s.get_index()); break; 3118 case Bytecodes::_dload : load_local(doubleType , s.get_index()); break; 3119 case Bytecodes::_aload : load_local(instanceType, s.get_index()); break; 3120 case Bytecodes::_iload_0 : load_local(intType , 0); break; 3121 case Bytecodes::_iload_1 : load_local(intType , 1); break; 3122 case Bytecodes::_iload_2 : load_local(intType , 2); break; 3123 case Bytecodes::_iload_3 : load_local(intType , 3); break; 3124 case Bytecodes::_lload_0 : load_local(longType , 0); break; 3125 case Bytecodes::_lload_1 : load_local(longType , 1); break; 3126 case Bytecodes::_lload_2 : load_local(longType , 2); break; 3127 case 
Bytecodes::_lload_3 : load_local(longType , 3); break; 3128 case Bytecodes::_fload_0 : load_local(floatType , 0); break; 3129 case Bytecodes::_fload_1 : load_local(floatType , 1); break; 3130 case Bytecodes::_fload_2 : load_local(floatType , 2); break; 3131 case Bytecodes::_fload_3 : load_local(floatType , 3); break; 3132 case Bytecodes::_dload_0 : load_local(doubleType, 0); break; 3133 case Bytecodes::_dload_1 : load_local(doubleType, 1); break; 3134 case Bytecodes::_dload_2 : load_local(doubleType, 2); break; 3135 case Bytecodes::_dload_3 : load_local(doubleType, 3); break; 3136 case Bytecodes::_aload_0 : load_local(objectType, 0); break; 3137 case Bytecodes::_aload_1 : load_local(objectType, 1); break; 3138 case Bytecodes::_aload_2 : load_local(objectType, 2); break; 3139 case Bytecodes::_aload_3 : load_local(objectType, 3); break; 3140 case Bytecodes::_iaload : load_indexed(T_INT ); break; 3141 case Bytecodes::_laload : load_indexed(T_LONG ); break; 3142 case Bytecodes::_faload : load_indexed(T_FLOAT ); break; 3143 case Bytecodes::_daload : load_indexed(T_DOUBLE); break; 3144 case Bytecodes::_aaload : load_indexed(T_OBJECT); break; 3145 case Bytecodes::_baload : load_indexed(T_BYTE ); break; 3146 case Bytecodes::_caload : load_indexed(T_CHAR ); break; 3147 case Bytecodes::_saload : load_indexed(T_SHORT ); break; 3148 case Bytecodes::_istore : store_local(intType , s.get_index()); break; 3149 case Bytecodes::_lstore : store_local(longType , s.get_index()); break; 3150 case Bytecodes::_fstore : store_local(floatType , s.get_index()); break; 3151 case Bytecodes::_dstore : store_local(doubleType, s.get_index()); break; 3152 case Bytecodes::_astore : store_local(objectType, s.get_index()); break; 3153 case Bytecodes::_istore_0 : store_local(intType , 0); break; 3154 case Bytecodes::_istore_1 : store_local(intType , 1); break; 3155 case Bytecodes::_istore_2 : store_local(intType , 2); break; 3156 case Bytecodes::_istore_3 : store_local(intType , 3); break; 3157 case Bytecodes::_lstore_0 : store_local(longType , 0); break; 3158 case Bytecodes::_lstore_1 : store_local(longType , 1); break; 3159 case Bytecodes::_lstore_2 : store_local(longType , 2); break; 3160 case Bytecodes::_lstore_3 : store_local(longType , 3); break; 3161 case Bytecodes::_fstore_0 : store_local(floatType , 0); break; 3162 case Bytecodes::_fstore_1 : store_local(floatType , 1); break; 3163 case Bytecodes::_fstore_2 : store_local(floatType , 2); break; 3164 case Bytecodes::_fstore_3 : store_local(floatType , 3); break; 3165 case Bytecodes::_dstore_0 : store_local(doubleType, 0); break; 3166 case Bytecodes::_dstore_1 : store_local(doubleType, 1); break; 3167 case Bytecodes::_dstore_2 : store_local(doubleType, 2); break; 3168 case Bytecodes::_dstore_3 : store_local(doubleType, 3); break; 3169 case Bytecodes::_astore_0 : store_local(objectType, 0); break; 3170 case Bytecodes::_astore_1 : store_local(objectType, 1); break; 3171 case Bytecodes::_astore_2 : store_local(objectType, 2); break; 3172 case Bytecodes::_astore_3 : store_local(objectType, 3); break; 3173 case Bytecodes::_iastore : store_indexed(T_INT ); break; 3174 case Bytecodes::_lastore : store_indexed(T_LONG ); break; 3175 case Bytecodes::_fastore : store_indexed(T_FLOAT ); break; 3176 case Bytecodes::_dastore : store_indexed(T_DOUBLE); break; 3177 case Bytecodes::_aastore : store_indexed(T_OBJECT); break; 3178 case Bytecodes::_bastore : store_indexed(T_BYTE ); break; 3179 case Bytecodes::_castore : store_indexed(T_CHAR ); break; 3180 case Bytecodes::_sastore : 
store_indexed(T_SHORT ); break; 3181 case Bytecodes::_pop : // fall through 3182 case Bytecodes::_pop2 : // fall through 3183 case Bytecodes::_dup : // fall through 3184 case Bytecodes::_dup_x1 : // fall through 3185 case Bytecodes::_dup_x2 : // fall through 3186 case Bytecodes::_dup2 : // fall through 3187 case Bytecodes::_dup2_x1 : // fall through 3188 case Bytecodes::_dup2_x2 : // fall through 3189 case Bytecodes::_swap : stack_op(code); break; 3190 case Bytecodes::_iadd : arithmetic_op(intType , code); break; 3191 case Bytecodes::_ladd : arithmetic_op(longType , code); break; 3192 case Bytecodes::_fadd : arithmetic_op(floatType , code); break; 3193 case Bytecodes::_dadd : arithmetic_op(doubleType, code); break; 3194 case Bytecodes::_isub : arithmetic_op(intType , code); break; 3195 case Bytecodes::_lsub : arithmetic_op(longType , code); break; 3196 case Bytecodes::_fsub : arithmetic_op(floatType , code); break; 3197 case Bytecodes::_dsub : arithmetic_op(doubleType, code); break; 3198 case Bytecodes::_imul : arithmetic_op(intType , code); break; 3199 case Bytecodes::_lmul : arithmetic_op(longType , code); break; 3200 case Bytecodes::_fmul : arithmetic_op(floatType , code); break; 3201 case Bytecodes::_dmul : arithmetic_op(doubleType, code); break; 3202 case Bytecodes::_idiv : arithmetic_op(intType , code, copy_state_for_exception()); break; 3203 case Bytecodes::_ldiv : arithmetic_op(longType , code, copy_state_for_exception()); break; 3204 case Bytecodes::_fdiv : arithmetic_op(floatType , code); break; 3205 case Bytecodes::_ddiv : arithmetic_op(doubleType, code); break; 3206 case Bytecodes::_irem : arithmetic_op(intType , code, copy_state_for_exception()); break; 3207 case Bytecodes::_lrem : arithmetic_op(longType , code, copy_state_for_exception()); break; 3208 case Bytecodes::_frem : arithmetic_op(floatType , code); break; 3209 case Bytecodes::_drem : arithmetic_op(doubleType, code); break; 3210 case Bytecodes::_ineg : negate_op(intType ); break; 3211 case Bytecodes::_lneg : negate_op(longType ); break; 3212 case Bytecodes::_fneg : negate_op(floatType ); break; 3213 case Bytecodes::_dneg : negate_op(doubleType); break; 3214 case Bytecodes::_ishl : shift_op(intType , code); break; 3215 case Bytecodes::_lshl : shift_op(longType, code); break; 3216 case Bytecodes::_ishr : shift_op(intType , code); break; 3217 case Bytecodes::_lshr : shift_op(longType, code); break; 3218 case Bytecodes::_iushr : shift_op(intType , code); break; 3219 case Bytecodes::_lushr : shift_op(longType, code); break; 3220 case Bytecodes::_iand : logic_op(intType , code); break; 3221 case Bytecodes::_land : logic_op(longType, code); break; 3222 case Bytecodes::_ior : logic_op(intType , code); break; 3223 case Bytecodes::_lor : logic_op(longType, code); break; 3224 case Bytecodes::_ixor : logic_op(intType , code); break; 3225 case Bytecodes::_lxor : logic_op(longType, code); break; 3226 case Bytecodes::_iinc : increment(); break; 3227 case Bytecodes::_i2l : convert(code, T_INT , T_LONG ); break; 3228 case Bytecodes::_i2f : convert(code, T_INT , T_FLOAT ); break; 3229 case Bytecodes::_i2d : convert(code, T_INT , T_DOUBLE); break; 3230 case Bytecodes::_l2i : convert(code, T_LONG , T_INT ); break; 3231 case Bytecodes::_l2f : convert(code, T_LONG , T_FLOAT ); break; 3232 case Bytecodes::_l2d : convert(code, T_LONG , T_DOUBLE); break; 3233 case Bytecodes::_f2i : convert(code, T_FLOAT , T_INT ); break; 3234 case Bytecodes::_f2l : convert(code, T_FLOAT , T_LONG ); break; 3235 case Bytecodes::_f2d : convert(code, T_FLOAT , 
T_DOUBLE); break; 3236 case Bytecodes::_d2i : convert(code, T_DOUBLE, T_INT ); break; 3237 case Bytecodes::_d2l : convert(code, T_DOUBLE, T_LONG ); break; 3238 case Bytecodes::_d2f : convert(code, T_DOUBLE, T_FLOAT ); break; 3239 case Bytecodes::_i2b : convert(code, T_INT , T_BYTE ); break; 3240 case Bytecodes::_i2c : convert(code, T_INT , T_CHAR ); break; 3241 case Bytecodes::_i2s : convert(code, T_INT , T_SHORT ); break; 3242 case Bytecodes::_lcmp : compare_op(longType , code); break; 3243 case Bytecodes::_fcmpl : compare_op(floatType , code); break; 3244 case Bytecodes::_fcmpg : compare_op(floatType , code); break; 3245 case Bytecodes::_dcmpl : compare_op(doubleType, code); break; 3246 case Bytecodes::_dcmpg : compare_op(doubleType, code); break; 3247 case Bytecodes::_ifeq : if_zero(intType , If::eql); break; 3248 case Bytecodes::_ifne : if_zero(intType , If::neq); break; 3249 case Bytecodes::_iflt : if_zero(intType , If::lss); break; 3250 case Bytecodes::_ifge : if_zero(intType , If::geq); break; 3251 case Bytecodes::_ifgt : if_zero(intType , If::gtr); break; 3252 case Bytecodes::_ifle : if_zero(intType , If::leq); break; 3253 case Bytecodes::_if_icmpeq : if_same(intType , If::eql); break; 3254 case Bytecodes::_if_icmpne : if_same(intType , If::neq); break; 3255 case Bytecodes::_if_icmplt : if_same(intType , If::lss); break; 3256 case Bytecodes::_if_icmpge : if_same(intType , If::geq); break; 3257 case Bytecodes::_if_icmpgt : if_same(intType , If::gtr); break; 3258 case Bytecodes::_if_icmple : if_same(intType , If::leq); break; 3259 case Bytecodes::_if_acmpeq : if_same(objectType, If::eql); break; 3260 case Bytecodes::_if_acmpne : if_same(objectType, If::neq); break; 3261 case Bytecodes::_goto : _goto(s.cur_bci(), s.get_dest()); break; 3262 case Bytecodes::_jsr : jsr(s.get_dest()); break; 3263 case Bytecodes::_ret : ret(s.get_index()); break; 3264 case Bytecodes::_tableswitch : table_switch(); break; 3265 case Bytecodes::_lookupswitch : lookup_switch(); break; 3266 case Bytecodes::_ireturn : method_return(ipop(), ignore_return); break; 3267 case Bytecodes::_lreturn : method_return(lpop(), ignore_return); break; 3268 case Bytecodes::_freturn : method_return(fpop(), ignore_return); break; 3269 case Bytecodes::_dreturn : method_return(dpop(), ignore_return); break; 3270 case Bytecodes::_areturn : method_return(apop(), ignore_return); break; 3271 case Bytecodes::_return : method_return(nullptr, ignore_return); break; 3272 case Bytecodes::_getstatic : // fall through 3273 case Bytecodes::_putstatic : // fall through 3274 case Bytecodes::_getfield : // fall through 3275 case Bytecodes::_putfield : access_field(code); break; 3276 case Bytecodes::_invokevirtual : // fall through 3277 case Bytecodes::_invokespecial : // fall through 3278 case Bytecodes::_invokestatic : // fall through 3279 case Bytecodes::_invokedynamic : // fall through 3280 case Bytecodes::_invokeinterface: invoke(code); break; 3281 case Bytecodes::_new : new_instance(s.get_index_u2()); break; 3282 case Bytecodes::_newarray : new_type_array(); break; 3283 case Bytecodes::_anewarray : new_object_array(); break; 3284 case Bytecodes::_arraylength : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; } 3285 case Bytecodes::_athrow : throw_op(s.cur_bci()); break; 3286 case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break; 3287 case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break; 3288 case Bytecodes::_monitorenter : monitorenter(apop(), 
s.cur_bci()); break;
3289 case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break;
3290 case Bytecodes::_wide : ShouldNotReachHere(); break;
3291 case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
3292 case Bytecodes::_ifnull : if_null(objectType, If::eql); break;
3293 case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break;
3294 case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break;
3295 case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break;
3296 case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", nullptr);
3297 default : ShouldNotReachHere(); break;
3298 }
3299
3300 if (log != nullptr)
3301 log->clear_context(); // skip marker if nothing was printed
3302
3303 // save the current bci to set up the Goto at the end
3304 prev_bci = s.cur_bci();
3305
3306 }
3307 CHECK_BAILOUT_(nullptr);
3308 // stop processing of this block (see try_inline_full)
3309 if (_skip_block) {
3310 _skip_block = false;
3311 assert(_last && _last->as_BlockEnd(), "");
3312 return _last->as_BlockEnd();
3313 }
3314 // check whether the last instruction, if there is one, is a BlockEnd instruction
3315 BlockEnd* end = last()->as_BlockEnd();
3316 if (end == nullptr) {
3317 // all blocks must end with a BlockEnd instruction => add a Goto
3318 end = new Goto(block_at(s.cur_bci()), false);
3319 append(end);
3320 }
3321 assert(end == last()->as_BlockEnd(), "inconsistency");
3322
3323 assert(end->state() != nullptr, "state must already be present");
3324 assert(end->as_Return() == nullptr || end->as_Throw() == nullptr || end->state()->stack_size() == 0, "stack not needed for return and throw");
3325
3326 // connect to begin & set state
3327 // NOTE that inlining may have changed the block we are parsing
3328 block()->set_end(end);
3329 // propagate state
3330 for (int i = end->number_of_sux() - 1; i >= 0; i--) {
3331 BlockBegin* sux = end->sux_at(i);
3332 assert(sux->is_predecessor(block()), "predecessor missing");
3333 // be careful, bailout if bytecodes are strange
3334 if (!sux->try_merge(end->state(), compilation()->has_irreducible_loops())) BAILOUT_("block join failed", nullptr);
3335 scope_data()->add_to_work_list(end->sux_at(i));
3336 }
3337
3338 scope_data()->set_stream(nullptr);
3339
3340 // done
3341 return end;
3342 }
3343
3344
3345 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
3346 do {
3347 if (start_in_current_block_for_inlining && !bailed_out()) {
3348 iterate_bytecodes_for_block(0);
3349 start_in_current_block_for_inlining = false;
3350 } else {
3351 BlockBegin* b;
3352 while ((b = scope_data()->remove_from_work_list()) != nullptr) {
3353 if (!b->is_set(BlockBegin::was_visited_flag)) {
3354 if (b->is_set(BlockBegin::osr_entry_flag)) {
3355 // we're about to parse the osr entry block, so make sure
3356 // we set up the OSR edge leading into this block so that
3357 // Phis get set up correctly.
3358 setup_osr_entry_block();
3359 // this is no longer the osr entry block, so clear it.
3360 b->clear(BlockBegin::osr_entry_flag);
3361 }
3362 b->set(BlockBegin::was_visited_flag);
3363 connect_to_end(b);
3364 }
3365 }
3366 }
3367 } while (!bailed_out() && !scope_data()->is_work_list_empty());
3368 }
3369
3370
3371 bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes];
3372
3373 void GraphBuilder::initialize() {
3374 // the following bytecodes are assumed to potentially
3375 // throw exceptions in compiled code - note that e.g.
3376 // monitorexit & the return bytecodes do not throw
3377 // exceptions, since monitor pairing analysis has proved that they
3378 // succeed (provided that monitor pairing itself succeeded)
3379 Bytecodes::Code can_trap_list[] =
3380 { Bytecodes::_ldc
3381 , Bytecodes::_ldc_w
3382 , Bytecodes::_ldc2_w
3383 , Bytecodes::_iaload
3384 , Bytecodes::_laload
3385 , Bytecodes::_faload
3386 , Bytecodes::_daload
3387 , Bytecodes::_aaload
3388 , Bytecodes::_baload
3389 , Bytecodes::_caload
3390 , Bytecodes::_saload
3391 , Bytecodes::_iastore
3392 , Bytecodes::_lastore
3393 , Bytecodes::_fastore
3394 , Bytecodes::_dastore
3395 , Bytecodes::_aastore
3396 , Bytecodes::_bastore
3397 , Bytecodes::_castore
3398 , Bytecodes::_sastore
3399 , Bytecodes::_idiv
3400 , Bytecodes::_ldiv
3401 , Bytecodes::_irem
3402 , Bytecodes::_lrem
3403 , Bytecodes::_getstatic
3404 , Bytecodes::_putstatic
3405 , Bytecodes::_getfield
3406 , Bytecodes::_putfield
3407 , Bytecodes::_invokevirtual
3408 , Bytecodes::_invokespecial
3409 , Bytecodes::_invokestatic
3410 , Bytecodes::_invokedynamic
3411 , Bytecodes::_invokeinterface
3412 , Bytecodes::_new
3413 , Bytecodes::_newarray
3414 , Bytecodes::_anewarray
3415 , Bytecodes::_arraylength
3416 , Bytecodes::_athrow
3417 , Bytecodes::_checkcast
3418 , Bytecodes::_instanceof
3419 , Bytecodes::_monitorenter
3420 , Bytecodes::_multianewarray
3421 };
3422
3423 // initialize trap tables
3424 for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
3425 _can_trap[i] = false;
3426 }
3427 // set standard trap info
3428 for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
3429 _can_trap[can_trap_list[j]] = true;
3430 }
3431 }
3432
3433
3434 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
3435 assert(entry->is_set(f), "entry/flag mismatch");
3436 // create header block
3437 BlockBegin* h = new BlockBegin(entry->bci());
3438 h->set_depth_first_number(0);
3439
3440 Value l = h;
3441 BlockEnd* g = new Goto(entry, false);
3442 l->set_next(g, entry->bci());
3443 h->set_end(g);
3444 h->set(f);
3445 // set up the header block end state
3446 ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
3447 assert(s->stack_is_empty(), "must have empty stack at entry point");
3448 g->set_state(s);
3449 return h;
3450 }
3451
3452
3453
3454 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
3455 BlockBegin* start = new BlockBegin(0);
3456
3457 // This code eliminates the empty start block at the beginning of
3458 // each method. Previously, each method started with the
3459 // start-block created below, and this block was followed by the
3460 // header block that was always empty. This header block is only
3461 // necessary if std_entry is also a backward branch target because
3462 // then phi functions may be necessary in the header block. It's
3463 // also necessary when profiling so that there's a single block that
3464 // can increment the counters.
3465 // In addition, with range check elimination, we may need a valid block
3466 // that dominates all the rest to insert range predicates.
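// Illustrative example (a hedged sketch, hypothetical Java source): in a method
// whose body is
//   do { ... } while (cond);
// the loop's backward branch targets bci 0, so std_entry has predecessors and phi
// functions may be required right at method entry; the separate header block gives
// those phis (and the profiling counter increment) a block of their own.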
3467 BlockBegin* new_header_block;
3468 if (std_entry->number_of_preds() > 0 || is_profiling() || RangeCheckElimination) {
3469 new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
3470 } else {
3471 new_header_block = std_entry;
3472 }
3473
3474 // set up the start block (root of the IR graph)
3475 Base* base =
3476 new Base(
3477 new_header_block,
3478 osr_entry
3479 );
3480 start->set_next(base, 0);
3481 start->set_end(base);
3482 // create & set up the state for the start block
3483 start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
3484 base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
3485
3486 if (base->std_entry()->state() == nullptr) {
3487 // set up states for header blocks
3488 base->std_entry()->merge(state, compilation()->has_irreducible_loops());
3489 }
3490
3491 assert(base->std_entry()->state() != nullptr, "");
3492 return start;
3493 }
3494
3495
3496 void GraphBuilder::setup_osr_entry_block() {
3497 assert(compilation()->is_osr_compile(), "only for osrs");
3498
3499 int osr_bci = compilation()->osr_bci();
3500 ciBytecodeStream s(method());
3501 s.reset_to_bci(osr_bci);
3502 s.next();
3503 scope_data()->set_stream(&s);
3504
3505 // create a new block to be the osr setup code
3506 _osr_entry = new BlockBegin(osr_bci);
3507 _osr_entry->set(BlockBegin::osr_entry_flag);
3508 _osr_entry->set_depth_first_number(0);
3509 BlockBegin* target = bci2block()->at(osr_bci);
3510 assert(target != nullptr && target->is_set(BlockBegin::osr_entry_flag), "must be there");
3511 // the osr entry has no values for locals
3512 ValueStack* state = target->state()->copy();
3513 _osr_entry->set_state(state);
3514
3515 kill_all();
3516 _block = _osr_entry;
3517 _state = _osr_entry->state()->copy();
3518 assert(_state->bci() == osr_bci, "mismatch");
3519 _last = _osr_entry;
3520 Value e = append(new OsrEntry());
3521 e->set_needs_null_check(false);
3522
3523 // The OSR buffer layout is
3524 //
3525 // locals[nlocals-1..0]
3526 // monitors[number_of_locks-1..0]
3527 //
3528 // locals is a direct copy of the interpreter frame, so in the osr buffer
3529 // the first slot in the local array is the last local from the interpreter
3530 // and the last slot is local[0] (the receiver) from the interpreter
3531 //
3532 // Similarly with locks: the first lock slot in the osr buffer is the nth lock
3533 // from the interpreter frame, and the nth lock slot in the osr buffer is the
3534 // 0th lock in the interpreter frame (the method lock, if this is a synchronized method)
3535
3536 // Initialize monitors in the compiled activation.
3537
3538 int index;
3539 Value local;
3540
3541 // find all the locals that the interpreter thinks contain live oops
3542 const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
3543
3544 // compute the offset into the locals so that we can treat the buffer
3545 // as if the locals were still in the interpreter frame
3546 int locals_offset = BytesPerWord * (method()->max_locals() - 1);
3547 for_each_local_value(state, index, local) {
3548 int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
3549 Value get;
3550 if (local->type()->is_object_kind() && !live_oops.at(index)) {
3551 // The interpreter thinks this local is dead but the compiler
3552 // doesn't, so pretend that the interpreter passed in null.
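// (A hedged example: a reference local whose last use precedes the OSR bci can
// still look live to C1's value state even though the interpreter's oop-liveness
// map has killed it; substituting null keeps the OSR state consistent without
// keeping a stale oop alive.)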
3553 get = append(new Constant(objectNull)); 3554 } else { 3555 Value off_val = append(new Constant(new IntConstant(offset))); 3556 get = append(new UnsafeGet(as_BasicType(local->type()), e, 3557 off_val, 3558 false/*is_volatile*/, 3559 true/*is_raw*/)); 3560 } 3561 _state->store_local(index, get); 3562 } 3563 3564 // the storage for the OSR buffer is freed manually in the LIRGenerator. 3565 3566 assert(state->caller_state() == nullptr, "should be top scope"); 3567 state->clear_locals(); 3568 Goto* g = new Goto(target, false); 3569 append(g); 3570 _osr_entry->set_end(g); 3571 target->merge(_osr_entry->end()->state(), compilation()->has_irreducible_loops()); 3572 3573 scope_data()->set_stream(nullptr); 3574 } 3575 3576 3577 ValueStack* GraphBuilder::state_at_entry() { 3578 ValueStack* state = new ValueStack(scope(), nullptr); 3579 3580 // Set up locals for receiver 3581 int idx = 0; 3582 if (!method()->is_static()) { 3583 // we should always see the receiver 3584 state->store_local(idx, new Local(method()->holder(), objectType, idx, 3585 /*receiver*/ true, /*null_free*/ method()->holder()->is_flat_array_klass())); 3586 idx = 1; 3587 } 3588 3589 // Set up locals for incoming arguments 3590 ciSignature* sig = method()->signature(); 3591 for (int i = 0; i < sig->count(); i++) { 3592 ciType* type = sig->type_at(i); 3593 BasicType basic_type = type->basic_type(); 3594 // don't allow T_ARRAY to propagate into locals types 3595 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 3596 ValueType* vt = as_ValueType(basic_type); 3597 state->store_local(idx, new Local(type, vt, idx, false, false)); 3598 idx += type->size(); 3599 } 3600 3601 // lock synchronized method 3602 if (method()->is_synchronized()) { 3603 state->lock(nullptr); 3604 } 3605 3606 return state; 3607 } 3608 3609 3610 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope) 3611 : _scope_data(nullptr) 3612 , _compilation(compilation) 3613 , _memory(new MemoryBuffer()) 3614 , _inline_bailout_msg(nullptr) 3615 , _instruction_count(0) 3616 , _osr_entry(nullptr) 3617 , _pending_field_access(nullptr) 3618 , _pending_load_indexed(nullptr) 3619 { 3620 int osr_bci = compilation->osr_bci(); 3621 3622 // determine entry points and bci2block mapping 3623 BlockListBuilder blm(compilation, scope, osr_bci); 3624 CHECK_BAILOUT(); 3625 3626 BlockList* bci2block = blm.bci2block(); 3627 BlockBegin* start_block = bci2block->at(0); 3628 3629 push_root_scope(scope, bci2block, start_block); 3630 3631 // setup state for std entry 3632 _initial_state = state_at_entry(); 3633 start_block->merge(_initial_state, compilation->has_irreducible_loops()); 3634 3635 // End nulls still exist here 3636 3637 // complete graph 3638 _vmap = new ValueMap(); 3639 switch (scope->method()->intrinsic_id()) { 3640 case vmIntrinsics::_dabs : // fall through 3641 case vmIntrinsics::_dsqrt : // fall through 3642 case vmIntrinsics::_dsqrt_strict : // fall through 3643 case vmIntrinsics::_dsin : // fall through 3644 case vmIntrinsics::_dcos : // fall through 3645 case vmIntrinsics::_dtan : // fall through 3646 case vmIntrinsics::_dtanh : // fall through 3647 case vmIntrinsics::_dlog : // fall through 3648 case vmIntrinsics::_dlog10 : // fall through 3649 case vmIntrinsics::_dexp : // fall through 3650 case vmIntrinsics::_dpow : // fall through 3651 { 3652 // Compiles where the root method is an intrinsic need a special 3653 // compilation environment because the bytecodes for the method 3654 // shouldn't be parsed during the compilation, only the special 3655 // 
Intrinsic node should be emitted. If this isn't done the
3656 // code for the inlined version will be different from the root
3657 // compiled version, which could lead to monotonicity problems on
3658 // Intel.
3659 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) {
3660 BAILOUT("failed to inline intrinsic, method not annotated");
3661 }
3662
3663 // Set up a stream so that appending instructions works properly.
3664 ciBytecodeStream s(scope->method());
3665 s.reset_to_bci(0);
3666 scope_data()->set_stream(&s);
3667 s.next();
3668
3669 // set up the initial block state
3670 _block = start_block;
3671 _state = start_block->state()->copy_for_parsing();
3672 _last = start_block;
3673 load_local(doubleType, 0);
3674 if (scope->method()->intrinsic_id() == vmIntrinsics::_dpow) {
3675 load_local(doubleType, 2);
3676 }
3677
3678 // Emit the intrinsic node.
3679 bool result = try_inline_intrinsics(scope->method());
3680 if (!result) BAILOUT("failed to inline intrinsic");
3681 method_return(dpop());
3682
3683 // connect the begin and end blocks and we're all done.
3684 BlockEnd* end = last()->as_BlockEnd();
3685 block()->set_end(end);
3686 break;
3687 }
3688
3689 case vmIntrinsics::_Reference_get:
3690 {
3691 {
3692 // With java.lang.ref.Reference.get() we must go through the
3693 // intrinsic - when G1 is enabled - even when get() is the root
3694 // method of the compile so that, if necessary, the value in
3695 // the referent field of the reference object gets recorded by
3696 // the pre-barrier code.
3697 // Specifically, if G1 is enabled, the value in the referent
3698 // field is recorded by the G1 SATB pre-barrier. This will
3699 // result in the referent being marked live and the reference
3700 // object removed from the list of discovered references during
3701 // reference processing.
3702 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) {
3703 BAILOUT("failed to inline intrinsic, method not annotated");
3704 }
3705
3706 // We also need the intrinsic to prevent commoning of reads from this
3707 // field across safepoints, since the GC can change its value.
3708
3709 // Set up a stream so that appending instructions works properly.
3710 ciBytecodeStream s(scope->method());
3711 s.reset_to_bci(0);
3712 scope_data()->set_stream(&s);
3713 s.next();
3714
3715 // set up the initial block state
3716 _block = start_block;
3717 _state = start_block->state()->copy_for_parsing();
3718 _last = start_block;
3719 load_local(objectType, 0);
3720
3721 // Emit the intrinsic node.
3722 bool result = try_inline_intrinsics(scope->method());
3723 if (!result) BAILOUT("failed to inline intrinsic");
3724 method_return(apop());
3725
3726 // connect the begin and end blocks and we're all done.
3727 BlockEnd* end = last()->as_BlockEnd(); 3728 block()->set_end(end); 3729 break; 3730 } 3731 // Otherwise, fall thru 3732 } 3733 3734 default: 3735 scope_data()->add_to_work_list(start_block); 3736 iterate_all_blocks(); 3737 break; 3738 } 3739 CHECK_BAILOUT(); 3740 3741 # ifdef ASSERT 3742 // For all blocks reachable from start_block: _end must be non-null 3743 { 3744 BlockList processed; 3745 BlockList to_go; 3746 to_go.append(start_block); 3747 while(to_go.length() > 0) { 3748 BlockBegin* current = to_go.pop(); 3749 assert(current != nullptr, "Should not happen."); 3750 assert(current->end() != nullptr, "All blocks reachable from start_block should have end() != nullptr."); 3751 processed.append(current); 3752 for(int i = 0; i < current->number_of_sux(); i++) { 3753 BlockBegin* s = current->sux_at(i); 3754 if (!processed.contains(s)) { 3755 to_go.append(s); 3756 } 3757 } 3758 } 3759 } 3760 #endif // ASSERT 3761 3762 _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state); 3763 3764 eliminate_redundant_phis(_start); 3765 3766 NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats()); 3767 // for osr compile, bailout if some requirements are not fulfilled 3768 if (osr_bci != -1) { 3769 BlockBegin* osr_block = blm.bci2block()->at(osr_bci); 3770 if (!osr_block->is_set(BlockBegin::was_visited_flag)) { 3771 BAILOUT("osr entry must have been visited for osr compile"); 3772 } 3773 3774 // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points 3775 if (!osr_block->state()->stack_is_empty()) { 3776 BAILOUT("stack not empty at OSR entry point"); 3777 } 3778 } 3779 #ifndef PRODUCT 3780 if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count); 3781 #endif 3782 } 3783 3784 3785 ValueStack* GraphBuilder::copy_state_before() { 3786 return copy_state_before_with_bci(bci()); 3787 } 3788 3789 ValueStack* GraphBuilder::copy_state_exhandling() { 3790 return copy_state_exhandling_with_bci(bci()); 3791 } 3792 3793 ValueStack* GraphBuilder::copy_state_for_exception() { 3794 return copy_state_for_exception_with_bci(bci()); 3795 } 3796 3797 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) { 3798 return state()->copy(ValueStack::StateBefore, bci); 3799 } 3800 3801 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) { 3802 if (!has_handler()) return nullptr; 3803 return state()->copy(ValueStack::StateBefore, bci); 3804 } 3805 3806 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) { 3807 ValueStack* s = copy_state_exhandling_with_bci(bci); 3808 if (s == nullptr) { 3809 // no handler, no need to retain locals 3810 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(); 3811 s = state()->copy(exc_kind, bci); 3812 } 3813 return s; 3814 } 3815 3816 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const { 3817 int recur_level = 0; 3818 for (IRScope* s = scope(); s != nullptr; s = s->caller()) { 3819 if (s->method() == cur_callee) { 3820 ++recur_level; 3821 } 3822 } 3823 return recur_level; 3824 } 3825 3826 static void set_flags_for_inlined_callee(Compilation* compilation, ciMethod* callee) { 3827 if (callee->has_reserved_stack_access()) { 3828 compilation->set_has_reserved_stack_access(true); 3829 } 3830 if (callee->is_synchronized() || callee->has_monitor_bytecodes()) { 3831 compilation->set_has_monitors(true); 3832 } 3833 if (callee->is_scoped()) { 3834 compilation->set_has_scoped_access(true); 3835 } 3836 } 3837 3838 bool 
GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 3839 const char* msg = nullptr; 3840 3841 // clear out any existing inline bailout condition 3842 clear_inline_bailout(); 3843 3844 // exclude methods we don't want to inline 3845 msg = should_not_inline(callee); 3846 if (msg != nullptr) { 3847 print_inlining(callee, msg, /*success*/ false); 3848 return false; 3849 } 3850 3851 // method handle invokes 3852 if (callee->is_method_handle_intrinsic()) { 3853 if (try_method_handle_inline(callee, ignore_return)) { 3854 set_flags_for_inlined_callee(compilation(), callee); 3855 return true; 3856 } 3857 return false; 3858 } 3859 3860 // handle intrinsics 3861 if (callee->intrinsic_id() != vmIntrinsics::_none && 3862 callee->check_intrinsic_candidate()) { 3863 if (try_inline_intrinsics(callee, ignore_return)) { 3864 print_inlining(callee, "intrinsic"); 3865 set_flags_for_inlined_callee(compilation(), callee); 3866 return true; 3867 } 3868 // try normal inlining 3869 } 3870 3871 // certain methods cannot be parsed at all 3872 msg = check_can_parse(callee); 3873 if (msg != nullptr) { 3874 print_inlining(callee, msg, /*success*/ false); 3875 return false; 3876 } 3877 3878 // If bytecode not set use the current one. 3879 if (bc == Bytecodes::_illegal) { 3880 bc = code(); 3881 } 3882 if (try_inline_full(callee, holder_known, ignore_return, bc, receiver)) { 3883 set_flags_for_inlined_callee(compilation(), callee); 3884 return true; 3885 } 3886 3887 // Entire compilation could fail during try_inline_full call. 3888 // In that case printing inlining decision info is useless. 3889 if (!bailed_out()) 3890 print_inlining(callee, _inline_bailout_msg, /*success*/ false); 3891 3892 return false; 3893 } 3894 3895 3896 const char* GraphBuilder::check_can_parse(ciMethod* callee) const { 3897 // Certain methods cannot be parsed at all: 3898 if ( callee->is_native()) return "native method"; 3899 if ( callee->is_abstract()) return "abstract method"; 3900 if (!callee->can_be_parsed()) return "cannot be parsed"; 3901 return nullptr; 3902 } 3903 3904 // negative filter: should callee NOT be inlined? returns null, ok to inline, or rejection msg 3905 const char* GraphBuilder::should_not_inline(ciMethod* callee) const { 3906 if ( compilation()->directive()->should_not_inline(callee)) return "disallowed by CompileCommand"; 3907 if ( callee->dont_inline()) return "don't inline by annotation"; 3908 return nullptr; 3909 } 3910 3911 void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_return) { 3912 vmIntrinsics::ID id = callee->intrinsic_id(); 3913 assert(id != vmIntrinsics::_none, "must be a VM intrinsic"); 3914 3915 // Some intrinsics need special IR nodes. 
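// Illustrative sketch of the mapping below (not exhaustive; the Java call site is
// a hypothetical example): a call such as
//   Unsafe.getInt(obj, offset)
// is emitted not as a generic Intrinsic node but as a raw memory access,
//   append_unsafe_get(callee, T_INT, false /*volatile*/);
// so later phases can optimize it like any other UnsafeGet.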
3916 switch(id) { 3917 case vmIntrinsics::_getReference : append_unsafe_get(callee, T_OBJECT, false); return; 3918 case vmIntrinsics::_getBoolean : append_unsafe_get(callee, T_BOOLEAN, false); return; 3919 case vmIntrinsics::_getByte : append_unsafe_get(callee, T_BYTE, false); return; 3920 case vmIntrinsics::_getShort : append_unsafe_get(callee, T_SHORT, false); return; 3921 case vmIntrinsics::_getChar : append_unsafe_get(callee, T_CHAR, false); return; 3922 case vmIntrinsics::_getInt : append_unsafe_get(callee, T_INT, false); return; 3923 case vmIntrinsics::_getLong : append_unsafe_get(callee, T_LONG, false); return; 3924 case vmIntrinsics::_getFloat : append_unsafe_get(callee, T_FLOAT, false); return; 3925 case vmIntrinsics::_getDouble : append_unsafe_get(callee, T_DOUBLE, false); return; 3926 case vmIntrinsics::_putReference : append_unsafe_put(callee, T_OBJECT, false); return; 3927 case vmIntrinsics::_putBoolean : append_unsafe_put(callee, T_BOOLEAN, false); return; 3928 case vmIntrinsics::_putByte : append_unsafe_put(callee, T_BYTE, false); return; 3929 case vmIntrinsics::_putShort : append_unsafe_put(callee, T_SHORT, false); return; 3930 case vmIntrinsics::_putChar : append_unsafe_put(callee, T_CHAR, false); return; 3931 case vmIntrinsics::_putInt : append_unsafe_put(callee, T_INT, false); return; 3932 case vmIntrinsics::_putLong : append_unsafe_put(callee, T_LONG, false); return; 3933 case vmIntrinsics::_putFloat : append_unsafe_put(callee, T_FLOAT, false); return; 3934 case vmIntrinsics::_putDouble : append_unsafe_put(callee, T_DOUBLE, false); return; 3935 case vmIntrinsics::_getShortUnaligned : append_unsafe_get(callee, T_SHORT, false); return; 3936 case vmIntrinsics::_getCharUnaligned : append_unsafe_get(callee, T_CHAR, false); return; 3937 case vmIntrinsics::_getIntUnaligned : append_unsafe_get(callee, T_INT, false); return; 3938 case vmIntrinsics::_getLongUnaligned : append_unsafe_get(callee, T_LONG, false); return; 3939 case vmIntrinsics::_putShortUnaligned : append_unsafe_put(callee, T_SHORT, false); return; 3940 case vmIntrinsics::_putCharUnaligned : append_unsafe_put(callee, T_CHAR, false); return; 3941 case vmIntrinsics::_putIntUnaligned : append_unsafe_put(callee, T_INT, false); return; 3942 case vmIntrinsics::_putLongUnaligned : append_unsafe_put(callee, T_LONG, false); return; 3943 case vmIntrinsics::_getReferenceVolatile : append_unsafe_get(callee, T_OBJECT, true); return; 3944 case vmIntrinsics::_getBooleanVolatile : append_unsafe_get(callee, T_BOOLEAN, true); return; 3945 case vmIntrinsics::_getByteVolatile : append_unsafe_get(callee, T_BYTE, true); return; 3946 case vmIntrinsics::_getShortVolatile : append_unsafe_get(callee, T_SHORT, true); return; 3947 case vmIntrinsics::_getCharVolatile : append_unsafe_get(callee, T_CHAR, true); return; 3948 case vmIntrinsics::_getIntVolatile : append_unsafe_get(callee, T_INT, true); return; 3949 case vmIntrinsics::_getLongVolatile : append_unsafe_get(callee, T_LONG, true); return; 3950 case vmIntrinsics::_getFloatVolatile : append_unsafe_get(callee, T_FLOAT, true); return; 3951 case vmIntrinsics::_getDoubleVolatile : append_unsafe_get(callee, T_DOUBLE, true); return; 3952 case vmIntrinsics::_putReferenceVolatile : append_unsafe_put(callee, T_OBJECT, true); return; 3953 case vmIntrinsics::_putBooleanVolatile : append_unsafe_put(callee, T_BOOLEAN, true); return; 3954 case vmIntrinsics::_putByteVolatile : append_unsafe_put(callee, T_BYTE, true); return; 3955 case vmIntrinsics::_putShortVolatile : append_unsafe_put(callee, T_SHORT, 
true); return; 3956 case vmIntrinsics::_putCharVolatile : append_unsafe_put(callee, T_CHAR, true); return; 3957 case vmIntrinsics::_putIntVolatile : append_unsafe_put(callee, T_INT, true); return; 3958 case vmIntrinsics::_putLongVolatile : append_unsafe_put(callee, T_LONG, true); return; 3959 case vmIntrinsics::_putFloatVolatile : append_unsafe_put(callee, T_FLOAT, true); return; 3960 case vmIntrinsics::_putDoubleVolatile : append_unsafe_put(callee, T_DOUBLE, true); return; 3961 case vmIntrinsics::_compareAndSetLong: 3962 case vmIntrinsics::_compareAndSetInt: 3963 case vmIntrinsics::_compareAndSetReference : append_unsafe_CAS(callee); return; 3964 case vmIntrinsics::_getAndAddInt: 3965 case vmIntrinsics::_getAndAddLong : append_unsafe_get_and_set(callee, true); return; 3966 case vmIntrinsics::_getAndSetInt : 3967 case vmIntrinsics::_getAndSetLong : 3968 case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set(callee, false); return; 3969 case vmIntrinsics::_getCharStringU : append_char_access(callee, false); return; 3970 case vmIntrinsics::_putCharStringU : append_char_access(callee, true); return; 3971 case vmIntrinsics::_clone : append_alloc_array_copy(callee); return; 3972 default: 3973 break; 3974 } 3975 if (_inline_bailout_msg != nullptr) { 3976 return; 3977 } 3978 3979 // create intrinsic node 3980 const bool has_receiver = !callee->is_static(); 3981 ValueType* result_type = as_ValueType(callee->return_type()); 3982 ValueStack* state_before = copy_state_for_exception(); 3983 3984 Values* args = state()->pop_arguments(callee->arg_size()); 3985 3986 if (is_profiling()) { 3987 // Don't profile in the special case where the root method 3988 // is the intrinsic 3989 if (callee != method()) { 3990 // Note that we'd collect profile data in this method if we wanted it. 3991 compilation()->set_would_profile(true); 3992 if (profile_calls()) { 3993 Value recv = nullptr; 3994 if (has_receiver) { 3995 recv = args->at(0); 3996 null_check(recv); 3997 } 3998 profile_call(callee, recv, nullptr, collect_args_for_profiling(args, callee, true), true); 3999 } 4000 } 4001 } 4002 4003 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), 4004 args, has_receiver, state_before, 4005 vmIntrinsics::preserves_state(id), 4006 vmIntrinsics::can_trap(id)); 4007 // append instruction & push result 4008 Value value = append_split(result); 4009 if (result_type != voidType && !ignore_return) { 4010 push(result_type, value); 4011 } 4012 4013 if (callee != method() && profile_return() && result_type->is_object_kind()) { 4014 profile_return_type(result, callee); 4015 } 4016 } 4017 4018 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) { 4019 // For calling is_intrinsic_available we need to transition to 4020 // the '_thread_in_vm' state because is_intrinsic_available() 4021 // accesses critical VM-internal data. 4022 bool is_available = false; 4023 { 4024 VM_ENTRY_MARK; 4025 methodHandle mh(THREAD, callee->get_Method()); 4026 is_available = _compilation->compiler()->is_intrinsic_available(mh, _compilation->directive()); 4027 } 4028 4029 if (!is_available) { 4030 if (!InlineNatives) { 4031 // Return false and also set message that the inlining of 4032 // intrinsics has been disabled in general. 
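// (That is, the VM was started with -XX:-InlineNatives, which disables
// inlining of intrinsics across the board.)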
4033 INLINE_BAILOUT("intrinsic method inlining disabled"); 4034 } else { 4035 return false; 4036 } 4037 } 4038 build_graph_for_intrinsic(callee, ignore_return); 4039 if (_inline_bailout_msg != nullptr) { 4040 return false; 4041 } 4042 return true; 4043 } 4044 4045 4046 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) { 4047 // Introduce a new callee continuation point - all Ret instructions 4048 // will be replaced with Gotos to this point. 4049 if (next_bci() >= method()->code_size()) { 4050 return false; 4051 } 4052 BlockBegin* cont = block_at(next_bci()); 4053 assert(cont != nullptr, "continuation must exist (BlockListBuilder starts a new block after a jsr"); 4054 4055 // Note: can not assign state to continuation yet, as we have to 4056 // pick up the state from the Ret instructions. 4057 4058 // Push callee scope 4059 push_scope_for_jsr(cont, jsr_dest_bci); 4060 4061 // Temporarily set up bytecode stream so we can append instructions 4062 // (only using the bci of this stream) 4063 scope_data()->set_stream(scope_data()->parent()->stream()); 4064 4065 BlockBegin* jsr_start_block = block_at(jsr_dest_bci); 4066 assert(jsr_start_block != nullptr, "jsr start block must exist"); 4067 assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet"); 4068 Goto* goto_sub = new Goto(jsr_start_block, false); 4069 // Must copy state to avoid wrong sharing when parsing bytecodes 4070 assert(jsr_start_block->state() == nullptr, "should have fresh jsr starting block"); 4071 jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci)); 4072 append(goto_sub); 4073 _block->set_end(goto_sub); 4074 _last = _block = jsr_start_block; 4075 4076 // Clear out bytecode stream 4077 scope_data()->set_stream(nullptr); 4078 4079 scope_data()->add_to_work_list(jsr_start_block); 4080 4081 // Ready to resume parsing in subroutine 4082 iterate_all_blocks(); 4083 4084 // If we bailed out during parsing, return immediately (this is bad news) 4085 CHECK_BAILOUT_(false); 4086 4087 // Detect whether the continuation can actually be reached. If not, 4088 // it has not had state set by the join() operations in 4089 // iterate_bytecodes_for_block()/ret() and we should not touch the 4090 // iteration state. The calling activation of 4091 // iterate_bytecodes_for_block will then complete normally. 4092 if (cont->state() != nullptr) { 4093 if (!cont->is_set(BlockBegin::was_visited_flag)) { 4094 // add continuation to work list instead of parsing it immediately 4095 scope_data()->parent()->add_to_work_list(cont); 4096 } 4097 } 4098 4099 assert(jsr_continuation() == cont, "continuation must not have changed"); 4100 assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) || 4101 jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag), 4102 "continuation can only be visited in case of backward branches"); 4103 assert(_last && _last->as_BlockEnd(), "block must have end"); 4104 4105 // continuation is in work list, so end iteration of current block 4106 _skip_block = true; 4107 pop_scope_for_jsr(); 4108 4109 return true; 4110 } 4111 4112 4113 // Inline the entry of a synchronized method as a monitor enter and 4114 // register the exception handler which releases the monitor if an 4115 // exception is thrown within the callee. Note that the monitor enter 4116 // cannot throw an exception itself, because the receiver is 4117 // guaranteed to be non-null by the explicit null check at the 4118 // beginning of inlining. 
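// Hedged sketch of the resulting HIR shape (illustrative, not emitted text):
//
//   monitorenter(lock)            // appended below; cannot throw here
//   ... inlined callee body ...
//   sync_handler:                 // catch-all XHandler registered below
//     monitorexit(lock); rethrow  // filled in later by fill_sync_handler()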
4119 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) { 4120 assert(lock != nullptr && sync_handler != nullptr, "lock or handler missing"); 4121 4122 monitorenter(lock, SynchronizationEntryBCI); 4123 assert(_last->as_MonitorEnter() != nullptr, "monitor enter expected"); 4124 _last->set_needs_null_check(false); 4125 4126 sync_handler->set(BlockBegin::exception_entry_flag); 4127 sync_handler->set(BlockBegin::is_on_work_list_flag); 4128 4129 ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0); 4130 XHandler* h = new XHandler(desc); 4131 h->set_entry_block(sync_handler); 4132 scope_data()->xhandlers()->append(h); 4133 scope_data()->set_has_handler(); 4134 } 4135 4136 4137 // If an exception is thrown and not handled within an inlined 4138 // synchronized method, the monitor must be released before the 4139 // exception is rethrown in the outer scope. Generate the appropriate 4140 // instructions here. 4141 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) { 4142 BlockBegin* orig_block = _block; 4143 ValueStack* orig_state = _state; 4144 Instruction* orig_last = _last; 4145 _last = _block = sync_handler; 4146 _state = sync_handler->state()->copy(); 4147 4148 assert(sync_handler != nullptr, "handler missing"); 4149 assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here"); 4150 4151 assert(lock != nullptr || default_handler, "lock or handler missing"); 4152 4153 XHandler* h = scope_data()->xhandlers()->remove_last(); 4154 assert(h->entry_block() == sync_handler, "corrupt list of handlers"); 4155 4156 block()->set(BlockBegin::was_visited_flag); 4157 Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI); 4158 assert(exception->is_pinned(), "must be"); 4159 4160 int bci = SynchronizationEntryBCI; 4161 if (compilation()->env()->dtrace_method_probes()) { 4162 // Report exit from inline methods. We don't have a stream here 4163 // so pass an explicit bci of SynchronizationEntryBCI. 
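// (For illustration: the runtime call built below is morally equivalent to
// SharedRuntime::dtrace_method_exit(thread, method); the current thread is
// implicit and the method constant is the single explicit argument.)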
4164 Values* args = new Values(1); 4165 args->push(append_with_bci(new Constant(new MethodConstant(method())), bci)); 4166 append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci); 4167 } 4168 4169 if (lock) { 4170 assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing"); 4171 if (!lock->is_linked()) { 4172 lock = append_with_bci(lock, bci); 4173 } 4174 4175 // exit the monitor in the context of the synchronized method 4176 monitorexit(lock, bci); 4177 4178 // exit the context of the synchronized method 4179 if (!default_handler) { 4180 pop_scope(); 4181 bci = _state->caller_state()->bci(); 4182 _state = _state->caller_state()->copy_for_parsing(); 4183 } 4184 } 4185 4186 // perform the throw as if at the call site 4187 apush(exception); 4188 throw_op(bci); 4189 4190 BlockEnd* end = last()->as_BlockEnd(); 4191 block()->set_end(end); 4192 4193 _block = orig_block; 4194 _state = orig_state; 4195 _last = orig_last; 4196 } 4197 4198 4199 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 4200 assert(!callee->is_native(), "callee must not be native"); 4201 if (CompilationPolicy::should_not_inline(compilation()->env(), callee)) { 4202 INLINE_BAILOUT("inlining prohibited by policy"); 4203 } 4204 // first perform tests of things it's not possible to inline 4205 if (callee->has_exception_handlers() && 4206 !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers"); 4207 if (callee->is_synchronized() && 4208 !InlineSynchronizedMethods ) INLINE_BAILOUT("callee is synchronized"); 4209 if (!callee->holder()->is_linked()) INLINE_BAILOUT("callee's klass not linked yet"); 4210 if (bc == Bytecodes::_invokestatic && 4211 !callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet"); 4212 if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match"); 4213 4214 // Proper inlining of methods with jsrs requires a little more work. 
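// (Context, hedged: jsr/ret subroutines -- e.g. the bytecode pattern older
// javac versions emitted for try/finally -- can be entered from several call
// sites, so inlining them would require cloning the subroutine blocks per
// site; we simply bail out instead.)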
4215 if (callee->has_jsrs() ) INLINE_BAILOUT("jsrs not handled properly by inliner yet"); 4216 4217 if (is_profiling() && !callee->ensure_method_data()) { 4218 INLINE_BAILOUT("mdo allocation failed"); 4219 } 4220 4221 const bool is_invokedynamic = (bc == Bytecodes::_invokedynamic); 4222 const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic); 4223 4224 const int args_base = state()->stack_size() - callee->arg_size(); 4225 assert(args_base >= 0, "stack underflow during inlining"); 4226 4227 Value recv = nullptr; 4228 if (has_receiver) { 4229 assert(!callee->is_static(), "callee must not be static"); 4230 assert(callee->arg_size() > 0, "must have at least a receiver"); 4231 4232 recv = state()->stack_at(args_base); 4233 if (recv->is_null_obj()) { 4234 INLINE_BAILOUT("receiver is always null"); 4235 } 4236 } 4237 4238 // now perform tests that are based on flag settings 4239 bool inlinee_by_directive = compilation()->directive()->should_inline(callee); 4240 if (callee->force_inline() || inlinee_by_directive) { 4241 if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel"); 4242 if (recursive_inline_level(callee) > C1MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep"); 4243 4244 const char* msg = ""; 4245 if (callee->force_inline()) msg = "force inline by annotation"; 4246 if (inlinee_by_directive) msg = "force inline by CompileCommand"; 4247 print_inlining(callee, msg); 4248 } else { 4249 // use heuristic controls on inlining 4250 if (inline_level() > C1MaxInlineLevel ) INLINE_BAILOUT("inlining too deep"); 4251 int callee_recursive_level = recursive_inline_level(callee); 4252 if (callee_recursive_level > C1MaxRecursiveInlineLevel ) INLINE_BAILOUT("recursive inlining too deep"); 4253 if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); 4254 // Additional condition to limit stack usage for non-recursive calls. 4255 if ((callee_recursive_level == 0) && 4256 (callee->max_stack() + callee->max_locals() - callee->size_of_parameters() > C1InlineStackLimit)) { 4257 INLINE_BAILOUT("callee uses too much stack"); 4258 } 4259 4260 // don't inline throwable methods unless the inlining tree is rooted in a throwable class 4261 if (callee->name() == ciSymbols::object_initializer_name() && 4262 callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 4263 // Throwable constructor call 4264 IRScope* top = scope(); 4265 while (top->caller() != nullptr) { 4266 top = top->caller(); 4267 } 4268 if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 4269 INLINE_BAILOUT("don't inline Throwable constructors"); 4270 } 4271 } 4272 4273 if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) { 4274 INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); 4275 } 4276 // printing 4277 print_inlining(callee, "inline", /*success*/ true); 4278 } 4279 4280 assert(bc != Bytecodes::_invokestatic || callee->holder()->is_initialized(), "required"); 4281 4282 // NOTE: Bailouts from this point on, which occur at the 4283 // GraphBuilder level, do not cause bailout just of the inlining but 4284 // in fact of the entire compilation. 

  BlockBegin* orig_block = block();

  // Insert null check if necessary
  if (has_receiver) {
    // note: null check must happen even if first instruction of callee does
    //       an implicit null check since the callee is in a different scope
    //       and we must make sure exception handling does the right thing
    null_check(recv);
  }

  if (is_profiling()) {
    // Note that we'd collect profile data in this method if we wanted it.
    // This may be redundant here...
    compilation()->set_would_profile(true);

    if (profile_calls()) {
      int start = 0;
      Values* obj_args = args_list_for_profiling(callee, start, has_receiver);
      if (obj_args != nullptr) {
        int s = obj_args->capacity();
        // if called through method handle invoke, some arguments may have been popped
        for (int i = args_base + start, j = 0; j < obj_args->capacity() && i < state()->stack_size(); ) {
          Value v = state()->stack_at_inc(i);
          if (v->type()->is_object_kind()) {
            obj_args->push(v);
            j++;
          }
        }
        check_args_for_profiling(obj_args, s);
      }
      profile_call(callee, recv, holder_known ? callee->holder() : nullptr, obj_args, true);
    }
  }

  // Introduce a new callee continuation point - if the callee has
  // more than one return instruction or the return does not allow
  // fall-through of control flow, all return instructions of the
  // callee will need to be replaced by Goto's pointing to this
  // continuation point.
  BlockBegin* cont = block_at(next_bci());
  bool continuation_existed = true;
  if (cont == nullptr) {
    cont = new BlockBegin(next_bci());
    // low number so that continuation gets parsed as early as possible
    cont->set_depth_first_number(0);
    if (PrintInitialBlockList) {
      tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
                    cont->block_id(), cont->bci(), bci());
    }
    continuation_existed = false;
  }
  // Record the number of predecessors of the continuation block before
  // inlining, to detect whether the inlined method has edges to its
  // continuation after inlining.
  int continuation_preds = cont->number_of_preds();

  // Push callee scope
  push_scope(callee, cont);

  // the BlockListBuilder for the callee could have bailed out
  if (bailed_out())
    return false;

  // Temporarily set up bytecode stream so we can append instructions
  // (only using the bci of this stream)
  scope_data()->set_stream(scope_data()->parent()->stream());

  // Pass parameters into callee state: add assignments
  // note: this will also ensure that all arguments are computed before being passed
  ValueStack* callee_state = state();
  ValueStack* caller_state = state()->caller_state();
  for (int i = args_base; i < caller_state->stack_size(); ) {
    const int arg_no = i - args_base;
    Value arg = caller_state->stack_at_inc(i);
    store_local(callee_state, arg, arg_no);
  }

  // Remove args from stack.
  // Note that we preserve locals state in case we can use it later
  // (see use of pop_scope() below)
  caller_state->truncate_stack(args_base);
  assert(callee_state->stack_size() == 0, "callee stack must be empty");

  Value lock = nullptr;
  BlockBegin* sync_handler = nullptr;
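
  // For a synchronized callee the monitorenter is emitted inline below and
  // a synthetic handler block (sync_handler) is registered so that any
  // exceptional exit from the inlined body releases the monitor (see the
  // fill_sync_handler call further down), mirroring what the interpreter
  // does for synchronized methods.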
  // Inline the locking of the receiver if the callee is synchronized
  if (callee->is_synchronized()) {
    lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
                               : state()->local_at(0);
    sync_handler = new BlockBegin(SynchronizationEntryBCI);
    inline_sync_entry(lock, sync_handler);
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Values* args = new Values(1);
    args->push(append(new Constant(new MethodConstant(method()))));
    append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
  }

  if (profile_inlined_calls()) {
    profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
  }

  BlockBegin* callee_start_block = block_at(0);
  if (callee_start_block != nullptr) {
    assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
    Goto* goto_callee = new Goto(callee_start_block, false);
    // The state for this goto is in the scope of the callee, so use
    // the entry bci for the callee instead of the call site bci.
    append_with_bci(goto_callee, 0);
    _block->set_end(goto_callee);
    callee_start_block->merge(callee_state, compilation()->has_irreducible_loops());

    _last = _block = callee_start_block;

    scope_data()->add_to_work_list(callee_start_block);
  }

  // Clear out bytecode stream
  scope_data()->set_stream(nullptr);
  scope_data()->set_ignore_return(ignore_return);

  CompileLog* log = compilation()->log();
  if (log != nullptr) log->head("parse method='%d'", log->identify(callee));

  // Ready to resume parsing in callee (either in the same block we
  // were in before or in the callee's start block)
  iterate_all_blocks(callee_start_block == nullptr);

  if (log != nullptr) log->done("parse");

  // If we bailed out during parsing, return immediately (this is bad news)
  if (bailed_out())
    return false;

  // iterate_all_blocks theoretically traverses in random order; in
  // practice, we have only traversed the continuation if we are
  // inlining into a subroutine
  assert(continuation_existed ||
         !continuation()->is_set(BlockBegin::was_visited_flag),
         "continuation should not have been parsed yet if we created it");

  // At this point we are almost ready to return and resume parsing of
  // the caller back in the GraphBuilder. The only thing we want to do
  // first is an optimization: during parsing of the callee we
  // generated at least one Goto to the continuation block. If we
  // generated exactly one, and if the inlined method spanned exactly
  // one block (and we didn't have to Goto its entry), then we snip
  // off the Goto to the continuation, allowing control to fall
  // through back into the caller block and effectively performing
  // block merging. This allows load elimination and CSE to take place
  // across multiple callee scopes if they are relatively simple, and
  // is currently essential to making inlining profitable.
  if (num_returns() == 1
      && block() == orig_block
      && block() == inline_cleanup_block()) {
    _last  = inline_cleanup_return_prev();
    _state = inline_cleanup_state();
  } else if (continuation_preds == cont->number_of_preds()) {
    // Inlining has made the instructions after the invoke in the
    // caller unreachable, so skip filling this block with
    // instructions!
    assert(cont == continuation(), "");
    assert(_last && _last->as_BlockEnd(), "");
    _skip_block = true;
  } else {
    // Resume parsing in continuation block unless it was already parsed.
    // Note that if we don't change _last here, iteration in
    // iterate_bytecodes_for_block will stop when we return.
    if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
      // add continuation to work list instead of parsing it immediately
      assert(_last && _last->as_BlockEnd(), "");
      scope_data()->parent()->add_to_work_list(continuation());
      _skip_block = true;
    }
  }

  // Fill the exception handler for synchronized methods with instructions
  if (callee->is_synchronized() && sync_handler->state() != nullptr) {
    fill_sync_handler(lock, sync_handler);
  } else {
    pop_scope();
  }

  compilation()->notice_inlined_method(callee);

  return true;
}


bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return) {
  ValueStack* state_before = copy_state_before();
  vmIntrinsics::ID iid = callee->intrinsic_id();
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // get MethodHandle receiver
      const int args_base = state()->stack_size() - callee->arg_size();
      ValueType* type = state()->stack_at(args_base)->type();
      if (type->is_constant()) {
        ciObject* mh = type->as_ObjectType()->constant_value();
        if (mh->is_method_handle()) {
          ciMethod* target = mh->as_method_handle()->get_vmtarget();

          // We don't do CHA here so only inline static and statically bindable methods.
          if (target->is_static() || target->can_be_statically_bound()) {
            if (ciMethod::is_consistent_info(callee, target)) {
              Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
              ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void());
              if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) {
                return true;
              }
            } else {
              print_inlining(target, "signatures mismatch", /*success*/ false);
            }
          } else {
            assert(false, "no inlining through MH::invokeBasic"); // missing optimization opportunity due to suboptimal LF shape
            print_inlining(target, "not static or statically bindable", /*success*/ false);
          }
        } else {
          assert(mh->is_null_object(), "not a null");
          print_inlining(callee, "receiver is always null", /*success*/ false);
        }
      } else {
        print_inlining(callee, "receiver not constant", /*success*/ false);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // pop MemberName argument
      const int args_base = state()->stack_size() - callee->arg_size();
      ValueType* type = apop()->type();
      if (type->is_constant()) {
        ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
        ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void());
        // If the target is another method handle invoke, try to recursively get
        // a better target.
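        // (This can happen when the MemberName's vmtarget is itself
        //  another invoker intrinsic rather than a concrete Java method,
        //  e.g. through chained lambda forms; recursing unwraps such
        //  chains until a real target is found.)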
        if (target->is_method_handle_intrinsic()) {
          if (try_method_handle_inline(target, ignore_return)) {
            return true;
          }
        } else if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining(target, "signatures mismatch", /*success*/ false);
        } else {
          ciSignature* signature = target->signature();
          const int receiver_skip = target->is_static() ? 0 : 1;
          // Cast receiver to its type.
          if (!target->is_static()) {
            ciKlass* tk = signature->accessing_klass();
            Value obj = state()->stack_at(args_base);
            if (obj->exact_type() == nullptr &&
                obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
              TypeCast* c = new TypeCast(tk, obj, state_before);
              append(c);
              state()->stack_at_put(args_base, c);
            }
          }
          // Cast reference arguments to their types.
          for (int i = 0, j = 0; i < signature->count(); i++) {
            ciType* t = signature->type_at(i);
            if (t->is_klass()) {
              ciKlass* tk = t->as_klass();
              Value obj = state()->stack_at(args_base + receiver_skip + j);
              if (obj->exact_type() == nullptr &&
                  obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
                TypeCast* c = new TypeCast(t, obj, state_before);
                append(c);
                state()->stack_at_put(args_base + receiver_skip + j, c);
              }
            }
            j += t->size();  // long and double take two slots
          }
          // We don't do CHA here so only inline static and statically bindable methods.
          if (target->is_static() || target->can_be_statically_bound()) {
            Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
            if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) {
              return true;
            }
          } else {
            print_inlining(target, "not static or statically bindable", /*success*/ false);
          }
        }
      } else {
        print_inlining(callee, "MemberName not constant", /*success*/ false);
      }
    }
    break;

  case vmIntrinsics::_linkToNative:
    print_inlining(callee, "native call", /*success*/ false);
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  set_state(state_before->copy_for_parsing());
  return false;
}


void GraphBuilder::inline_bailout(const char* msg) {
  assert(msg != nullptr, "inline bailout msg must exist");
  _inline_bailout_msg = msg;
}


void GraphBuilder::clear_inline_bailout() {
  _inline_bailout_msg = nullptr;
}


void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
  ScopeData* data = new ScopeData(nullptr);
  data->set_scope(scope);
  data->set_bci2block(bci2block);
  _scope_data = data;
  _block = start;
}


void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
  IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
  scope()->add_callee(callee_scope);

  BlockListBuilder blb(compilation(), callee_scope, -1);
  CHECK_BAILOUT();

  if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
    // this scope can be inlined directly into the caller so remove
    // the block at bci 0.
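    // (Keeping a real block at bci 0 would force try_inline_full to emit
    //  a Goto from the call site into the callee entry; clearing the slot
    //  lets the callee's first bytecodes be appended straight onto the
    //  caller's current block instead.)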
    blb.bci2block()->at_put(0, nullptr);
  }

  set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci())));

  ScopeData* data = new ScopeData(scope_data());
  data->set_scope(callee_scope);
  data->set_bci2block(blb.bci2block());
  data->set_continuation(continuation);
  _scope_data = data;
}


void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
  ScopeData* data = new ScopeData(scope_data());
  data->set_parsing_jsr();
  data->set_jsr_entry_bci(jsr_dest_bci);
  data->set_jsr_return_address_local(-1);
  // Must clone bci2block list as we will be mutating it in order to
  // properly clone all blocks in jsr region as well as exception
  // handlers containing rets
  BlockList* new_bci2block = new BlockList(bci2block()->length());
  new_bci2block->appendAll(bci2block());
  data->set_bci2block(new_bci2block);
  data->set_scope(scope());
  data->setup_jsr_xhandlers();
  data->set_continuation(continuation());
  data->set_jsr_continuation(jsr_continuation);
  _scope_data = data;
}


void GraphBuilder::pop_scope() {
  int number_of_locks = scope()->number_of_locks();
  _scope_data = scope_data()->parent();
  // accumulate minimum number of monitor slots to be reserved
  scope()->set_min_number_of_locks(number_of_locks);
}


void GraphBuilder::pop_scope_for_jsr() {
  _scope_data = scope_data()->parent();
}


void GraphBuilder::append_unsafe_get(ciMethod* callee, BasicType t, bool is_volatile) {
  Values* args = state()->pop_arguments(callee->arg_size());
  null_check(args->at(0));
  Instruction* offset = args->at(2);
#ifndef _LP64
  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
  Instruction* op = append(new UnsafeGet(t, args->at(1), offset, is_volatile));
  push(op->type(), op);
  compilation()->set_has_unsafe_access(true);
}


void GraphBuilder::append_unsafe_put(ciMethod* callee, BasicType t, bool is_volatile) {
  Values* args = state()->pop_arguments(callee->arg_size());
  null_check(args->at(0));
  Instruction* offset = args->at(2);
#ifndef _LP64
  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
  Value val = args->at(3);
  if (t == T_BOOLEAN) {
    Value mask = append(new Constant(new IntConstant(1)));
    val = append(new LogicOp(Bytecodes::_iand, val, mask));
  }
  Instruction* op = append(new UnsafePut(t, args->at(1), offset, val, is_volatile));
  compilation()->set_has_unsafe_access(true);
  kill_all();
}


void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
  ValueStack* state_before = copy_state_for_exception();
  ValueType* result_type = as_ValueType(callee->return_type());
  assert(result_type->is_int(), "int result");
  Values* args = state()->pop_arguments(callee->arg_size());

  // Pop off some args to specially handle, then push back
  Value newval = args->pop();
  Value cmpval = args->pop();
  Value offset = args->pop();
  Value src = args->pop();
  Value unsafe_obj = args->pop();
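
  // (The popped values correspond to a call shaped like
  //      unsafe.compareAndSet*(src, offset, cmpval, newval)
  //  with the Unsafe instance itself ending up in unsafe_obj.)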

  // Separately handle the unsafe arg. It is not needed for code
  // generation, but must be null checked.
  null_check(unsafe_obj);

#ifndef _LP64
  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif

  args->push(src);
  args->push(offset);
  args->push(cmpval);
  args->push(newval);

  // An unsafe CAS can alias with other field accesses, but we don't
  // know which ones, so mark the state as not preserved. This will
  // cause CSE to invalidate memory across it.
  bool preserves_state = false;
  Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
  append_split(result);
  push(result_type, result);
  compilation()->set_has_unsafe_access(true);
}


void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) {
  // This intrinsic accesses a byte[] array as a char[] array. Computing the
  // offsets correctly requires matched array shapes.
  assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
         "sanity: byte[] and char[] bases agree");
  assert(type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE) * 2,
         "sanity: byte[] and char[] scales agree");

  ValueStack* state_before = copy_state_indexed_access();
  compilation()->set_has_access_indexed(true);
  Values* args = state()->pop_arguments(callee->arg_size());
  Value array = args->at(0);
  Value index = args->at(1);
  if (is_store) {
    Value value = args->at(2);
    Instruction* store = append(new StoreIndexed(array, index, nullptr, T_CHAR, value, state_before, false, true));
    store->set_flag(Instruction::NeedsRangeCheckFlag, false);
    _memory->store_value(value);
  } else {
    Instruction* load = append(new LoadIndexed(array, index, nullptr, T_CHAR, state_before, true));
    load->set_flag(Instruction::NeedsRangeCheckFlag, false);
    push(load->type(), load);
  }
}


void GraphBuilder::append_alloc_array_copy(ciMethod* callee) {
  const int args_base = state()->stack_size() - callee->arg_size();
  ciType* receiver_type = state()->stack_at(args_base)->exact_type();
  if (receiver_type == nullptr) {
    inline_bailout("must have a receiver");
    return;
  }
  if (!receiver_type->is_type_array_klass()) {
    inline_bailout("clone array not primitive");
    return;
  }

  ValueStack* state_before = copy_state_before();
  state_before->set_force_reexecute();
  Value src = apop();
  BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type();
  Value length = append(new ArrayLength(src, state_before));
  Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false));

  ValueType* result_type = as_ValueType(callee->return_type());
  vmIntrinsics::ID id = vmIntrinsics::_arraycopy;
  Values* args = new Values(5);
  args->push(src);
  args->push(append(new Constant(new IntConstant(0))));
  args->push(new_array);
  args->push(append(new Constant(new IntConstant(0))));
  args->push(length);
  const bool has_receiver = true;
  Intrinsic* array_copy = new Intrinsic(result_type, id,
                                        args, has_receiver, state_before,
                                        vmIntrinsics::preserves_state(id),
                                        vmIntrinsics::can_trap(id));
  array_copy->set_flag(Instruction::OmitChecksFlag, true);
  append_split(array_copy);
  apush(new_array);
  append(new MemBar(lir_membar_storestore));
}
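// (The expansion above corresponds to a primitive array clone such as
//      int[] copy = src.clone();
//  i.e. read the length, allocate a matching array, copy the elements
//  with an arraycopy whose bounds/store checks are provably unnecessary,
//  and emit a storestore barrier so the contents are visible before the
//  new array is published.)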


void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
  CompileLog* log = compilation()->log();
  if (log != nullptr) {
    assert(msg != nullptr, "inlining msg should not be null!");
    if (success) {
      log->inline_success(msg);
    } else {
      log->inline_fail(msg);
    }
  }
  EventCompilerInlining event;
  if (event.should_commit()) {
    CompilerEvent::InlineEvent::post(event, compilation()->env()->task()->compile_id(), method()->get_Method(), callee, success, msg, bci());
  }

  CompileTask::print_inlining_ul(callee, scope()->level(), bci(), inlining_result_of(success), msg);

  if (!compilation()->directive()->PrintInliningOption) {
    return;
  }
  CompileTask::print_inlining_tty(callee, scope()->level(), bci(), inlining_result_of(success), msg);
  if (success && CIPrintMethodCodes) {
    callee->print_codes();
  }
}


void GraphBuilder::append_unsafe_get_and_set(ciMethod* callee, bool is_add) {
  Values* args = state()->pop_arguments(callee->arg_size());
  BasicType t = callee->return_type()->basic_type();
  null_check(args->at(0));
  Instruction* offset = args->at(2);
#ifndef _LP64
  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
  Instruction* op = append(new UnsafeGetAndSet(t, args->at(1), offset, args->at(3), is_add));
  compilation()->set_has_unsafe_access(true);
  kill_all();
  push(op->type(), op);
}


#ifndef PRODUCT
void GraphBuilder::print_stats() {
  if (UseLocalValueNumbering) {
    vmap()->print();
  }
}
#endif // PRODUCT


void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
  assert(known_holder == nullptr || (known_holder->is_instance_klass() &&
                                     (!known_holder->is_interface() ||
                                      ((ciInstanceKlass*)known_holder)->has_nonstatic_concrete_methods())), "should be non-static concrete method");
  if (known_holder != nullptr) {
    if (known_holder->exact_klass() == nullptr) {
      known_holder = compilation()->cha_exact_type(known_holder);
    }
  }

  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
}


void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
  assert((m == nullptr) == (invoke_bci < 0), "invalid method and invalid bci together");
  if (m == nullptr) {
    m = method();
  }
  if (invoke_bci < 0) {
    invoke_bci = bci();
  }
  ciMethodData* md = m->method_data_or_null();
  ciProfileData* data = md->bci_to_data(invoke_bci);
  if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
    bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
    if (has_return) {
      append(new ProfileReturnType(m, invoke_bci, callee, ret));
    }
  }
}


void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
  append(new ProfileInvoke(callee, state));
}