1 /* 2 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "c1/c1_CFGPrinter.hpp" 27 #include "c1/c1_Canonicalizer.hpp" 28 #include "c1/c1_Compilation.hpp" 29 #include "c1/c1_GraphBuilder.hpp" 30 #include "c1/c1_InstructionPrinter.hpp" 31 #include "ci/ciCallSite.hpp" 32 #include "ci/ciField.hpp" 33 #include "ci/ciFlatArrayKlass.hpp" 34 #include "ci/ciInlineKlass.hpp" 35 #include "ci/ciKlass.hpp" 36 #include "ci/ciMemberName.hpp" 37 #include "ci/ciSymbols.hpp" 38 #include "ci/ciUtilities.inline.hpp" 39 #include "classfile/javaClasses.hpp" 40 #include "compiler/compilationPolicy.hpp" 41 #include "compiler/compileBroker.hpp" 42 #include "compiler/compilerEvent.hpp" 43 #include "interpreter/bytecode.hpp" 44 #include "jfr/jfrEvents.hpp" 45 #include "memory/resourceArea.hpp" 46 #include "oops/oop.inline.hpp" 47 #include "runtime/sharedRuntime.hpp" 48 #include "runtime/vm_version.hpp" 49 #include "utilities/bitMap.inline.hpp" 50 #include "utilities/checkedCast.hpp" 51 #include "utilities/powerOfTwo.hpp" 52 #include "utilities/macros.hpp" 53 #if INCLUDE_JFR 54 #include "jfr/jfr.hpp" 55 #endif 56 57 class BlockListBuilder { 58 private: 59 Compilation* _compilation; 60 IRScope* _scope; 61 62 BlockList _blocks; // internal list of all blocks 63 BlockList* _bci2block; // mapping from bci to blocks for GraphBuilder 64 GrowableArray<BlockList> _bci2block_successors; // Mapping bcis to their blocks successors while we dont have a blockend 65 66 // fields used by mark_loops 67 ResourceBitMap _active; // for iteration of control flow graph 68 ResourceBitMap _visited; // for iteration of control flow graph 69 GrowableArray<ResourceBitMap> _loop_map; // caches the information if a block is contained in a loop 70 int _next_loop_index; // next free loop number 71 int _next_block_number; // for reverse postorder numbering of blocks 72 int _block_id_start; 73 74 int bit_number(int block_id) const { return block_id - _block_id_start; } 75 // accessors 76 Compilation* compilation() const { return _compilation; } 77 IRScope* scope() const { return _scope; } 78 ciMethod* method() const { return scope()->method(); } 79 XHandlers* xhandlers() const { return scope()->xhandlers(); } 80 81 // unified bailout support 82 void bailout(const char* msg) const { compilation()->bailout(msg); } 83 bool bailed_out() const { return compilation()->bailed_out(); } 84 85 // helper functions 86 BlockBegin* make_block_at(int bci, BlockBegin* predecessor); 87 void handle_exceptions(BlockBegin* current, int cur_bci); 88 void handle_jsr(BlockBegin* current, int sr_bci, int next_bci); 89 void store_one(BlockBegin* current, int local); 90 void store_two(BlockBegin* current, int local); 91 void set_entries(int osr_bci); 92 void set_leaders(); 93 94 void make_loop_header(BlockBegin* block); 95 void mark_loops(); 96 BitMap& mark_loops(BlockBegin* b, bool in_subroutine); 97 98 // debugging 99 #ifndef PRODUCT 100 void print(); 101 #endif 102 103 int number_of_successors(BlockBegin* block); 104 BlockBegin* successor_at(BlockBegin* block, int i); 105 void add_successor(BlockBegin* block, BlockBegin* sux); 106 bool is_successor(BlockBegin* block, BlockBegin* sux); 107 108 public: 109 // creation 110 BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci); 111 112 // accessors for GraphBuilder 113 BlockList* bci2block() const { return _bci2block; } 114 }; 115 116 117 // Implementation of BlockListBuilder 118 119 BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci) 120 : _compilation(compilation) 121 , _scope(scope) 122 , _blocks(16) 123 , _bci2block(new BlockList(scope->method()->code_size(), nullptr)) 124 , _bci2block_successors(scope->method()->code_size()) 125 , _active() // size not known yet 126 , _visited() // size not known yet 127 , _loop_map() // size not known yet 128 , _next_loop_index(0) 129 , _next_block_number(0) 130 , _block_id_start(0) 131 { 132 set_entries(osr_bci); 133 set_leaders(); 134 CHECK_BAILOUT(); 135 136 mark_loops(); 137 NOT_PRODUCT(if (PrintInitialBlockList) print()); 138 139 // _bci2block still contains blocks with _end == null and > 0 sux in _bci2block_successors. 140 141 #ifndef PRODUCT 142 if (PrintCFGToFile) { 143 stringStream title; 144 title.print("BlockListBuilder "); 145 scope->method()->print_name(&title); 146 CFGPrinter::print_cfg(_bci2block, title.freeze(), false, false); 147 } 148 #endif 149 } 150 151 152 void BlockListBuilder::set_entries(int osr_bci) { 153 // generate start blocks 154 BlockBegin* std_entry = make_block_at(0, nullptr); 155 if (scope()->caller() == nullptr) { 156 std_entry->set(BlockBegin::std_entry_flag); 157 } 158 if (osr_bci != -1) { 159 BlockBegin* osr_entry = make_block_at(osr_bci, nullptr); 160 osr_entry->set(BlockBegin::osr_entry_flag); 161 } 162 163 // generate exception entry blocks 164 XHandlers* list = xhandlers(); 165 const int n = list->length(); 166 for (int i = 0; i < n; i++) { 167 XHandler* h = list->handler_at(i); 168 BlockBegin* entry = make_block_at(h->handler_bci(), nullptr); 169 entry->set(BlockBegin::exception_entry_flag); 170 h->set_entry_block(entry); 171 } 172 } 173 174 175 BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) { 176 assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer"); 177 178 BlockBegin* block = _bci2block->at(cur_bci); 179 if (block == nullptr) { 180 block = new BlockBegin(cur_bci); 181 block->init_stores_to_locals(method()->max_locals()); 182 _bci2block->at_put(cur_bci, block); 183 _bci2block_successors.at_put_grow(cur_bci, BlockList()); 184 _blocks.append(block); 185 186 assert(predecessor == nullptr || predecessor->bci() < cur_bci, "targets for backward branches must already exist"); 187 } 188 189 if (predecessor != nullptr) { 190 if (block->is_set(BlockBegin::exception_entry_flag)) { 191 BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block); 192 } 193 194 add_successor(predecessor, block); 195 block->increment_total_preds(); 196 } 197 198 return block; 199 } 200 201 202 inline void BlockListBuilder::store_one(BlockBegin* current, int local) { 203 current->stores_to_locals().set_bit(local); 204 } 205 inline void BlockListBuilder::store_two(BlockBegin* current, int local) { 206 store_one(current, local); 207 store_one(current, local + 1); 208 } 209 210 211 void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) { 212 // Draws edges from a block to its exception handlers 213 XHandlers* list = xhandlers(); 214 const int n = list->length(); 215 216 for (int i = 0; i < n; i++) { 217 XHandler* h = list->handler_at(i); 218 219 if (h->covers(cur_bci)) { 220 BlockBegin* entry = h->entry_block(); 221 assert(entry != nullptr && entry == _bci2block->at(h->handler_bci()), "entry must be set"); 222 assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set"); 223 224 // add each exception handler only once 225 if(!is_successor(current, entry)) { 226 add_successor(current, entry); 227 entry->increment_total_preds(); 228 } 229 230 // stop when reaching catchall 231 if (h->catch_type() == 0) break; 232 } 233 } 234 } 235 236 void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) { 237 if (next_bci < method()->code_size()) { 238 // start a new block after jsr-bytecode and link this block into cfg 239 make_block_at(next_bci, current); 240 } 241 242 // start a new block at the subroutine entry at mark it with special flag 243 BlockBegin* sr_block = make_block_at(sr_bci, current); 244 if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) { 245 sr_block->set(BlockBegin::subroutine_entry_flag); 246 } 247 } 248 249 250 void BlockListBuilder::set_leaders() { 251 bool has_xhandlers = xhandlers()->has_handlers(); 252 BlockBegin* current = nullptr; 253 254 // The information which bci starts a new block simplifies the analysis 255 // Without it, backward branches could jump to a bci where no block was created 256 // during bytecode iteration. This would require the creation of a new block at the 257 // branch target and a modification of the successor lists. 258 const BitMap& bci_block_start = method()->bci_block_start(); 259 260 int end_bci = method()->code_size(); 261 262 ciBytecodeStream s(method()); 263 while (s.next() != ciBytecodeStream::EOBC()) { 264 int cur_bci = s.cur_bci(); 265 266 if (bci_block_start.at(cur_bci)) { 267 current = make_block_at(cur_bci, current); 268 } 269 assert(current != nullptr, "must have current block"); 270 271 if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) { 272 handle_exceptions(current, cur_bci); 273 } 274 275 switch (s.cur_bc()) { 276 // track stores to local variables for selective creation of phi functions 277 case Bytecodes::_iinc: store_one(current, s.get_index()); break; 278 case Bytecodes::_istore: store_one(current, s.get_index()); break; 279 case Bytecodes::_lstore: store_two(current, s.get_index()); break; 280 case Bytecodes::_fstore: store_one(current, s.get_index()); break; 281 case Bytecodes::_dstore: store_two(current, s.get_index()); break; 282 case Bytecodes::_astore: store_one(current, s.get_index()); break; 283 case Bytecodes::_istore_0: store_one(current, 0); break; 284 case Bytecodes::_istore_1: store_one(current, 1); break; 285 case Bytecodes::_istore_2: store_one(current, 2); break; 286 case Bytecodes::_istore_3: store_one(current, 3); break; 287 case Bytecodes::_lstore_0: store_two(current, 0); break; 288 case Bytecodes::_lstore_1: store_two(current, 1); break; 289 case Bytecodes::_lstore_2: store_two(current, 2); break; 290 case Bytecodes::_lstore_3: store_two(current, 3); break; 291 case Bytecodes::_fstore_0: store_one(current, 0); break; 292 case Bytecodes::_fstore_1: store_one(current, 1); break; 293 case Bytecodes::_fstore_2: store_one(current, 2); break; 294 case Bytecodes::_fstore_3: store_one(current, 3); break; 295 case Bytecodes::_dstore_0: store_two(current, 0); break; 296 case Bytecodes::_dstore_1: store_two(current, 1); break; 297 case Bytecodes::_dstore_2: store_two(current, 2); break; 298 case Bytecodes::_dstore_3: store_two(current, 3); break; 299 case Bytecodes::_astore_0: store_one(current, 0); break; 300 case Bytecodes::_astore_1: store_one(current, 1); break; 301 case Bytecodes::_astore_2: store_one(current, 2); break; 302 case Bytecodes::_astore_3: store_one(current, 3); break; 303 304 // track bytecodes that affect the control flow 305 case Bytecodes::_athrow: // fall through 306 case Bytecodes::_ret: // fall through 307 case Bytecodes::_ireturn: // fall through 308 case Bytecodes::_lreturn: // fall through 309 case Bytecodes::_freturn: // fall through 310 case Bytecodes::_dreturn: // fall through 311 case Bytecodes::_areturn: // fall through 312 case Bytecodes::_return: 313 current = nullptr; 314 break; 315 316 case Bytecodes::_ifeq: // fall through 317 case Bytecodes::_ifne: // fall through 318 case Bytecodes::_iflt: // fall through 319 case Bytecodes::_ifge: // fall through 320 case Bytecodes::_ifgt: // fall through 321 case Bytecodes::_ifle: // fall through 322 case Bytecodes::_if_icmpeq: // fall through 323 case Bytecodes::_if_icmpne: // fall through 324 case Bytecodes::_if_icmplt: // fall through 325 case Bytecodes::_if_icmpge: // fall through 326 case Bytecodes::_if_icmpgt: // fall through 327 case Bytecodes::_if_icmple: // fall through 328 case Bytecodes::_if_acmpeq: // fall through 329 case Bytecodes::_if_acmpne: // fall through 330 case Bytecodes::_ifnull: // fall through 331 case Bytecodes::_ifnonnull: 332 if (s.next_bci() < end_bci) { 333 make_block_at(s.next_bci(), current); 334 } 335 make_block_at(s.get_dest(), current); 336 current = nullptr; 337 break; 338 339 case Bytecodes::_goto: 340 make_block_at(s.get_dest(), current); 341 current = nullptr; 342 break; 343 344 case Bytecodes::_goto_w: 345 make_block_at(s.get_far_dest(), current); 346 current = nullptr; 347 break; 348 349 case Bytecodes::_jsr: 350 handle_jsr(current, s.get_dest(), s.next_bci()); 351 current = nullptr; 352 break; 353 354 case Bytecodes::_jsr_w: 355 handle_jsr(current, s.get_far_dest(), s.next_bci()); 356 current = nullptr; 357 break; 358 359 case Bytecodes::_tableswitch: { 360 // set block for each case 361 Bytecode_tableswitch sw(&s); 362 int l = sw.length(); 363 for (int i = 0; i < l; i++) { 364 make_block_at(cur_bci + sw.dest_offset_at(i), current); 365 } 366 make_block_at(cur_bci + sw.default_offset(), current); 367 current = nullptr; 368 break; 369 } 370 371 case Bytecodes::_lookupswitch: { 372 // set block for each case 373 Bytecode_lookupswitch sw(&s); 374 int l = sw.number_of_pairs(); 375 for (int i = 0; i < l; i++) { 376 make_block_at(cur_bci + sw.pair_at(i).offset(), current); 377 } 378 make_block_at(cur_bci + sw.default_offset(), current); 379 current = nullptr; 380 break; 381 } 382 383 default: 384 break; 385 } 386 } 387 } 388 389 390 void BlockListBuilder::mark_loops() { 391 ResourceMark rm; 392 393 const int number_of_blocks = _blocks.length(); 394 _active.initialize(number_of_blocks); 395 _visited.initialize(number_of_blocks); 396 _loop_map = GrowableArray<ResourceBitMap>(number_of_blocks, number_of_blocks, ResourceBitMap()); 397 for (int i = 0; i < number_of_blocks; i++) { 398 _loop_map.at(i).initialize(number_of_blocks); 399 } 400 _next_loop_index = 0; 401 _next_block_number = _blocks.length(); 402 403 // The loop detection algorithm works as follows: 404 // - We maintain the _loop_map, where for each block we have a bitmap indicating which loops contain this block. 405 // - The CFG is recursively traversed (depth-first) and if we detect a loop, we assign the loop a unique number that is stored 406 // in the bitmap associated with the loop header block. Until we return back through that loop header the bitmap contains 407 // only a single bit corresponding to the loop number. 408 // - The bit is then propagated for all the blocks in the loop after we exit them (post-order). There could be multiple bits 409 // of course in case of nested loops. 410 // - When we exit the loop header we remove that single bit and assign the real loop state for it. 411 // - Now, the tricky part here is how we detect irreducible loops. In the algorithm above the loop state bits 412 // are propagated to the predecessors. If we encounter an irreducible loop (a loop with multiple heads) we would see 413 // a node with some loop bit set that would then propagate back and be never cleared because we would 414 // never go back through the original loop header. Therefore if there are any irreducible loops the bits in the states 415 // for these loops are going to propagate back to the root. 416 BlockBegin* start = _bci2block->at(0); 417 _block_id_start = start->block_id(); 418 BitMap& loop_state = mark_loops(start, false); 419 if (!loop_state.is_empty()) { 420 compilation()->set_has_irreducible_loops(true); 421 } 422 assert(_next_block_number >= 0, "invalid block numbers"); 423 424 // Remove dangling Resource pointers before the ResourceMark goes out-of-scope. 425 _active.resize(0); 426 _visited.resize(0); 427 _loop_map.clear(); 428 } 429 430 void BlockListBuilder::make_loop_header(BlockBegin* block) { 431 int block_id = block->block_id(); 432 int block_bit = bit_number(block_id); 433 if (block->is_set(BlockBegin::exception_entry_flag)) { 434 // exception edges may look like loops but don't mark them as such 435 // since it screws up block ordering. 436 return; 437 } 438 if (!block->is_set(BlockBegin::parser_loop_header_flag)) { 439 block->set(BlockBegin::parser_loop_header_flag); 440 441 assert(_loop_map.at(block_bit).is_empty(), "must not be set yet"); 442 assert(0 <= _next_loop_index && _next_loop_index < _loop_map.length(), "_next_loop_index is too large"); 443 _loop_map.at(block_bit).set_bit(_next_loop_index++); 444 } else { 445 // block already marked as loop header 446 assert(_loop_map.at(block_bit).count_one_bits() == 1, "exactly one bit must be set"); 447 } 448 } 449 450 BitMap& BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) { 451 int block_id = block->block_id(); 452 int block_bit = bit_number(block_id); 453 if (_visited.at(block_bit)) { 454 if (_active.at(block_bit)) { 455 // reached block via backward branch 456 make_loop_header(block); 457 } 458 // return cached loop information for this block 459 return _loop_map.at(block_bit); 460 } 461 462 if (block->is_set(BlockBegin::subroutine_entry_flag)) { 463 in_subroutine = true; 464 } 465 466 // set active and visited bits before successors are processed 467 _visited.set_bit(block_bit); 468 _active.set_bit(block_bit); 469 470 ResourceMark rm; 471 ResourceBitMap loop_state(_loop_map.length()); 472 for (int i = number_of_successors(block) - 1; i >= 0; i--) { 473 BlockBegin* sux = successor_at(block, i); 474 // recursively process all successors 475 loop_state.set_union(mark_loops(sux, in_subroutine)); 476 } 477 478 // clear active-bit after all successors are processed 479 _active.clear_bit(block_bit); 480 481 // reverse-post-order numbering of all blocks 482 block->set_depth_first_number(_next_block_number); 483 _next_block_number--; 484 485 if (!loop_state.is_empty() || in_subroutine ) { 486 // block is contained at least in one loop, so phi functions are necessary 487 // phi functions are also necessary for all locals stored in a subroutine 488 scope()->requires_phi_function().set_union(block->stores_to_locals()); 489 } 490 491 if (block->is_set(BlockBegin::parser_loop_header_flag)) { 492 BitMap& header_loop_state = _loop_map.at(block_bit); 493 assert(header_loop_state.count_one_bits() == 1, "exactly one bit must be set"); 494 // remove the bit with the loop number for the state (header is outside of the loop) 495 loop_state.set_difference(header_loop_state); 496 } 497 498 // cache and return loop information for this block 499 _loop_map.at(block_bit).set_from(loop_state); 500 return _loop_map.at(block_bit); 501 } 502 503 inline int BlockListBuilder::number_of_successors(BlockBegin* block) 504 { 505 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 506 return _bci2block_successors.at(block->bci()).length(); 507 } 508 509 inline BlockBegin* BlockListBuilder::successor_at(BlockBegin* block, int i) 510 { 511 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 512 return _bci2block_successors.at(block->bci()).at(i); 513 } 514 515 inline void BlockListBuilder::add_successor(BlockBegin* block, BlockBegin* sux) 516 { 517 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 518 _bci2block_successors.at(block->bci()).append(sux); 519 } 520 521 inline bool BlockListBuilder::is_successor(BlockBegin* block, BlockBegin* sux) { 522 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 523 return _bci2block_successors.at(block->bci()).contains(sux); 524 } 525 526 #ifndef PRODUCT 527 528 static int compare_depth_first(BlockBegin** a, BlockBegin** b) { 529 return (*a)->depth_first_number() - (*b)->depth_first_number(); 530 } 531 532 void BlockListBuilder::print() { 533 tty->print("----- initial block list of BlockListBuilder for method "); 534 method()->print_short_name(); 535 tty->cr(); 536 537 // better readability if blocks are sorted in processing order 538 _blocks.sort(compare_depth_first); 539 540 for (int i = 0; i < _blocks.length(); i++) { 541 BlockBegin* cur = _blocks.at(i); 542 tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds()); 543 544 tty->print(cur->is_set(BlockBegin::std_entry_flag) ? " std" : " "); 545 tty->print(cur->is_set(BlockBegin::osr_entry_flag) ? " osr" : " "); 546 tty->print(cur->is_set(BlockBegin::exception_entry_flag) ? " ex" : " "); 547 tty->print(cur->is_set(BlockBegin::subroutine_entry_flag) ? " sr" : " "); 548 tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh" : " "); 549 550 if (number_of_successors(cur) > 0) { 551 tty->print(" sux: "); 552 for (int j = 0; j < number_of_successors(cur); j++) { 553 BlockBegin* sux = successor_at(cur, j); 554 tty->print("B%d ", sux->block_id()); 555 } 556 } 557 tty->cr(); 558 } 559 } 560 561 #endif 562 563 564 // A simple growable array of Values indexed by ciFields 565 class FieldBuffer: public CompilationResourceObj { 566 private: 567 GrowableArray<Value> _values; 568 569 public: 570 FieldBuffer() {} 571 572 void kill() { 573 _values.trunc_to(0); 574 } 575 576 Value at(ciField* field) { 577 assert(field->holder()->is_loaded(), "must be a loaded field"); 578 int offset = field->offset_in_bytes(); 579 if (offset < _values.length()) { 580 return _values.at(offset); 581 } else { 582 return nullptr; 583 } 584 } 585 586 void at_put(ciField* field, Value value) { 587 assert(field->holder()->is_loaded(), "must be a loaded field"); 588 int offset = field->offset_in_bytes(); 589 _values.at_put_grow(offset, value, nullptr); 590 } 591 592 }; 593 594 595 // MemoryBuffer is fairly simple model of the current state of memory. 596 // It partitions memory into several pieces. The first piece is 597 // generic memory where little is known about the owner of the memory. 598 // This is conceptually represented by the tuple <O, F, V> which says 599 // that the field F of object O has value V. This is flattened so 600 // that F is represented by the offset of the field and the parallel 601 // arrays _objects and _values are used for O and V. Loads of O.F can 602 // simply use V. Newly allocated objects are kept in a separate list 603 // along with a parallel array for each object which represents the 604 // current value of its fields. Stores of the default value to fields 605 // which have never been stored to before are eliminated since they 606 // are redundant. Once newly allocated objects are stored into 607 // another object or they are passed out of the current compile they 608 // are treated like generic memory. 609 610 class MemoryBuffer: public CompilationResourceObj { 611 private: 612 FieldBuffer _values; 613 GrowableArray<Value> _objects; 614 GrowableArray<Value> _newobjects; 615 GrowableArray<FieldBuffer*> _fields; 616 617 public: 618 MemoryBuffer() {} 619 620 StoreField* store(StoreField* st) { 621 if (!EliminateFieldAccess) { 622 return st; 623 } 624 625 Value object = st->obj(); 626 Value value = st->value(); 627 ciField* field = st->field(); 628 if (field->holder()->is_loaded()) { 629 int offset = field->offset_in_bytes(); 630 int index = _newobjects.find(object); 631 if (index != -1) { 632 // newly allocated object with no other stores performed on this field 633 FieldBuffer* buf = _fields.at(index); 634 if (buf->at(field) == nullptr && is_default_value(value)) { 635 #ifndef PRODUCT 636 if (PrintIRDuringConstruction && Verbose) { 637 tty->print_cr("Eliminated store for object %d:", index); 638 st->print_line(); 639 } 640 #endif 641 return nullptr; 642 } else { 643 buf->at_put(field, value); 644 } 645 } else { 646 _objects.at_put_grow(offset, object, nullptr); 647 _values.at_put(field, value); 648 } 649 650 store_value(value); 651 } else { 652 // if we held onto field names we could alias based on names but 653 // we don't know what's being stored to so kill it all. 654 kill(); 655 } 656 return st; 657 } 658 659 660 // return true if this value correspond to the default value of a field. 661 bool is_default_value(Value value) { 662 Constant* con = value->as_Constant(); 663 if (con) { 664 switch (con->type()->tag()) { 665 case intTag: return con->type()->as_IntConstant()->value() == 0; 666 case longTag: return con->type()->as_LongConstant()->value() == 0; 667 case floatTag: return jint_cast(con->type()->as_FloatConstant()->value()) == 0; 668 case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0); 669 case objectTag: return con->type() == objectNull; 670 default: ShouldNotReachHere(); 671 } 672 } 673 return false; 674 } 675 676 677 // return either the actual value of a load or the load itself 678 Value load(LoadField* load) { 679 if (!EliminateFieldAccess) { 680 return load; 681 } 682 683 if (strict_fp_requires_explicit_rounding && load->type()->is_float_kind()) { 684 #ifdef IA32 685 if (UseSSE < 2) { 686 // can't skip load since value might get rounded as a side effect 687 return load; 688 } 689 #else 690 Unimplemented(); 691 #endif // IA32 692 } 693 694 ciField* field = load->field(); 695 Value object = load->obj(); 696 if (field->holder()->is_loaded() && !field->is_volatile()) { 697 int offset = field->offset_in_bytes(); 698 Value result = nullptr; 699 int index = _newobjects.find(object); 700 if (index != -1) { 701 result = _fields.at(index)->at(field); 702 } else if (_objects.at_grow(offset, nullptr) == object) { 703 result = _values.at(field); 704 } 705 if (result != nullptr) { 706 #ifndef PRODUCT 707 if (PrintIRDuringConstruction && Verbose) { 708 tty->print_cr("Eliminated load: "); 709 load->print_line(); 710 } 711 #endif 712 assert(result->type()->tag() == load->type()->tag(), "wrong types"); 713 return result; 714 } 715 } 716 return load; 717 } 718 719 // Record this newly allocated object 720 void new_instance(NewInstance* object) { 721 int index = _newobjects.length(); 722 _newobjects.append(object); 723 if (_fields.at_grow(index, nullptr) == nullptr) { 724 _fields.at_put(index, new FieldBuffer()); 725 } else { 726 _fields.at(index)->kill(); 727 } 728 } 729 730 void store_value(Value value) { 731 int index = _newobjects.find(value); 732 if (index != -1) { 733 // stored a newly allocated object into another object. 734 // Assume we've lost track of it as separate slice of memory. 735 // We could do better by keeping track of whether individual 736 // fields could alias each other. 737 _newobjects.remove_at(index); 738 // pull out the field info and store it at the end up the list 739 // of field info list to be reused later. 740 _fields.append(_fields.at(index)); 741 _fields.remove_at(index); 742 } 743 } 744 745 void kill() { 746 _newobjects.trunc_to(0); 747 _objects.trunc_to(0); 748 _values.kill(); 749 } 750 }; 751 752 753 // Implementation of GraphBuilder's ScopeData 754 755 GraphBuilder::ScopeData::ScopeData(ScopeData* parent) 756 : _parent(parent) 757 , _bci2block(nullptr) 758 , _scope(nullptr) 759 , _has_handler(false) 760 , _stream(nullptr) 761 , _work_list(nullptr) 762 , _caller_stack_size(-1) 763 , _continuation(nullptr) 764 , _parsing_jsr(false) 765 , _jsr_xhandlers(nullptr) 766 , _num_returns(0) 767 , _cleanup_block(nullptr) 768 , _cleanup_return_prev(nullptr) 769 , _cleanup_state(nullptr) 770 , _ignore_return(false) 771 { 772 if (parent != nullptr) { 773 _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f); 774 } else { 775 _max_inline_size = C1MaxInlineSize; 776 } 777 if (_max_inline_size < C1MaxTrivialSize) { 778 _max_inline_size = C1MaxTrivialSize; 779 } 780 } 781 782 783 void GraphBuilder::kill_all() { 784 if (UseLocalValueNumbering) { 785 vmap()->kill_all(); 786 } 787 _memory->kill(); 788 } 789 790 791 BlockBegin* GraphBuilder::ScopeData::block_at(int bci) { 792 if (parsing_jsr()) { 793 // It is necessary to clone all blocks associated with a 794 // subroutine, including those for exception handlers in the scope 795 // of the method containing the jsr (because those exception 796 // handlers may contain ret instructions in some cases). 797 BlockBegin* block = bci2block()->at(bci); 798 if (block != nullptr && block == parent()->bci2block()->at(bci)) { 799 BlockBegin* new_block = new BlockBegin(block->bci()); 800 if (PrintInitialBlockList) { 801 tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr", 802 block->block_id(), block->bci(), new_block->block_id()); 803 } 804 // copy data from cloned blocked 805 new_block->set_depth_first_number(block->depth_first_number()); 806 if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag); 807 // Preserve certain flags for assertion checking 808 if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag); 809 if (block->is_set(BlockBegin::exception_entry_flag)) new_block->set(BlockBegin::exception_entry_flag); 810 811 // copy was_visited_flag to allow early detection of bailouts 812 // if a block that is used in a jsr has already been visited before, 813 // it is shared between the normal control flow and a subroutine 814 // BlockBegin::try_merge returns false when the flag is set, this leads 815 // to a compilation bailout 816 if (block->is_set(BlockBegin::was_visited_flag)) new_block->set(BlockBegin::was_visited_flag); 817 818 bci2block()->at_put(bci, new_block); 819 block = new_block; 820 } 821 return block; 822 } else { 823 return bci2block()->at(bci); 824 } 825 } 826 827 828 XHandlers* GraphBuilder::ScopeData::xhandlers() const { 829 if (_jsr_xhandlers == nullptr) { 830 assert(!parsing_jsr(), ""); 831 return scope()->xhandlers(); 832 } 833 assert(parsing_jsr(), ""); 834 return _jsr_xhandlers; 835 } 836 837 838 void GraphBuilder::ScopeData::set_scope(IRScope* scope) { 839 _scope = scope; 840 bool parent_has_handler = false; 841 if (parent() != nullptr) { 842 parent_has_handler = parent()->has_handler(); 843 } 844 _has_handler = parent_has_handler || scope->xhandlers()->has_handlers(); 845 } 846 847 848 void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block, 849 Instruction* return_prev, 850 ValueStack* return_state) { 851 _cleanup_block = block; 852 _cleanup_return_prev = return_prev; 853 _cleanup_state = return_state; 854 } 855 856 857 void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) { 858 if (_work_list == nullptr) { 859 _work_list = new BlockList(); 860 } 861 862 if (!block->is_set(BlockBegin::is_on_work_list_flag)) { 863 // Do not start parsing the continuation block while in a 864 // sub-scope 865 if (parsing_jsr()) { 866 if (block == jsr_continuation()) { 867 return; 868 } 869 } else { 870 if (block == continuation()) { 871 return; 872 } 873 } 874 block->set(BlockBegin::is_on_work_list_flag); 875 _work_list->push(block); 876 877 sort_top_into_worklist(_work_list, block); 878 } 879 } 880 881 882 void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) { 883 assert(worklist->top() == top, ""); 884 // sort block descending into work list 885 const int dfn = top->depth_first_number(); 886 assert(dfn != -1, "unknown depth first number"); 887 int i = worklist->length()-2; 888 while (i >= 0) { 889 BlockBegin* b = worklist->at(i); 890 if (b->depth_first_number() < dfn) { 891 worklist->at_put(i+1, b); 892 } else { 893 break; 894 } 895 i --; 896 } 897 if (i >= -1) worklist->at_put(i + 1, top); 898 } 899 900 901 BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() { 902 if (is_work_list_empty()) { 903 return nullptr; 904 } 905 return _work_list->pop(); 906 } 907 908 909 bool GraphBuilder::ScopeData::is_work_list_empty() const { 910 return (_work_list == nullptr || _work_list->length() == 0); 911 } 912 913 914 void GraphBuilder::ScopeData::setup_jsr_xhandlers() { 915 assert(parsing_jsr(), ""); 916 // clone all the exception handlers from the scope 917 XHandlers* handlers = new XHandlers(scope()->xhandlers()); 918 const int n = handlers->length(); 919 for (int i = 0; i < n; i++) { 920 // The XHandlers need to be adjusted to dispatch to the cloned 921 // handler block instead of the default one but the synthetic 922 // unlocker needs to be handled specially. The synthetic unlocker 923 // should be left alone since there can be only one and all code 924 // should dispatch to the same one. 925 XHandler* h = handlers->handler_at(i); 926 assert(h->handler_bci() != SynchronizationEntryBCI, "must be real"); 927 h->set_entry_block(block_at(h->handler_bci())); 928 } 929 _jsr_xhandlers = handlers; 930 } 931 932 933 int GraphBuilder::ScopeData::num_returns() { 934 if (parsing_jsr()) { 935 return parent()->num_returns(); 936 } 937 return _num_returns; 938 } 939 940 941 void GraphBuilder::ScopeData::incr_num_returns() { 942 if (parsing_jsr()) { 943 parent()->incr_num_returns(); 944 } else { 945 ++_num_returns; 946 } 947 } 948 949 950 // Implementation of GraphBuilder 951 952 #define INLINE_BAILOUT(msg) { inline_bailout(msg); return false; } 953 954 955 void GraphBuilder::load_constant() { 956 ciConstant con = stream()->get_constant(); 957 if (con.is_valid()) { 958 ValueType* t = illegalType; 959 ValueStack* patch_state = nullptr; 960 switch (con.basic_type()) { 961 case T_BOOLEAN: t = new IntConstant (con.as_boolean()); break; 962 case T_BYTE : t = new IntConstant (con.as_byte ()); break; 963 case T_CHAR : t = new IntConstant (con.as_char ()); break; 964 case T_SHORT : t = new IntConstant (con.as_short ()); break; 965 case T_INT : t = new IntConstant (con.as_int ()); break; 966 case T_LONG : t = new LongConstant (con.as_long ()); break; 967 case T_FLOAT : t = new FloatConstant (con.as_float ()); break; 968 case T_DOUBLE : t = new DoubleConstant(con.as_double ()); break; 969 case T_ARRAY : // fall-through 970 case T_OBJECT : { 971 ciObject* obj = con.as_object(); 972 if (!obj->is_loaded() || (PatchALot && !stream()->is_string_constant())) { 973 // A Class, MethodType, MethodHandle, Dynamic, or String. 974 patch_state = copy_state_before(); 975 t = new ObjectConstant(obj); 976 } else { 977 // Might be a Class, MethodType, MethodHandle, or Dynamic constant 978 // result, which might turn out to be an array. 979 if (obj->is_null_object()) { 980 t = objectNull; 981 } else if (obj->is_array()) { 982 t = new ArrayConstant(obj->as_array()); 983 } else { 984 t = new InstanceConstant(obj->as_instance()); 985 } 986 } 987 break; 988 } 989 default: ShouldNotReachHere(); 990 } 991 Value x; 992 if (patch_state != nullptr) { 993 // Arbitrary memory effects from running BSM or class loading (using custom loader) during linkage. 994 bool kills_memory = stream()->is_dynamic_constant() || 995 (!stream()->is_string_constant() && !method()->holder()->has_trusted_loader()); 996 x = new Constant(t, patch_state, kills_memory); 997 } else { 998 x = new Constant(t); 999 } 1000 1001 // Unbox the value at runtime, if needed. 1002 // ConstantDynamic entry can be of a primitive type, but it is cached in boxed form. 1003 if (patch_state != nullptr) { 1004 int cp_index = stream()->get_constant_pool_index(); 1005 BasicType type = stream()->get_basic_type_for_constant_at(cp_index); 1006 if (is_java_primitive(type)) { 1007 ciInstanceKlass* box_klass = ciEnv::current()->get_box_klass_for_primitive_type(type); 1008 assert(box_klass->is_loaded(), "sanity"); 1009 int offset = java_lang_boxing_object::value_offset(type); 1010 ciField* value_field = box_klass->get_field_by_offset(offset, false /*is_static*/); 1011 x = new LoadField(append(x), offset, value_field, false /*is_static*/, patch_state, false /*needs_patching*/); 1012 t = as_ValueType(type); 1013 } else { 1014 assert(is_reference_type(type), "not a reference: %s", type2name(type)); 1015 } 1016 } 1017 1018 push(t, append(x)); 1019 } else { 1020 BAILOUT("could not resolve a constant"); 1021 } 1022 } 1023 1024 1025 void GraphBuilder::load_local(ValueType* type, int index) { 1026 Value x = state()->local_at(index); 1027 assert(x != nullptr && !x->type()->is_illegal(), "access of illegal local variable"); 1028 push(type, x); 1029 } 1030 1031 1032 void GraphBuilder::store_local(ValueType* type, int index) { 1033 Value x = pop(type); 1034 store_local(state(), x, index); 1035 } 1036 1037 1038 void GraphBuilder::store_local(ValueStack* state, Value x, int index) { 1039 if (parsing_jsr()) { 1040 // We need to do additional tracking of the location of the return 1041 // address for jsrs since we don't handle arbitrary jsr/ret 1042 // constructs. Here we are figuring out in which circumstances we 1043 // need to bail out. 1044 if (x->type()->is_address()) { 1045 scope_data()->set_jsr_return_address_local(index); 1046 1047 // Also check parent jsrs (if any) at this time to see whether 1048 // they are using this local. We don't handle skipping over a 1049 // ret. 1050 for (ScopeData* cur_scope_data = scope_data()->parent(); 1051 cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope(); 1052 cur_scope_data = cur_scope_data->parent()) { 1053 if (cur_scope_data->jsr_return_address_local() == index) { 1054 BAILOUT("subroutine overwrites return address from previous subroutine"); 1055 } 1056 } 1057 } else if (index == scope_data()->jsr_return_address_local()) { 1058 scope_data()->set_jsr_return_address_local(-1); 1059 } 1060 } 1061 1062 state->store_local(index, round_fp(x)); 1063 } 1064 1065 1066 void GraphBuilder::load_indexed(BasicType type) { 1067 // In case of in block code motion in range check elimination 1068 ValueStack* state_before = nullptr; 1069 int array_idx = state()->stack_size() - 2; 1070 if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) { 1071 // Save the entire state and re-execute on deopt when accessing flat arrays 1072 state_before = copy_state_before(); 1073 state_before->set_should_reexecute(true); 1074 } else { 1075 state_before = copy_state_indexed_access(); 1076 } 1077 compilation()->set_has_access_indexed(true); 1078 Value index = ipop(); 1079 Value array = apop(); 1080 Value length = nullptr; 1081 if (CSEArrayLength || 1082 (array->as_Constant() != nullptr) || 1083 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || 1084 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) || 1085 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) { 1086 length = append(new ArrayLength(array, state_before)); 1087 } 1088 1089 bool need_membar = false; 1090 LoadIndexed* load_indexed = nullptr; 1091 Instruction* result = nullptr; 1092 if (array->is_loaded_flat_array()) { 1093 ciType* array_type = array->declared_type(); 1094 ciInlineKlass* elem_klass = array_type->as_flat_array_klass()->element_klass()->as_inline_klass(); 1095 1096 bool can_delay_access = false; 1097 ciBytecodeStream s(method()); 1098 s.force_bci(bci()); 1099 s.next(); 1100 if (s.cur_bc() == Bytecodes::_getfield) { 1101 bool will_link; 1102 ciField* next_field = s.get_field(will_link); 1103 bool next_needs_patching = !next_field->holder()->is_initialized() || 1104 !next_field->will_link(method(), Bytecodes::_getfield) || 1105 PatchALot; 1106 can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching; 1107 } 1108 if (can_delay_access) { 1109 // potentially optimizable array access, storing information for delayed decision 1110 LoadIndexed* li = new LoadIndexed(array, index, length, type, state_before); 1111 DelayedLoadIndexed* dli = new DelayedLoadIndexed(li, state_before); 1112 li->set_delayed(dli); 1113 set_pending_load_indexed(dli); 1114 return; // Nothing else to do for now 1115 } else { 1116 if (elem_klass->is_empty()) { 1117 // No need to create a new instance, the default instance will be used instead 1118 load_indexed = new LoadIndexed(array, index, length, type, state_before); 1119 apush(append(load_indexed)); 1120 } else { 1121 NewInstance* new_instance = new NewInstance(elem_klass, state_before, false, true); 1122 _memory->new_instance(new_instance); 1123 apush(append_split(new_instance)); 1124 load_indexed = new LoadIndexed(array, index, length, type, state_before); 1125 load_indexed->set_vt(new_instance); 1126 // The LoadIndexed node will initialise this instance by copying from 1127 // the flat field. Ensure these stores are visible before any 1128 // subsequent store that publishes this reference. 1129 need_membar = true; 1130 } 1131 } 1132 } else { 1133 load_indexed = new LoadIndexed(array, index, length, type, state_before); 1134 if (profile_array_accesses() && is_reference_type(type)) { 1135 compilation()->set_would_profile(true); 1136 load_indexed->set_should_profile(true); 1137 load_indexed->set_profiled_method(method()); 1138 load_indexed->set_profiled_bci(bci()); 1139 } 1140 } 1141 result = append(load_indexed); 1142 if (need_membar) { 1143 append(new MemBar(lir_membar_storestore)); 1144 } 1145 assert(!load_indexed->should_profile() || load_indexed == result, "should not be optimized out"); 1146 if (!array->is_loaded_flat_array()) { 1147 push(as_ValueType(type), result); 1148 } 1149 } 1150 1151 1152 void GraphBuilder::store_indexed(BasicType type) { 1153 // In case of in block code motion in range check elimination 1154 ValueStack* state_before = nullptr; 1155 int array_idx = state()->stack_size() - 3; 1156 if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) { 1157 // Save the entire state and re-execute on deopt when accessing flat arrays 1158 state_before = copy_state_before(); 1159 state_before->set_should_reexecute(true); 1160 } else { 1161 state_before = copy_state_indexed_access(); 1162 } 1163 compilation()->set_has_access_indexed(true); 1164 Value value = pop(as_ValueType(type)); 1165 Value index = ipop(); 1166 Value array = apop(); 1167 Value length = nullptr; 1168 if (CSEArrayLength || 1169 (array->as_Constant() != nullptr) || 1170 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || 1171 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) || 1172 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) { 1173 length = append(new ArrayLength(array, state_before)); 1174 } 1175 ciType* array_type = array->declared_type(); 1176 bool check_boolean = false; 1177 if (array_type != nullptr) { 1178 if (array_type->is_loaded() && 1179 array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) { 1180 assert(type == T_BYTE, "boolean store uses bastore"); 1181 Value mask = append(new Constant(new IntConstant(1))); 1182 value = append(new LogicOp(Bytecodes::_iand, value, mask)); 1183 } 1184 } else if (type == T_BYTE) { 1185 check_boolean = true; 1186 } 1187 1188 StoreIndexed* store_indexed = new StoreIndexed(array, index, length, type, value, state_before, check_boolean); 1189 if (profile_array_accesses() && is_reference_type(type) && !array->is_loaded_flat_array()) { 1190 compilation()->set_would_profile(true); 1191 store_indexed->set_should_profile(true); 1192 store_indexed->set_profiled_method(method()); 1193 store_indexed->set_profiled_bci(bci()); 1194 } 1195 Instruction* result = append(store_indexed); 1196 assert(!store_indexed->should_profile() || store_indexed == result, "should not be optimized out"); 1197 _memory->store_value(value); 1198 } 1199 1200 void GraphBuilder::stack_op(Bytecodes::Code code) { 1201 switch (code) { 1202 case Bytecodes::_pop: 1203 { Value w = state()->raw_pop(); 1204 } 1205 break; 1206 case Bytecodes::_pop2: 1207 { Value w1 = state()->raw_pop(); 1208 Value w2 = state()->raw_pop(); 1209 } 1210 break; 1211 case Bytecodes::_dup: 1212 { Value w = state()->raw_pop(); 1213 state()->raw_push(w); 1214 state()->raw_push(w); 1215 } 1216 break; 1217 case Bytecodes::_dup_x1: 1218 { Value w1 = state()->raw_pop(); 1219 Value w2 = state()->raw_pop(); 1220 state()->raw_push(w1); 1221 state()->raw_push(w2); 1222 state()->raw_push(w1); 1223 } 1224 break; 1225 case Bytecodes::_dup_x2: 1226 { Value w1 = state()->raw_pop(); 1227 Value w2 = state()->raw_pop(); 1228 Value w3 = state()->raw_pop(); 1229 state()->raw_push(w1); 1230 state()->raw_push(w3); 1231 state()->raw_push(w2); 1232 state()->raw_push(w1); 1233 } 1234 break; 1235 case Bytecodes::_dup2: 1236 { Value w1 = state()->raw_pop(); 1237 Value w2 = state()->raw_pop(); 1238 state()->raw_push(w2); 1239 state()->raw_push(w1); 1240 state()->raw_push(w2); 1241 state()->raw_push(w1); 1242 } 1243 break; 1244 case Bytecodes::_dup2_x1: 1245 { Value w1 = state()->raw_pop(); 1246 Value w2 = state()->raw_pop(); 1247 Value w3 = state()->raw_pop(); 1248 state()->raw_push(w2); 1249 state()->raw_push(w1); 1250 state()->raw_push(w3); 1251 state()->raw_push(w2); 1252 state()->raw_push(w1); 1253 } 1254 break; 1255 case Bytecodes::_dup2_x2: 1256 { Value w1 = state()->raw_pop(); 1257 Value w2 = state()->raw_pop(); 1258 Value w3 = state()->raw_pop(); 1259 Value w4 = state()->raw_pop(); 1260 state()->raw_push(w2); 1261 state()->raw_push(w1); 1262 state()->raw_push(w4); 1263 state()->raw_push(w3); 1264 state()->raw_push(w2); 1265 state()->raw_push(w1); 1266 } 1267 break; 1268 case Bytecodes::_swap: 1269 { Value w1 = state()->raw_pop(); 1270 Value w2 = state()->raw_pop(); 1271 state()->raw_push(w1); 1272 state()->raw_push(w2); 1273 } 1274 break; 1275 default: 1276 ShouldNotReachHere(); 1277 break; 1278 } 1279 } 1280 1281 1282 void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) { 1283 Value y = pop(type); 1284 Value x = pop(type); 1285 Value res = new ArithmeticOp(code, x, y, state_before); 1286 // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level 1287 res = append(res); 1288 res = round_fp(res); 1289 push(type, res); 1290 } 1291 1292 1293 void GraphBuilder::negate_op(ValueType* type) { 1294 push(type, append(new NegateOp(pop(type)))); 1295 } 1296 1297 1298 void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) { 1299 Value s = ipop(); 1300 Value x = pop(type); 1301 // try to simplify 1302 // Note: This code should go into the canonicalizer as soon as it can 1303 // can handle canonicalized forms that contain more than one node. 1304 if (CanonicalizeNodes && code == Bytecodes::_iushr) { 1305 // pattern: x >>> s 1306 IntConstant* s1 = s->type()->as_IntConstant(); 1307 if (s1 != nullptr) { 1308 // pattern: x >>> s1, with s1 constant 1309 ShiftOp* l = x->as_ShiftOp(); 1310 if (l != nullptr && l->op() == Bytecodes::_ishl) { 1311 // pattern: (a << b) >>> s1 1312 IntConstant* s0 = l->y()->type()->as_IntConstant(); 1313 if (s0 != nullptr) { 1314 // pattern: (a << s0) >>> s1 1315 const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts 1316 const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts 1317 if (s0c == s1c) { 1318 if (s0c == 0) { 1319 // pattern: (a << 0) >>> 0 => simplify to: a 1320 ipush(l->x()); 1321 } else { 1322 // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant 1323 assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases"); 1324 const int m = checked_cast<int>(right_n_bits(BitsPerInt - s0c)); 1325 Value s = append(new Constant(new IntConstant(m))); 1326 ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s))); 1327 } 1328 return; 1329 } 1330 } 1331 } 1332 } 1333 } 1334 // could not simplify 1335 push(type, append(new ShiftOp(code, x, s))); 1336 } 1337 1338 1339 void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) { 1340 Value y = pop(type); 1341 Value x = pop(type); 1342 push(type, append(new LogicOp(code, x, y))); 1343 } 1344 1345 1346 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) { 1347 ValueStack* state_before = copy_state_before(); 1348 Value y = pop(type); 1349 Value x = pop(type); 1350 ipush(append(new CompareOp(code, x, y, state_before))); 1351 } 1352 1353 1354 void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) { 1355 push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to)))); 1356 } 1357 1358 1359 void GraphBuilder::increment() { 1360 int index = stream()->get_index(); 1361 int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]); 1362 load_local(intType, index); 1363 ipush(append(new Constant(new IntConstant(delta)))); 1364 arithmetic_op(intType, Bytecodes::_iadd); 1365 store_local(intType, index); 1366 } 1367 1368 1369 void GraphBuilder::_goto(int from_bci, int to_bci) { 1370 Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci); 1371 if (is_profiling()) { 1372 compilation()->set_would_profile(true); 1373 x->set_profiled_bci(bci()); 1374 if (profile_branches()) { 1375 x->set_profiled_method(method()); 1376 x->set_should_profile(true); 1377 } 1378 } 1379 append(x); 1380 } 1381 1382 1383 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) { 1384 BlockBegin* tsux = block_at(stream()->get_dest()); 1385 BlockBegin* fsux = block_at(stream()->next_bci()); 1386 bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci(); 1387 1388 bool subst_check = false; 1389 if (EnableValhalla && (stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne)) { 1390 ValueType* left_vt = x->type(); 1391 ValueType* right_vt = y->type(); 1392 if (left_vt->is_object()) { 1393 assert(right_vt->is_object(), "must be"); 1394 ciKlass* left_klass = x->as_loaded_klass_or_null(); 1395 ciKlass* right_klass = y->as_loaded_klass_or_null(); 1396 1397 if (left_klass == nullptr || right_klass == nullptr) { 1398 // The klass is still unloaded, or came from a Phi node. Go slow case; 1399 subst_check = true; 1400 } else if (left_klass->can_be_inline_klass() || right_klass->can_be_inline_klass()) { 1401 // Either operand may be a value object, but we're not sure. Go slow case; 1402 subst_check = true; 1403 } else { 1404 // No need to do substitutability check 1405 } 1406 } 1407 } 1408 if ((stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne) && 1409 is_profiling() && profile_branches()) { 1410 compilation()->set_would_profile(true); 1411 append(new ProfileACmpTypes(method(), bci(), x, y)); 1412 } 1413 1414 // In case of loop invariant code motion or predicate insertion 1415 // before the body of a loop the state is needed 1416 Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic() || subst_check) ? state_before : nullptr, is_bb, subst_check)); 1417 1418 assert(i->as_Goto() == nullptr || 1419 (i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) || 1420 (i->as_Goto()->sux_at(0) == fsux && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())), 1421 "safepoint state of Goto returned by canonicalizer incorrect"); 1422 1423 if (is_profiling()) { 1424 If* if_node = i->as_If(); 1425 if (if_node != nullptr) { 1426 // Note that we'd collect profile data in this method if we wanted it. 1427 compilation()->set_would_profile(true); 1428 // At level 2 we need the proper bci to count backedges 1429 if_node->set_profiled_bci(bci()); 1430 if (profile_branches()) { 1431 // Successors can be rotated by the canonicalizer, check for this case. 1432 if_node->set_profiled_method(method()); 1433 if_node->set_should_profile(true); 1434 if (if_node->tsux() == fsux) { 1435 if_node->set_swapped(true); 1436 } 1437 } 1438 return; 1439 } 1440 1441 // Check if this If was reduced to Goto. 1442 Goto *goto_node = i->as_Goto(); 1443 if (goto_node != nullptr) { 1444 compilation()->set_would_profile(true); 1445 goto_node->set_profiled_bci(bci()); 1446 if (profile_branches()) { 1447 goto_node->set_profiled_method(method()); 1448 goto_node->set_should_profile(true); 1449 // Find out which successor is used. 1450 if (goto_node->default_sux() == tsux) { 1451 goto_node->set_direction(Goto::taken); 1452 } else if (goto_node->default_sux() == fsux) { 1453 goto_node->set_direction(Goto::not_taken); 1454 } else { 1455 ShouldNotReachHere(); 1456 } 1457 } 1458 return; 1459 } 1460 } 1461 } 1462 1463 1464 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) { 1465 Value y = append(new Constant(intZero)); 1466 ValueStack* state_before = copy_state_before(); 1467 Value x = ipop(); 1468 if_node(x, cond, y, state_before); 1469 } 1470 1471 1472 void GraphBuilder::if_null(ValueType* type, If::Condition cond) { 1473 Value y = append(new Constant(objectNull)); 1474 ValueStack* state_before = copy_state_before(); 1475 Value x = apop(); 1476 if_node(x, cond, y, state_before); 1477 } 1478 1479 1480 void GraphBuilder::if_same(ValueType* type, If::Condition cond) { 1481 ValueStack* state_before = copy_state_before(); 1482 Value y = pop(type); 1483 Value x = pop(type); 1484 if_node(x, cond, y, state_before); 1485 } 1486 1487 1488 void GraphBuilder::jsr(int dest) { 1489 // We only handle well-formed jsrs (those which are "block-structured"). 1490 // If the bytecodes are strange (jumping out of a jsr block) then we 1491 // might end up trying to re-parse a block containing a jsr which 1492 // has already been activated. Watch for this case and bail out. 1493 if (next_bci() >= method()->code_size()) { 1494 // This can happen if the subroutine does not terminate with a ret, 1495 // effectively turning the jsr into a goto. 1496 BAILOUT("too-complicated jsr/ret structure"); 1497 } 1498 for (ScopeData* cur_scope_data = scope_data(); 1499 cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope(); 1500 cur_scope_data = cur_scope_data->parent()) { 1501 if (cur_scope_data->jsr_entry_bci() == dest) { 1502 BAILOUT("too-complicated jsr/ret structure"); 1503 } 1504 } 1505 1506 push(addressType, append(new Constant(new AddressConstant(next_bci())))); 1507 if (!try_inline_jsr(dest)) { 1508 return; // bailed out while parsing and inlining subroutine 1509 } 1510 } 1511 1512 1513 void GraphBuilder::ret(int local_index) { 1514 if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine"); 1515 1516 if (local_index != scope_data()->jsr_return_address_local()) { 1517 BAILOUT("can not handle complicated jsr/ret constructs"); 1518 } 1519 1520 // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation 1521 append(new Goto(scope_data()->jsr_continuation(), false)); 1522 } 1523 1524 1525 void GraphBuilder::table_switch() { 1526 Bytecode_tableswitch sw(stream()); 1527 const int l = sw.length(); 1528 if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) { 1529 // total of 2 successors => use If instead of switch 1530 // Note: This code should go into the canonicalizer as soon as it can 1531 // can handle canonicalized forms that contain more than one node. 1532 Value key = append(new Constant(new IntConstant(sw.low_key()))); 1533 BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0)); 1534 BlockBegin* fsux = block_at(bci() + sw.default_offset()); 1535 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); 1536 // In case of loop invariant code motion or predicate insertion 1537 // before the body of a loop the state is needed 1538 ValueStack* state_before = copy_state_if_bb(is_bb); 1539 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); 1540 } else { 1541 // collect successors 1542 BlockList* sux = new BlockList(l + 1, nullptr); 1543 int i; 1544 bool has_bb = false; 1545 for (i = 0; i < l; i++) { 1546 sux->at_put(i, block_at(bci() + sw.dest_offset_at(i))); 1547 if (sw.dest_offset_at(i) < 0) has_bb = true; 1548 } 1549 // add default successor 1550 if (sw.default_offset() < 0) has_bb = true; 1551 sux->at_put(i, block_at(bci() + sw.default_offset())); 1552 // In case of loop invariant code motion or predicate insertion 1553 // before the body of a loop the state is needed 1554 ValueStack* state_before = copy_state_if_bb(has_bb); 1555 Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb)); 1556 #ifdef ASSERT 1557 if (res->as_Goto()) { 1558 for (i = 0; i < l; i++) { 1559 if (sux->at(i) == res->as_Goto()->sux_at(0)) { 1560 assert(res->as_Goto()->is_safepoint() == (sw.dest_offset_at(i) < 0), "safepoint state of Goto returned by canonicalizer incorrect"); 1561 } 1562 } 1563 } 1564 #endif 1565 } 1566 } 1567 1568 1569 void GraphBuilder::lookup_switch() { 1570 Bytecode_lookupswitch sw(stream()); 1571 const int l = sw.number_of_pairs(); 1572 if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) { 1573 // total of 2 successors => use If instead of switch 1574 // Note: This code should go into the canonicalizer as soon as it can 1575 // can handle canonicalized forms that contain more than one node. 1576 // simplify to If 1577 LookupswitchPair pair = sw.pair_at(0); 1578 Value key = append(new Constant(new IntConstant(pair.match()))); 1579 BlockBegin* tsux = block_at(bci() + pair.offset()); 1580 BlockBegin* fsux = block_at(bci() + sw.default_offset()); 1581 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); 1582 // In case of loop invariant code motion or predicate insertion 1583 // before the body of a loop the state is needed 1584 ValueStack* state_before = copy_state_if_bb(is_bb);; 1585 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); 1586 } else { 1587 // collect successors & keys 1588 BlockList* sux = new BlockList(l + 1, nullptr); 1589 intArray* keys = new intArray(l, l, 0); 1590 int i; 1591 bool has_bb = false; 1592 for (i = 0; i < l; i++) { 1593 LookupswitchPair pair = sw.pair_at(i); 1594 if (pair.offset() < 0) has_bb = true; 1595 sux->at_put(i, block_at(bci() + pair.offset())); 1596 keys->at_put(i, pair.match()); 1597 } 1598 // add default successor 1599 if (sw.default_offset() < 0) has_bb = true; 1600 sux->at_put(i, block_at(bci() + sw.default_offset())); 1601 // In case of loop invariant code motion or predicate insertion 1602 // before the body of a loop the state is needed 1603 ValueStack* state_before = copy_state_if_bb(has_bb); 1604 Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb)); 1605 #ifdef ASSERT 1606 if (res->as_Goto()) { 1607 for (i = 0; i < l; i++) { 1608 if (sux->at(i) == res->as_Goto()->sux_at(0)) { 1609 assert(res->as_Goto()->is_safepoint() == (sw.pair_at(i).offset() < 0), "safepoint state of Goto returned by canonicalizer incorrect"); 1610 } 1611 } 1612 } 1613 #endif 1614 } 1615 } 1616 1617 void GraphBuilder::call_register_finalizer() { 1618 // If the receiver requires finalization then emit code to perform 1619 // the registration on return. 1620 1621 // Gather some type information about the receiver 1622 Value receiver = state()->local_at(0); 1623 assert(receiver != nullptr, "must have a receiver"); 1624 ciType* declared_type = receiver->declared_type(); 1625 ciType* exact_type = receiver->exact_type(); 1626 if (exact_type == nullptr && 1627 receiver->as_Local() && 1628 receiver->as_Local()->java_index() == 0) { 1629 ciInstanceKlass* ik = compilation()->method()->holder(); 1630 if (ik->is_final()) { 1631 exact_type = ik; 1632 } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) { 1633 // test class is leaf class 1634 compilation()->dependency_recorder()->assert_leaf_type(ik); 1635 exact_type = ik; 1636 } else { 1637 declared_type = ik; 1638 } 1639 } 1640 1641 // see if we know statically that registration isn't required 1642 bool needs_check = true; 1643 if (exact_type != nullptr) { 1644 needs_check = exact_type->as_instance_klass()->has_finalizer(); 1645 } else if (declared_type != nullptr) { 1646 ciInstanceKlass* ik = declared_type->as_instance_klass(); 1647 if (!Dependencies::has_finalizable_subclass(ik)) { 1648 compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik); 1649 needs_check = false; 1650 } 1651 } 1652 1653 if (needs_check) { 1654 // Perform the registration of finalizable objects. 1655 ValueStack* state_before = copy_state_for_exception(); 1656 load_local(objectType, 0); 1657 append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init, 1658 state()->pop_arguments(1), 1659 true, state_before, true)); 1660 } 1661 } 1662 1663 1664 void GraphBuilder::method_return(Value x, bool ignore_return) { 1665 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) { 1666 call_register_finalizer(); 1667 } 1668 1669 // The conditions for a memory barrier are described in Parse::do_exits(). 1670 bool need_mem_bar = false; 1671 if (method()->is_object_constructor() && 1672 (scope()->wrote_final() || scope()->wrote_stable() || 1673 (AlwaysSafeConstructors && scope()->wrote_fields()) || 1674 (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) { 1675 need_mem_bar = true; 1676 } 1677 1678 BasicType bt = method()->return_type()->basic_type(); 1679 switch (bt) { 1680 case T_BYTE: 1681 { 1682 Value shift = append(new Constant(new IntConstant(24))); 1683 x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); 1684 x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); 1685 break; 1686 } 1687 case T_SHORT: 1688 { 1689 Value shift = append(new Constant(new IntConstant(16))); 1690 x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); 1691 x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); 1692 break; 1693 } 1694 case T_CHAR: 1695 { 1696 Value mask = append(new Constant(new IntConstant(0xFFFF))); 1697 x = append(new LogicOp(Bytecodes::_iand, x, mask)); 1698 break; 1699 } 1700 case T_BOOLEAN: 1701 { 1702 Value mask = append(new Constant(new IntConstant(1))); 1703 x = append(new LogicOp(Bytecodes::_iand, x, mask)); 1704 break; 1705 } 1706 default: 1707 break; 1708 } 1709 1710 // Check to see whether we are inlining. If so, Return 1711 // instructions become Gotos to the continuation point. 1712 if (continuation() != nullptr) { 1713 1714 int invoke_bci = state()->caller_state()->bci(); 1715 1716 if (x != nullptr && !ignore_return) { 1717 ciMethod* caller = state()->scope()->caller()->method(); 1718 Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci); 1719 if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) { 1720 ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type(); 1721 if (declared_ret_type->is_klass() && x->exact_type() == nullptr && 1722 x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) { 1723 x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before())); 1724 } 1725 } 1726 } 1727 1728 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet"); 1729 1730 if (compilation()->env()->dtrace_method_probes()) { 1731 // Report exit from inline methods 1732 Values* args = new Values(1); 1733 args->push(append(new Constant(new MethodConstant(method())))); 1734 append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args)); 1735 } 1736 1737 // If the inlined method is synchronized, the monitor must be 1738 // released before we jump to the continuation block. 1739 if (method()->is_synchronized()) { 1740 assert(state()->locks_size() == 1, "receiver must be locked here"); 1741 monitorexit(state()->lock_at(0), SynchronizationEntryBCI); 1742 } 1743 1744 if (need_mem_bar) { 1745 append(new MemBar(lir_membar_storestore)); 1746 } 1747 1748 // State at end of inlined method is the state of the caller 1749 // without the method parameters on stack, including the 1750 // return value, if any, of the inlined method on operand stack. 1751 set_state(state()->caller_state()->copy_for_parsing()); 1752 if (x != nullptr) { 1753 if (!ignore_return) { 1754 state()->push(x->type(), x); 1755 } 1756 if (profile_return() && x->type()->is_object_kind()) { 1757 ciMethod* caller = state()->scope()->method(); 1758 profile_return_type(x, method(), caller, invoke_bci); 1759 } 1760 } 1761 Goto* goto_callee = new Goto(continuation(), false); 1762 1763 // See whether this is the first return; if so, store off some 1764 // of the state for later examination 1765 if (num_returns() == 0) { 1766 set_inline_cleanup_info(); 1767 } 1768 1769 // The current bci() is in the wrong scope, so use the bci() of 1770 // the continuation point. 1771 append_with_bci(goto_callee, scope_data()->continuation()->bci()); 1772 incr_num_returns(); 1773 return; 1774 } 1775 1776 state()->truncate_stack(0); 1777 if (method()->is_synchronized()) { 1778 // perform the unlocking before exiting the method 1779 Value receiver; 1780 if (!method()->is_static()) { 1781 receiver = _initial_state->local_at(0); 1782 } else { 1783 receiver = append(new Constant(new ClassConstant(method()->holder()))); 1784 } 1785 append_split(new MonitorExit(receiver, state()->unlock())); 1786 } 1787 1788 if (need_mem_bar) { 1789 append(new MemBar(lir_membar_storestore)); 1790 } 1791 1792 assert(!ignore_return, "Ignoring return value works only for inlining"); 1793 append(new Return(x)); 1794 } 1795 1796 Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) { 1797 if (!field_value.is_valid()) return nullptr; 1798 1799 BasicType field_type = field_value.basic_type(); 1800 ValueType* value = as_ValueType(field_value); 1801 1802 // Attach dimension info to stable arrays. 1803 if (FoldStableValues && 1804 field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) { 1805 ciArray* array = field_value.as_object()->as_array(); 1806 jint dimension = field->type()->as_array_klass()->dimension(); 1807 value = new StableArrayConstant(array, dimension); 1808 } 1809 1810 switch (field_type) { 1811 case T_ARRAY: 1812 case T_OBJECT: 1813 if (field_value.as_object()->should_be_constant()) { 1814 return new Constant(value); 1815 } 1816 return nullptr; // Not a constant. 1817 default: 1818 return new Constant(value); 1819 } 1820 } 1821 1822 void GraphBuilder::copy_inline_content(ciInlineKlass* vk, Value src, int src_off, Value dest, int dest_off, ValueStack* state_before, ciField* enclosing_field) { 1823 for (int i = 0; i < vk->nof_nonstatic_fields(); i++) { 1824 ciField* inner_field = vk->nonstatic_field_at(i); 1825 assert(!inner_field->is_flat(), "the iteration over nested fields is handled by the loop itself"); 1826 int off = inner_field->offset_in_bytes() - vk->first_field_offset(); 1827 LoadField* load = new LoadField(src, src_off + off, inner_field, false, state_before, false); 1828 Value replacement = append(load); 1829 StoreField* store = new StoreField(dest, dest_off + off, inner_field, replacement, false, state_before, false); 1830 store->set_enclosing_field(enclosing_field); 1831 append(store); 1832 } 1833 } 1834 1835 void GraphBuilder::access_field(Bytecodes::Code code) { 1836 bool will_link; 1837 ciField* field = stream()->get_field(will_link); 1838 ciInstanceKlass* holder = field->holder(); 1839 BasicType field_type = field->type()->basic_type(); 1840 ValueType* type = as_ValueType(field_type); 1841 1842 // call will_link again to determine if the field is valid. 1843 const bool needs_patching = !holder->is_loaded() || 1844 !field->will_link(method(), code) || 1845 (!field->is_flat() && PatchALot); 1846 1847 ValueStack* state_before = nullptr; 1848 if (!holder->is_initialized() || needs_patching) { 1849 // save state before instruction for debug info when 1850 // deoptimization happens during patching 1851 state_before = copy_state_before(); 1852 } 1853 1854 Value obj = nullptr; 1855 if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) { 1856 if (state_before != nullptr) { 1857 // build a patching constant 1858 obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before); 1859 } else { 1860 obj = new Constant(new InstanceConstant(holder->java_mirror())); 1861 } 1862 } 1863 1864 if (code == Bytecodes::_putfield) { 1865 scope()->set_wrote_fields(); 1866 if (field->is_volatile()) { 1867 scope()->set_wrote_volatile(); 1868 } 1869 if (field->is_final()) { 1870 scope()->set_wrote_final(); 1871 } 1872 if (field->is_stable()) { 1873 scope()->set_wrote_stable(); 1874 } 1875 } 1876 1877 int offset = !needs_patching ? field->offset_in_bytes() : -1; 1878 switch (code) { 1879 case Bytecodes::_getstatic: { 1880 // check for compile-time constants, i.e., initialized static final fields 1881 Value constant = nullptr; 1882 if (field->is_static_constant() && !PatchALot) { 1883 ciConstant field_value = field->constant_value(); 1884 assert(!field->is_stable() || !field_value.is_null_or_zero(), 1885 "stable static w/ default value shouldn't be a constant"); 1886 constant = make_constant(field_value, field); 1887 } else if (field->is_null_free() && field->type()->as_instance_klass()->is_initialized() && 1888 field->type()->as_inline_klass()->is_empty()) { 1889 // Loading from a field of an empty inline type. Just return the default instance. 1890 constant = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance())); 1891 } 1892 if (constant != nullptr) { 1893 push(type, append(constant)); 1894 } else { 1895 if (state_before == nullptr) { 1896 state_before = copy_state_for_exception(); 1897 } 1898 LoadField* load_field = new LoadField(append(obj), offset, field, true, 1899 state_before, needs_patching); 1900 push(type, append(load_field)); 1901 } 1902 break; 1903 } 1904 case Bytecodes::_putstatic: { 1905 Value val = pop(type); 1906 if (state_before == nullptr) { 1907 state_before = copy_state_for_exception(); 1908 } 1909 if (field_type == T_BOOLEAN) { 1910 Value mask = append(new Constant(new IntConstant(1))); 1911 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 1912 } 1913 if (field->is_null_free()) { 1914 null_check(val); 1915 } 1916 if (field->is_null_free() && field->type()->is_loaded() && field->type()->as_inline_klass()->is_empty()) { 1917 // Storing to a field of an empty inline type. Ignore. 1918 break; 1919 } 1920 append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching)); 1921 break; 1922 } 1923 case Bytecodes::_getfield: { 1924 // Check for compile-time constants, i.e., trusted final non-static fields. 1925 Value constant = nullptr; 1926 if (state_before == nullptr && field->is_flat()) { 1927 // Save the entire state and re-execute on deopt when accessing flat fields 1928 assert(Interpreter::bytecode_should_reexecute(code), "should reexecute"); 1929 state_before = copy_state_before(); 1930 } 1931 if (!has_pending_field_access() && !has_pending_load_indexed()) { 1932 obj = apop(); 1933 ObjectType* obj_type = obj->type()->as_ObjectType(); 1934 if (field->is_null_free() && field->type()->as_instance_klass()->is_initialized() 1935 && field->type()->as_inline_klass()->is_empty()) { 1936 // Loading from a field of an empty inline type. Just return the default instance. 1937 null_check(obj); 1938 constant = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance())); 1939 } else if (field->is_constant() && !field->is_flat() && obj_type->is_constant() && !PatchALot) { 1940 ciObject* const_oop = obj_type->constant_value(); 1941 if (!const_oop->is_null_object() && const_oop->is_loaded()) { 1942 ciConstant field_value = field->constant_value_of(const_oop); 1943 if (field_value.is_valid()) { 1944 if (field->is_null_free() && field_value.is_null_or_zero()) { 1945 // Non-flat inline type field. Replace null by the default value. 1946 constant = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance())); 1947 } else { 1948 constant = make_constant(field_value, field); 1949 } 1950 // For CallSite objects add a dependency for invalidation of the optimization. 1951 if (field->is_call_site_target()) { 1952 ciCallSite* call_site = const_oop->as_call_site(); 1953 if (!call_site->is_fully_initialized_constant_call_site()) { 1954 ciMethodHandle* target = field_value.as_object()->as_method_handle(); 1955 dependency_recorder()->assert_call_site_target_value(call_site, target); 1956 } 1957 } 1958 } 1959 } 1960 } 1961 } 1962 if (constant != nullptr) { 1963 push(type, append(constant)); 1964 } else { 1965 if (state_before == nullptr) { 1966 state_before = copy_state_for_exception(); 1967 } 1968 if (!field->is_flat()) { 1969 if (has_pending_field_access()) { 1970 assert(!needs_patching, "Can't patch delayed field access"); 1971 obj = pending_field_access()->obj(); 1972 offset += pending_field_access()->offset() - field->holder()->as_inline_klass()->first_field_offset(); 1973 field = pending_field_access()->holder()->get_field_by_offset(offset, false); 1974 assert(field != nullptr, "field not found"); 1975 set_pending_field_access(nullptr); 1976 } else if (has_pending_load_indexed()) { 1977 assert(!needs_patching, "Can't patch delayed field access"); 1978 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->first_field_offset()); 1979 LoadIndexed* li = pending_load_indexed()->load_instr(); 1980 li->set_type(type); 1981 push(type, append(li)); 1982 set_pending_load_indexed(nullptr); 1983 break; 1984 } 1985 LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); 1986 Value replacement = !needs_patching ? _memory->load(load) : load; 1987 if (replacement != load) { 1988 assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); 1989 // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing 1990 // conversion. Emit an explicit conversion here to get the correct field value after the write. 1991 switch (field_type) { 1992 case T_BOOLEAN: 1993 case T_BYTE: 1994 replacement = append(new Convert(Bytecodes::_i2b, replacement, type)); 1995 break; 1996 case T_CHAR: 1997 replacement = append(new Convert(Bytecodes::_i2c, replacement, type)); 1998 break; 1999 case T_SHORT: 2000 replacement = append(new Convert(Bytecodes::_i2s, replacement, type)); 2001 break; 2002 default: 2003 break; 2004 } 2005 push(type, replacement); 2006 } else { 2007 push(type, append(load)); 2008 } 2009 } else { // field is flat 2010 // Look at the next bytecode to check if we can delay the field access 2011 bool can_delay_access = false; 2012 ciBytecodeStream s(method()); 2013 s.force_bci(bci()); 2014 s.next(); 2015 if (s.cur_bc() == Bytecodes::_getfield && !needs_patching) { 2016 ciField* next_field = s.get_field(will_link); 2017 bool next_needs_patching = !next_field->holder()->is_loaded() || 2018 !next_field->will_link(method(), Bytecodes::_getfield) || 2019 PatchALot; 2020 can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching; 2021 } 2022 if (can_delay_access) { 2023 if (has_pending_load_indexed()) { 2024 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->first_field_offset()); 2025 } else if (has_pending_field_access()) { 2026 pending_field_access()->inc_offset(offset - field->holder()->as_inline_klass()->first_field_offset()); 2027 } else { 2028 null_check(obj); 2029 DelayedFieldAccess* dfa = new DelayedFieldAccess(obj, field->holder(), field->offset_in_bytes(), state_before); 2030 set_pending_field_access(dfa); 2031 } 2032 } else { 2033 ciInlineKlass* inline_klass = field->type()->as_inline_klass(); 2034 scope()->set_wrote_final(); 2035 scope()->set_wrote_fields(); 2036 bool need_membar = false; 2037 if (inline_klass->is_initialized() && inline_klass->is_empty()) { 2038 apush(append(new Constant(new InstanceConstant(inline_klass->default_instance())))); 2039 if (has_pending_field_access()) { 2040 set_pending_field_access(nullptr); 2041 } else if (has_pending_load_indexed()) { 2042 set_pending_load_indexed(nullptr); 2043 } 2044 } else if (has_pending_load_indexed()) { 2045 assert(!needs_patching, "Can't patch delayed field access"); 2046 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->first_field_offset()); 2047 NewInstance* vt = new NewInstance(inline_klass, pending_load_indexed()->state_before(), false, true); 2048 _memory->new_instance(vt); 2049 pending_load_indexed()->load_instr()->set_vt(vt); 2050 apush(append_split(vt)); 2051 append(pending_load_indexed()->load_instr()); 2052 set_pending_load_indexed(nullptr); 2053 need_membar = true; 2054 } else { 2055 if (has_pending_field_access()) { 2056 state_before = pending_field_access()->state_before(); 2057 } 2058 NewInstance* new_instance = new NewInstance(inline_klass, state_before, false, true); 2059 _memory->new_instance(new_instance); 2060 apush(append_split(new_instance)); 2061 assert(!needs_patching, "Can't patch flat inline type field access"); 2062 if (has_pending_field_access()) { 2063 copy_inline_content(inline_klass, pending_field_access()->obj(), 2064 pending_field_access()->offset() + field->offset_in_bytes() - field->holder()->as_inline_klass()->first_field_offset(), 2065 new_instance, inline_klass->first_field_offset(), state_before); 2066 set_pending_field_access(nullptr); 2067 } else { 2068 copy_inline_content(inline_klass, obj, field->offset_in_bytes(), new_instance, inline_klass->first_field_offset(), state_before); 2069 } 2070 need_membar = true; 2071 } 2072 if (need_membar) { 2073 // If we allocated a new instance ensure the stores to copy the 2074 // field contents are visible before any subsequent store that 2075 // publishes this reference. 2076 append(new MemBar(lir_membar_storestore)); 2077 } 2078 } 2079 } 2080 } 2081 break; 2082 } 2083 case Bytecodes::_putfield: { 2084 Value val = pop(type); 2085 obj = apop(); 2086 if (state_before == nullptr) { 2087 state_before = copy_state_for_exception(); 2088 } 2089 if (field_type == T_BOOLEAN) { 2090 Value mask = append(new Constant(new IntConstant(1))); 2091 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 2092 } 2093 if (field->is_null_free() && field->type()->is_loaded() && field->type()->as_inline_klass()->is_empty()) { 2094 // Storing to a field of an empty inline type. Ignore. 2095 null_check(obj); 2096 null_check(val); 2097 } else if (!field->is_flat()) { 2098 if (field->is_null_free()) { 2099 null_check(val); 2100 } 2101 StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); 2102 if (!needs_patching) store = _memory->store(store); 2103 if (store != nullptr) { 2104 append(store); 2105 } 2106 } else { 2107 assert(!needs_patching, "Can't patch flat inline type field access"); 2108 ciInlineKlass* inline_klass = field->type()->as_inline_klass(); 2109 copy_inline_content(inline_klass, val, inline_klass->first_field_offset(), obj, offset, state_before, field); 2110 } 2111 break; 2112 } 2113 default: 2114 ShouldNotReachHere(); 2115 break; 2116 } 2117 } 2118 2119 Dependencies* GraphBuilder::dependency_recorder() const { 2120 assert(DeoptC1, "need debug information"); 2121 return compilation()->dependency_recorder(); 2122 } 2123 2124 // How many arguments do we want to profile? 2125 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) { 2126 int n = 0; 2127 bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci())); 2128 start = has_receiver ? 1 : 0; 2129 if (profile_arguments()) { 2130 ciProfileData* data = method()->method_data()->bci_to_data(bci()); 2131 if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 2132 n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments(); 2133 } 2134 } 2135 // If we are inlining then we need to collect arguments to profile parameters for the target 2136 if (profile_parameters() && target != nullptr) { 2137 if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) { 2138 // The receiver is profiled on method entry so it's included in 2139 // the number of parameters but here we're only interested in 2140 // actual arguments. 2141 n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start); 2142 } 2143 } 2144 if (n > 0) { 2145 return new Values(n); 2146 } 2147 return nullptr; 2148 } 2149 2150 void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) { 2151 #ifdef ASSERT 2152 bool ignored_will_link; 2153 ciSignature* declared_signature = nullptr; 2154 ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); 2155 assert(expected == obj_args->capacity() || real_target->is_method_handle_intrinsic(), "missed on arg?"); 2156 #endif 2157 } 2158 2159 // Collect arguments that we want to profile in a list 2160 Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) { 2161 int start = 0; 2162 Values* obj_args = args_list_for_profiling(target, start, may_have_receiver); 2163 if (obj_args == nullptr) { 2164 return nullptr; 2165 } 2166 int s = obj_args->capacity(); 2167 // if called through method handle invoke, some arguments may have been popped 2168 for (int i = start, j = 0; j < s && i < args->length(); i++) { 2169 if (args->at(i)->type()->is_object_kind()) { 2170 obj_args->push(args->at(i)); 2171 j++; 2172 } 2173 } 2174 check_args_for_profiling(obj_args, s); 2175 return obj_args; 2176 } 2177 2178 void GraphBuilder::invoke(Bytecodes::Code code) { 2179 bool will_link; 2180 ciSignature* declared_signature = nullptr; 2181 ciMethod* target = stream()->get_method(will_link, &declared_signature); 2182 ciKlass* holder = stream()->get_declared_method_holder(); 2183 const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); 2184 assert(declared_signature != nullptr, "cannot be null"); 2185 assert(will_link == target->is_loaded(), ""); 2186 JFR_ONLY(Jfr::on_resolution(this, holder, target); CHECK_BAILOUT();) 2187 2188 ciInstanceKlass* klass = target->holder(); 2189 assert(!target->is_loaded() || klass->is_loaded(), "loaded target must imply loaded klass"); 2190 2191 // check if CHA possible: if so, change the code to invoke_special 2192 ciInstanceKlass* calling_klass = method()->holder(); 2193 ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); 2194 ciInstanceKlass* actual_recv = callee_holder; 2195 2196 CompileLog* log = compilation()->log(); 2197 if (log != nullptr) 2198 log->elem("call method='%d' instr='%s'", 2199 log->identify(target), 2200 Bytecodes::name(code)); 2201 2202 // Some methods are obviously bindable without any type checks so 2203 // convert them directly to an invokespecial or invokestatic. 2204 if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { 2205 switch (bc_raw) { 2206 case Bytecodes::_invokeinterface: 2207 // convert to invokespecial if the target is the private interface method. 2208 if (target->is_private()) { 2209 assert(holder->is_interface(), "How did we get a non-interface method here!"); 2210 code = Bytecodes::_invokespecial; 2211 } 2212 break; 2213 case Bytecodes::_invokevirtual: 2214 code = Bytecodes::_invokespecial; 2215 break; 2216 case Bytecodes::_invokehandle: 2217 code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial; 2218 break; 2219 default: 2220 break; 2221 } 2222 } else { 2223 if (bc_raw == Bytecodes::_invokehandle) { 2224 assert(!will_link, "should come here only for unlinked call"); 2225 code = Bytecodes::_invokespecial; 2226 } 2227 } 2228 2229 if (code == Bytecodes::_invokespecial) { 2230 // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface. 2231 ciKlass* receiver_constraint = nullptr; 2232 2233 if (bc_raw == Bytecodes::_invokeinterface) { 2234 receiver_constraint = holder; 2235 } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_constructor() && calling_klass->is_interface()) { 2236 receiver_constraint = calling_klass; 2237 } 2238 2239 if (receiver_constraint != nullptr) { 2240 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); 2241 Value receiver = state()->stack_at(index); 2242 CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before()); 2243 // go to uncommon_trap when checkcast fails 2244 c->set_invokespecial_receiver_check(); 2245 state()->stack_at_put(index, append_split(c)); 2246 } 2247 } 2248 2249 // Push appendix argument (MethodType, CallSite, etc.), if one. 2250 bool patch_for_appendix = false; 2251 int patching_appendix_arg = 0; 2252 if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) { 2253 Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before())); 2254 apush(arg); 2255 patch_for_appendix = true; 2256 patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1; 2257 } else if (stream()->has_appendix()) { 2258 ciObject* appendix = stream()->get_appendix(); 2259 Value arg = append(new Constant(new ObjectConstant(appendix))); 2260 apush(arg); 2261 } 2262 2263 ciMethod* cha_monomorphic_target = nullptr; 2264 ciMethod* exact_target = nullptr; 2265 Value better_receiver = nullptr; 2266 if (UseCHA && DeoptC1 && target->is_loaded() && 2267 !(// %%% FIXME: Are both of these relevant? 2268 target->is_method_handle_intrinsic() || 2269 target->is_compiled_lambda_form()) && 2270 !patch_for_appendix) { 2271 Value receiver = nullptr; 2272 ciInstanceKlass* receiver_klass = nullptr; 2273 bool type_is_exact = false; 2274 // try to find a precise receiver type 2275 if (will_link && !target->is_static()) { 2276 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); 2277 receiver = state()->stack_at(index); 2278 ciType* type = receiver->exact_type(); 2279 if (type != nullptr && type->is_loaded()) { 2280 assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface"); 2281 // Detects non-interface instances, primitive arrays, and some object arrays. 2282 // Array receivers can only call Object methods, so we should be able to allow 2283 // all object arrays here too, even those with unloaded types. 2284 receiver_klass = (ciInstanceKlass*) type; 2285 type_is_exact = true; 2286 } 2287 if (type == nullptr) { 2288 type = receiver->declared_type(); 2289 if (type != nullptr && type->is_loaded() && 2290 type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { 2291 receiver_klass = (ciInstanceKlass*) type; 2292 if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) { 2293 // Insert a dependency on this type since 2294 // find_monomorphic_target may assume it's already done. 2295 dependency_recorder()->assert_leaf_type(receiver_klass); 2296 type_is_exact = true; 2297 } 2298 } 2299 } 2300 } 2301 if (receiver_klass != nullptr && type_is_exact && 2302 receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) { 2303 // If we have the exact receiver type we can bind directly to 2304 // the method to call. 2305 exact_target = target->resolve_invoke(calling_klass, receiver_klass); 2306 if (exact_target != nullptr) { 2307 target = exact_target; 2308 code = Bytecodes::_invokespecial; 2309 } 2310 } 2311 if (receiver_klass != nullptr && 2312 receiver_klass->is_subtype_of(actual_recv) && 2313 actual_recv->is_initialized()) { 2314 actual_recv = receiver_klass; 2315 } 2316 2317 if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) || 2318 (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) { 2319 // Use CHA on the receiver to select a more precise method. 2320 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv); 2321 } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != nullptr) { 2322 assert(callee_holder->is_interface(), "invokeinterface to non interface?"); 2323 // If there is only one implementor of this interface then we 2324 // may be able bind this invoke directly to the implementing 2325 // klass but we need both a dependence on the single interface 2326 // and on the method we bind to. Additionally since all we know 2327 // about the receiver type is the it's supposed to implement the 2328 // interface we have to insert a check that it's the class we 2329 // expect. Interface types are not checked by the verifier so 2330 // they are roughly equivalent to Object. 2331 // The number of implementors for declared_interface is less or 2332 // equal to the number of implementors for target->holder() so 2333 // if number of implementors of target->holder() == 1 then 2334 // number of implementors for decl_interface is 0 or 1. If 2335 // it's 0 then no class implements decl_interface and there's 2336 // no point in inlining. 2337 ciInstanceKlass* declared_interface = callee_holder; 2338 ciInstanceKlass* singleton = declared_interface->unique_implementor(); 2339 if (singleton != nullptr) { 2340 assert(singleton != declared_interface, "not a unique implementor"); 2341 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, declared_interface, singleton); 2342 if (cha_monomorphic_target != nullptr) { 2343 ciInstanceKlass* holder = cha_monomorphic_target->holder(); 2344 ciInstanceKlass* constraint = (holder->is_subtype_of(singleton) ? holder : singleton); // avoid upcasts 2345 if (holder != compilation()->env()->Object_klass() && 2346 (!type_is_exact || receiver_klass->is_subtype_of(constraint))) { 2347 actual_recv = declared_interface; 2348 2349 // insert a check it's really the expected class. 2350 CheckCast* c = new CheckCast(constraint, receiver, copy_state_for_exception()); 2351 c->set_incompatible_class_change_check(); 2352 c->set_direct_compare(constraint->is_final()); 2353 // pass the result of the checkcast so that the compiler has 2354 // more accurate type info in the inlinee 2355 better_receiver = append_split(c); 2356 2357 dependency_recorder()->assert_unique_implementor(declared_interface, singleton); 2358 } else { 2359 cha_monomorphic_target = nullptr; 2360 } 2361 } 2362 } 2363 } 2364 } 2365 2366 if (cha_monomorphic_target != nullptr) { 2367 assert(!target->can_be_statically_bound() || target->equals(cha_monomorphic_target), ""); 2368 assert(!cha_monomorphic_target->is_abstract(), ""); 2369 if (!cha_monomorphic_target->can_be_statically_bound(actual_recv)) { 2370 // If we inlined because CHA revealed only a single target method, 2371 // then we are dependent on that target method not getting overridden 2372 // by dynamic class loading. Be sure to test the "static" receiver 2373 // dest_method here, as opposed to the actual receiver, which may 2374 // falsely lead us to believe that the receiver is final or private. 2375 dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target, callee_holder, target); 2376 } 2377 code = Bytecodes::_invokespecial; 2378 } 2379 2380 // check if we could do inlining 2381 if (!PatchALot && Inline && target->is_loaded() && !patch_for_appendix && 2382 callee_holder->is_loaded()) { // the effect of symbolic reference resolution 2383 2384 // callee is known => check if we have static binding 2385 if ((code == Bytecodes::_invokestatic && klass->is_initialized()) || // invokestatic involves an initialization barrier on declaring class 2386 code == Bytecodes::_invokespecial || 2387 (code == Bytecodes::_invokevirtual && target->is_final_method()) || 2388 code == Bytecodes::_invokedynamic) { 2389 // static binding => check if callee is ok 2390 ciMethod* inline_target = (cha_monomorphic_target != nullptr) ? cha_monomorphic_target : target; 2391 bool holder_known = (cha_monomorphic_target != nullptr) || (exact_target != nullptr); 2392 bool success = try_inline(inline_target, holder_known, false /* ignore_return */, code, better_receiver); 2393 2394 CHECK_BAILOUT(); 2395 clear_inline_bailout(); 2396 2397 if (success) { 2398 // Register dependence if JVMTI has either breakpoint 2399 // setting or hotswapping of methods capabilities since they may 2400 // cause deoptimization. 2401 if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) { 2402 dependency_recorder()->assert_evol_method(inline_target); 2403 } 2404 return; 2405 } 2406 } else { 2407 print_inlining(target, "no static binding", /*success*/ false); 2408 } 2409 } else { 2410 print_inlining(target, "not inlineable", /*success*/ false); 2411 } 2412 2413 // If we attempted an inline which did not succeed because of a 2414 // bailout during construction of the callee graph, the entire 2415 // compilation has to be aborted. This is fairly rare and currently 2416 // seems to only occur for jasm-generated classes which contain 2417 // jsr/ret pairs which are not associated with finally clauses and 2418 // do not have exception handlers in the containing method, and are 2419 // therefore not caught early enough to abort the inlining without 2420 // corrupting the graph. (We currently bail out with a non-empty 2421 // stack at a ret in these situations.) 2422 CHECK_BAILOUT(); 2423 2424 // inlining not successful => standard invoke 2425 ValueType* result_type = as_ValueType(declared_signature->return_type()); 2426 ValueStack* state_before = copy_state_exhandling(); 2427 2428 // The bytecode (code) might change in this method so we are checking this very late. 2429 const bool has_receiver = 2430 code == Bytecodes::_invokespecial || 2431 code == Bytecodes::_invokevirtual || 2432 code == Bytecodes::_invokeinterface; 2433 Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg); 2434 Value recv = has_receiver ? apop() : nullptr; 2435 2436 // A null check is required here (when there is a receiver) for any of the following cases 2437 // - invokespecial, always need a null check. 2438 // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized 2439 // and require null checking. If the target is loaded a null check is emitted here. 2440 // If the target isn't loaded the null check must happen after the call resolution. We achieve that 2441 // by using the target methods unverified entry point (see CompiledIC::compute_monomorphic_entry). 2442 // (The JVM specification requires that LinkageError must be thrown before a NPE. An unloaded target may 2443 // potentially fail, and can't have the null check before the resolution.) 2444 // - A call that will be profiled. (But we can't add a null check when the target is unloaded, by the same 2445 // reason as above, so calls with a receiver to unloaded targets can't be profiled.) 2446 // 2447 // Normal invokevirtual will perform the null check during lookup 2448 2449 bool need_null_check = (code == Bytecodes::_invokespecial) || 2450 (target->is_loaded() && (target->is_final_method() || (is_profiling() && profile_calls()))); 2451 2452 if (need_null_check) { 2453 if (recv != nullptr) { 2454 null_check(recv); 2455 } 2456 2457 if (is_profiling()) { 2458 // Note that we'd collect profile data in this method if we wanted it. 2459 compilation()->set_would_profile(true); 2460 2461 if (profile_calls()) { 2462 assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both can not be set"); 2463 ciKlass* target_klass = nullptr; 2464 if (cha_monomorphic_target != nullptr) { 2465 target_klass = cha_monomorphic_target->holder(); 2466 } else if (exact_target != nullptr) { 2467 target_klass = exact_target->holder(); 2468 } 2469 profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false); 2470 } 2471 } 2472 } 2473 2474 Invoke* result = new Invoke(code, result_type, recv, args, target, state_before); 2475 // push result 2476 append_split(result); 2477 2478 if (result_type != voidType) { 2479 push(result_type, round_fp(result)); 2480 } 2481 if (profile_return() && result_type->is_object_kind()) { 2482 profile_return_type(result, target); 2483 } 2484 } 2485 2486 2487 void GraphBuilder::new_instance(int klass_index) { 2488 ValueStack* state_before = copy_state_exhandling(); 2489 ciKlass* klass = stream()->get_klass(); 2490 assert(klass->is_instance_klass(), "must be an instance klass"); 2491 if (!stream()->is_unresolved_klass() && klass->is_inlinetype() && 2492 klass->as_inline_klass()->is_initialized() && klass->as_inline_klass()->is_empty()) { 2493 ciInlineKlass* vk = klass->as_inline_klass(); 2494 apush(append(new Constant(new InstanceConstant(vk->default_instance())))); 2495 } else { 2496 NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass(), false); 2497 _memory->new_instance(new_instance); 2498 apush(append_split(new_instance)); 2499 } 2500 } 2501 2502 void GraphBuilder::new_type_array() { 2503 ValueStack* state_before = copy_state_exhandling(); 2504 apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true))); 2505 } 2506 2507 2508 void GraphBuilder::new_object_array() { 2509 ciKlass* klass = stream()->get_klass(); 2510 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2511 NewArray* n = new NewObjectArray(klass, ipop(), state_before); 2512 apush(append_split(n)); 2513 } 2514 2515 2516 bool GraphBuilder::direct_compare(ciKlass* k) { 2517 if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) { 2518 ciInstanceKlass* ik = k->as_instance_klass(); 2519 if (ik->is_final()) { 2520 return true; 2521 } else { 2522 if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) { 2523 // test class is leaf class 2524 dependency_recorder()->assert_leaf_type(ik); 2525 return true; 2526 } 2527 } 2528 } 2529 return false; 2530 } 2531 2532 2533 void GraphBuilder::check_cast(int klass_index) { 2534 ciKlass* klass = stream()->get_klass(); 2535 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception(); 2536 CheckCast* c = new CheckCast(klass, apop(), state_before); 2537 apush(append_split(c)); 2538 c->set_direct_compare(direct_compare(klass)); 2539 2540 if (is_profiling()) { 2541 // Note that we'd collect profile data in this method if we wanted it. 2542 compilation()->set_would_profile(true); 2543 2544 if (profile_checkcasts()) { 2545 c->set_profiled_method(method()); 2546 c->set_profiled_bci(bci()); 2547 c->set_should_profile(true); 2548 } 2549 } 2550 } 2551 2552 2553 void GraphBuilder::instance_of(int klass_index) { 2554 ciKlass* klass = stream()->get_klass(); 2555 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2556 InstanceOf* i = new InstanceOf(klass, apop(), state_before); 2557 ipush(append_split(i)); 2558 i->set_direct_compare(direct_compare(klass)); 2559 2560 if (is_profiling()) { 2561 // Note that we'd collect profile data in this method if we wanted it. 2562 compilation()->set_would_profile(true); 2563 2564 if (profile_checkcasts()) { 2565 i->set_profiled_method(method()); 2566 i->set_profiled_bci(bci()); 2567 i->set_should_profile(true); 2568 } 2569 } 2570 } 2571 2572 2573 void GraphBuilder::monitorenter(Value x, int bci) { 2574 bool maybe_inlinetype = false; 2575 if (bci == InvocationEntryBci) { 2576 // Called by GraphBuilder::inline_sync_entry. 2577 #ifdef ASSERT 2578 ciType* obj_type = x->declared_type(); 2579 assert(obj_type == nullptr || !obj_type->is_inlinetype(), "inline types cannot have synchronized methods"); 2580 #endif 2581 } else { 2582 // We are compiling a monitorenter bytecode 2583 if (EnableValhalla) { 2584 ciType* obj_type = x->declared_type(); 2585 if (obj_type == nullptr || obj_type->as_klass()->can_be_inline_klass()) { 2586 // If we're (possibly) locking on an inline type, check for markWord::always_locked_pattern 2587 // and throw IMSE. (obj_type is null for Phi nodes, so let's just be conservative). 2588 maybe_inlinetype = true; 2589 } 2590 } 2591 } 2592 2593 // save state before locking in case of deoptimization after a NullPointerException 2594 ValueStack* state_before = copy_state_for_exception_with_bci(bci); 2595 append_with_bci(new MonitorEnter(x, state()->lock(x), state_before, maybe_inlinetype), bci); 2596 kill_all(); 2597 } 2598 2599 2600 void GraphBuilder::monitorexit(Value x, int bci) { 2601 append_with_bci(new MonitorExit(x, state()->unlock()), bci); 2602 kill_all(); 2603 } 2604 2605 2606 void GraphBuilder::new_multi_array(int dimensions) { 2607 ciKlass* klass = stream()->get_klass(); 2608 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2609 2610 Values* dims = new Values(dimensions, dimensions, nullptr); 2611 // fill in all dimensions 2612 int i = dimensions; 2613 while (i-- > 0) dims->at_put(i, ipop()); 2614 // create array 2615 NewArray* n = new NewMultiArray(klass, dims, state_before); 2616 apush(append_split(n)); 2617 } 2618 2619 2620 void GraphBuilder::throw_op(int bci) { 2621 // We require that the debug info for a Throw be the "state before" 2622 // the Throw (i.e., exception oop is still on TOS) 2623 ValueStack* state_before = copy_state_before_with_bci(bci); 2624 Throw* t = new Throw(apop(), state_before); 2625 // operand stack not needed after a throw 2626 state()->truncate_stack(0); 2627 append_with_bci(t, bci); 2628 } 2629 2630 2631 Value GraphBuilder::round_fp(Value fp_value) { 2632 if (strict_fp_requires_explicit_rounding) { 2633 #ifdef IA32 2634 // no rounding needed if SSE2 is used 2635 if (UseSSE < 2) { 2636 // Must currently insert rounding node for doubleword values that 2637 // are results of expressions (i.e., not loads from memory or 2638 // constants) 2639 if (fp_value->type()->tag() == doubleTag && 2640 fp_value->as_Constant() == nullptr && 2641 fp_value->as_Local() == nullptr && // method parameters need no rounding 2642 fp_value->as_RoundFP() == nullptr) { 2643 return append(new RoundFP(fp_value)); 2644 } 2645 } 2646 #else 2647 Unimplemented(); 2648 #endif // IA32 2649 } 2650 return fp_value; 2651 } 2652 2653 2654 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) { 2655 Canonicalizer canon(compilation(), instr, bci); 2656 Instruction* i1 = canon.canonical(); 2657 if (i1->is_linked() || !i1->can_be_linked()) { 2658 // Canonicalizer returned an instruction which was already 2659 // appended so simply return it. 2660 return i1; 2661 } 2662 2663 if (UseLocalValueNumbering) { 2664 // Lookup the instruction in the ValueMap and add it to the map if 2665 // it's not found. 2666 Instruction* i2 = vmap()->find_insert(i1); 2667 if (i2 != i1) { 2668 // found an entry in the value map, so just return it. 2669 assert(i2->is_linked(), "should already be linked"); 2670 return i2; 2671 } 2672 ValueNumberingEffects vne(vmap()); 2673 i1->visit(&vne); 2674 } 2675 2676 // i1 was not eliminated => append it 2677 assert(i1->next() == nullptr, "shouldn't already be linked"); 2678 _last = _last->set_next(i1, canon.bci()); 2679 2680 if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) { 2681 // set the bailout state but complete normal processing. We 2682 // might do a little more work before noticing the bailout so we 2683 // want processing to continue normally until it's noticed. 2684 bailout("Method and/or inlining is too large"); 2685 } 2686 2687 #ifndef PRODUCT 2688 if (PrintIRDuringConstruction) { 2689 InstructionPrinter ip; 2690 ip.print_line(i1); 2691 if (Verbose) { 2692 state()->print(); 2693 } 2694 } 2695 #endif 2696 2697 // save state after modification of operand stack for StateSplit instructions 2698 StateSplit* s = i1->as_StateSplit(); 2699 if (s != nullptr) { 2700 if (EliminateFieldAccess) { 2701 Intrinsic* intrinsic = s->as_Intrinsic(); 2702 if (s->as_Invoke() != nullptr || (intrinsic && !intrinsic->preserves_state())) { 2703 _memory->kill(); 2704 } 2705 } 2706 s->set_state(state()->copy(ValueStack::StateAfter, canon.bci())); 2707 } 2708 2709 // set up exception handlers for this instruction if necessary 2710 if (i1->can_trap()) { 2711 i1->set_exception_handlers(handle_exception(i1)); 2712 assert(i1->exception_state() != nullptr || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state"); 2713 } 2714 return i1; 2715 } 2716 2717 2718 Instruction* GraphBuilder::append(Instruction* instr) { 2719 assert(instr->as_StateSplit() == nullptr || instr->as_BlockEnd() != nullptr, "wrong append used"); 2720 return append_with_bci(instr, bci()); 2721 } 2722 2723 2724 Instruction* GraphBuilder::append_split(StateSplit* instr) { 2725 return append_with_bci(instr, bci()); 2726 } 2727 2728 2729 void GraphBuilder::null_check(Value value) { 2730 if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) { 2731 return; 2732 } else { 2733 Constant* con = value->as_Constant(); 2734 if (con) { 2735 ObjectType* c = con->type()->as_ObjectType(); 2736 if (c && c->is_loaded()) { 2737 ObjectConstant* oc = c->as_ObjectConstant(); 2738 if (!oc || !oc->value()->is_null_object()) { 2739 return; 2740 } 2741 } 2742 } 2743 if (value->is_null_free()) return; 2744 } 2745 append(new NullCheck(value, copy_state_for_exception())); 2746 } 2747 2748 2749 2750 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) { 2751 if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) { 2752 assert(instruction->exception_state() == nullptr 2753 || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState 2754 || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()), 2755 "exception_state should be of exception kind"); 2756 return new XHandlers(); 2757 } 2758 2759 XHandlers* exception_handlers = new XHandlers(); 2760 ScopeData* cur_scope_data = scope_data(); 2761 ValueStack* cur_state = instruction->state_before(); 2762 ValueStack* prev_state = nullptr; 2763 int scope_count = 0; 2764 2765 assert(cur_state != nullptr, "state_before must be set"); 2766 do { 2767 int cur_bci = cur_state->bci(); 2768 assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match"); 2769 assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci() 2770 || has_pending_field_access() || has_pending_load_indexed(), "invalid bci"); 2771 2772 2773 // join with all potential exception handlers 2774 XHandlers* list = cur_scope_data->xhandlers(); 2775 const int n = list->length(); 2776 for (int i = 0; i < n; i++) { 2777 XHandler* h = list->handler_at(i); 2778 if (h->covers(cur_bci)) { 2779 // h is a potential exception handler => join it 2780 compilation()->set_has_exception_handlers(true); 2781 2782 BlockBegin* entry = h->entry_block(); 2783 if (entry == block()) { 2784 // It's acceptable for an exception handler to cover itself 2785 // but we don't handle that in the parser currently. It's 2786 // very rare so we bailout instead of trying to handle it. 2787 BAILOUT_("exception handler covers itself", exception_handlers); 2788 } 2789 assert(entry->bci() == h->handler_bci(), "must match"); 2790 assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond"); 2791 2792 // previously this was a BAILOUT, but this is not necessary 2793 // now because asynchronous exceptions are not handled this way. 2794 assert(entry->state() == nullptr || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match"); 2795 2796 // xhandler start with an empty expression stack 2797 if (cur_state->stack_size() != 0) { 2798 // locals are preserved 2799 // stack will be truncated 2800 cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci()); 2801 } 2802 if (instruction->exception_state() == nullptr) { 2803 instruction->set_exception_state(cur_state); 2804 } 2805 2806 // Note: Usually this join must work. However, very 2807 // complicated jsr-ret structures where we don't ret from 2808 // the subroutine can cause the objects on the monitor 2809 // stacks to not match because blocks can be parsed twice. 2810 // The only test case we've seen so far which exhibits this 2811 // problem is caught by the infinite recursion test in 2812 // GraphBuilder::jsr() if the join doesn't work. 2813 if (!entry->try_merge(cur_state, compilation()->has_irreducible_loops())) { 2814 BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers); 2815 } 2816 2817 // add current state for correct handling of phi functions at begin of xhandler 2818 int phi_operand = entry->add_exception_state(cur_state); 2819 2820 // add entry to the list of xhandlers of this block 2821 _block->add_exception_handler(entry); 2822 2823 // add back-edge from xhandler entry to this block 2824 if (!entry->is_predecessor(_block)) { 2825 entry->add_predecessor(_block); 2826 } 2827 2828 // clone XHandler because phi_operand and scope_count can not be shared 2829 XHandler* new_xhandler = new XHandler(h); 2830 new_xhandler->set_phi_operand(phi_operand); 2831 new_xhandler->set_scope_count(scope_count); 2832 exception_handlers->append(new_xhandler); 2833 2834 // fill in exception handler subgraph lazily 2835 assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet"); 2836 cur_scope_data->add_to_work_list(entry); 2837 2838 // stop when reaching catchall 2839 if (h->catch_type() == 0) { 2840 return exception_handlers; 2841 } 2842 } 2843 } 2844 2845 if (exception_handlers->length() == 0) { 2846 // This scope and all callees do not handle exceptions, so the local 2847 // variables of this scope are not needed. However, the scope itself is 2848 // required for a correct exception stack trace -> clear out the locals. 2849 // Stack and locals are invalidated but not truncated in caller state. 2850 if (prev_state != nullptr) { 2851 assert(instruction->exception_state() != nullptr, "missed set?"); 2852 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(true /* caller */); 2853 cur_state = cur_state->copy(exc_kind, cur_state->bci()); 2854 // reset caller exception state 2855 prev_state->set_caller_state(cur_state); 2856 } else { 2857 assert(instruction->exception_state() == nullptr, "already set"); 2858 // set instruction exception state 2859 // truncate stack 2860 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(); 2861 cur_state = cur_state->copy(exc_kind, cur_state->bci()); 2862 instruction->set_exception_state(cur_state); 2863 } 2864 } 2865 2866 // Set up iteration for next time. 2867 // If parsing a jsr, do not grab exception handlers from the 2868 // parent scopes for this method (already got them, and they 2869 // needed to be cloned) 2870 2871 while (cur_scope_data->parsing_jsr()) { 2872 cur_scope_data = cur_scope_data->parent(); 2873 } 2874 2875 assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match"); 2876 assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler"); 2877 2878 prev_state = cur_state; 2879 cur_state = cur_state->caller_state(); 2880 cur_scope_data = cur_scope_data->parent(); 2881 scope_count++; 2882 } while (cur_scope_data != nullptr); 2883 2884 return exception_handlers; 2885 } 2886 2887 2888 // Helper class for simplifying Phis. 2889 class PhiSimplifier : public BlockClosure { 2890 private: 2891 bool _has_substitutions; 2892 Value simplify(Value v); 2893 2894 public: 2895 PhiSimplifier(BlockBegin* start) : _has_substitutions(false) { 2896 start->iterate_preorder(this); 2897 if (_has_substitutions) { 2898 SubstitutionResolver sr(start); 2899 } 2900 } 2901 void block_do(BlockBegin* b); 2902 bool has_substitutions() const { return _has_substitutions; } 2903 }; 2904 2905 2906 Value PhiSimplifier::simplify(Value v) { 2907 Phi* phi = v->as_Phi(); 2908 2909 if (phi == nullptr) { 2910 // no phi function 2911 return v; 2912 } else if (v->has_subst()) { 2913 // already substituted; subst can be phi itself -> simplify 2914 return simplify(v->subst()); 2915 } else if (phi->is_set(Phi::cannot_simplify)) { 2916 // already tried to simplify phi before 2917 return phi; 2918 } else if (phi->is_set(Phi::visited)) { 2919 // break cycles in phi functions 2920 return phi; 2921 } else if (phi->type()->is_illegal()) { 2922 // illegal phi functions are ignored anyway 2923 return phi; 2924 2925 } else { 2926 // mark phi function as processed to break cycles in phi functions 2927 phi->set(Phi::visited); 2928 2929 // simplify x = [y, x] and x = [y, y] to y 2930 Value subst = nullptr; 2931 int opd_count = phi->operand_count(); 2932 for (int i = 0; i < opd_count; i++) { 2933 Value opd = phi->operand_at(i); 2934 assert(opd != nullptr, "Operand must exist!"); 2935 2936 if (opd->type()->is_illegal()) { 2937 // if one operand is illegal, the entire phi function is illegal 2938 phi->make_illegal(); 2939 phi->clear(Phi::visited); 2940 return phi; 2941 } 2942 2943 Value new_opd = simplify(opd); 2944 assert(new_opd != nullptr, "Simplified operand must exist!"); 2945 2946 if (new_opd != phi && new_opd != subst) { 2947 if (subst == nullptr) { 2948 subst = new_opd; 2949 } else { 2950 // no simplification possible 2951 phi->set(Phi::cannot_simplify); 2952 phi->clear(Phi::visited); 2953 return phi; 2954 } 2955 } 2956 } 2957 2958 // successfully simplified phi function 2959 assert(subst != nullptr, "illegal phi function"); 2960 _has_substitutions = true; 2961 phi->clear(Phi::visited); 2962 phi->set_subst(subst); 2963 2964 #ifndef PRODUCT 2965 if (PrintPhiFunctions) { 2966 tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id()); 2967 } 2968 #endif 2969 2970 return subst; 2971 } 2972 } 2973 2974 2975 void PhiSimplifier::block_do(BlockBegin* b) { 2976 for_each_phi_fun(b, phi, 2977 simplify(phi); 2978 ); 2979 2980 #ifdef ASSERT 2981 for_each_phi_fun(b, phi, 2982 assert(phi->operand_count() != 1 || phi->subst() != phi || phi->is_illegal(), "missed trivial simplification"); 2983 ); 2984 2985 ValueStack* state = b->state()->caller_state(); 2986 for_each_state_value(state, value, 2987 Phi* phi = value->as_Phi(); 2988 assert(phi == nullptr || phi->block() != b, "must not have phi function to simplify in caller state"); 2989 ); 2990 #endif 2991 } 2992 2993 // This method is called after all blocks are filled with HIR instructions 2994 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x] 2995 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) { 2996 PhiSimplifier simplifier(start); 2997 } 2998 2999 3000 void GraphBuilder::connect_to_end(BlockBegin* beg) { 3001 // setup iteration 3002 kill_all(); 3003 _block = beg; 3004 _state = beg->state()->copy_for_parsing(); 3005 _last = beg; 3006 iterate_bytecodes_for_block(beg->bci()); 3007 } 3008 3009 3010 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) { 3011 #ifndef PRODUCT 3012 if (PrintIRDuringConstruction) { 3013 tty->cr(); 3014 InstructionPrinter ip; 3015 ip.print_instr(_block); tty->cr(); 3016 ip.print_stack(_block->state()); tty->cr(); 3017 ip.print_inline_level(_block); 3018 ip.print_head(); 3019 tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size()); 3020 } 3021 #endif 3022 _skip_block = false; 3023 assert(state() != nullptr, "ValueStack missing!"); 3024 CompileLog* log = compilation()->log(); 3025 ciBytecodeStream s(method()); 3026 s.reset_to_bci(bci); 3027 int prev_bci = bci; 3028 scope_data()->set_stream(&s); 3029 // iterate 3030 Bytecodes::Code code = Bytecodes::_illegal; 3031 bool push_exception = false; 3032 3033 if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == nullptr) { 3034 // first thing in the exception entry block should be the exception object. 3035 push_exception = true; 3036 } 3037 3038 bool ignore_return = scope_data()->ignore_return(); 3039 3040 while (!bailed_out() && last()->as_BlockEnd() == nullptr && 3041 (code = stream()->next()) != ciBytecodeStream::EOBC() && 3042 (block_at(s.cur_bci()) == nullptr || block_at(s.cur_bci()) == block())) { 3043 assert(state()->kind() == ValueStack::Parsing, "invalid state kind"); 3044 3045 if (log != nullptr) 3046 log->set_context("bc code='%d' bci='%d'", (int)code, s.cur_bci()); 3047 3048 // Check for active jsr during OSR compilation 3049 if (compilation()->is_osr_compile() 3050 && scope()->is_top_scope() 3051 && parsing_jsr() 3052 && s.cur_bci() == compilation()->osr_bci()) { 3053 bailout("OSR not supported while a jsr is active"); 3054 } 3055 3056 if (push_exception) { 3057 apush(append(new ExceptionObject())); 3058 push_exception = false; 3059 } 3060 3061 // handle bytecode 3062 switch (code) { 3063 case Bytecodes::_nop : /* nothing to do */ break; 3064 case Bytecodes::_aconst_null : apush(append(new Constant(objectNull ))); break; 3065 case Bytecodes::_iconst_m1 : ipush(append(new Constant(new IntConstant (-1)))); break; 3066 case Bytecodes::_iconst_0 : ipush(append(new Constant(intZero ))); break; 3067 case Bytecodes::_iconst_1 : ipush(append(new Constant(intOne ))); break; 3068 case Bytecodes::_iconst_2 : ipush(append(new Constant(new IntConstant ( 2)))); break; 3069 case Bytecodes::_iconst_3 : ipush(append(new Constant(new IntConstant ( 3)))); break; 3070 case Bytecodes::_iconst_4 : ipush(append(new Constant(new IntConstant ( 4)))); break; 3071 case Bytecodes::_iconst_5 : ipush(append(new Constant(new IntConstant ( 5)))); break; 3072 case Bytecodes::_lconst_0 : lpush(append(new Constant(new LongConstant ( 0)))); break; 3073 case Bytecodes::_lconst_1 : lpush(append(new Constant(new LongConstant ( 1)))); break; 3074 case Bytecodes::_fconst_0 : fpush(append(new Constant(new FloatConstant ( 0)))); break; 3075 case Bytecodes::_fconst_1 : fpush(append(new Constant(new FloatConstant ( 1)))); break; 3076 case Bytecodes::_fconst_2 : fpush(append(new Constant(new FloatConstant ( 2)))); break; 3077 case Bytecodes::_dconst_0 : dpush(append(new Constant(new DoubleConstant( 0)))); break; 3078 case Bytecodes::_dconst_1 : dpush(append(new Constant(new DoubleConstant( 1)))); break; 3079 case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break; 3080 case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break; 3081 case Bytecodes::_ldc : // fall through 3082 case Bytecodes::_ldc_w : // fall through 3083 case Bytecodes::_ldc2_w : load_constant(); break; 3084 case Bytecodes::_iload : load_local(intType , s.get_index()); break; 3085 case Bytecodes::_lload : load_local(longType , s.get_index()); break; 3086 case Bytecodes::_fload : load_local(floatType , s.get_index()); break; 3087 case Bytecodes::_dload : load_local(doubleType , s.get_index()); break; 3088 case Bytecodes::_aload : load_local(instanceType, s.get_index()); break; 3089 case Bytecodes::_iload_0 : load_local(intType , 0); break; 3090 case Bytecodes::_iload_1 : load_local(intType , 1); break; 3091 case Bytecodes::_iload_2 : load_local(intType , 2); break; 3092 case Bytecodes::_iload_3 : load_local(intType , 3); break; 3093 case Bytecodes::_lload_0 : load_local(longType , 0); break; 3094 case Bytecodes::_lload_1 : load_local(longType , 1); break; 3095 case Bytecodes::_lload_2 : load_local(longType , 2); break; 3096 case Bytecodes::_lload_3 : load_local(longType , 3); break; 3097 case Bytecodes::_fload_0 : load_local(floatType , 0); break; 3098 case Bytecodes::_fload_1 : load_local(floatType , 1); break; 3099 case Bytecodes::_fload_2 : load_local(floatType , 2); break; 3100 case Bytecodes::_fload_3 : load_local(floatType , 3); break; 3101 case Bytecodes::_dload_0 : load_local(doubleType, 0); break; 3102 case Bytecodes::_dload_1 : load_local(doubleType, 1); break; 3103 case Bytecodes::_dload_2 : load_local(doubleType, 2); break; 3104 case Bytecodes::_dload_3 : load_local(doubleType, 3); break; 3105 case Bytecodes::_aload_0 : load_local(objectType, 0); break; 3106 case Bytecodes::_aload_1 : load_local(objectType, 1); break; 3107 case Bytecodes::_aload_2 : load_local(objectType, 2); break; 3108 case Bytecodes::_aload_3 : load_local(objectType, 3); break; 3109 case Bytecodes::_iaload : load_indexed(T_INT ); break; 3110 case Bytecodes::_laload : load_indexed(T_LONG ); break; 3111 case Bytecodes::_faload : load_indexed(T_FLOAT ); break; 3112 case Bytecodes::_daload : load_indexed(T_DOUBLE); break; 3113 case Bytecodes::_aaload : load_indexed(T_OBJECT); break; 3114 case Bytecodes::_baload : load_indexed(T_BYTE ); break; 3115 case Bytecodes::_caload : load_indexed(T_CHAR ); break; 3116 case Bytecodes::_saload : load_indexed(T_SHORT ); break; 3117 case Bytecodes::_istore : store_local(intType , s.get_index()); break; 3118 case Bytecodes::_lstore : store_local(longType , s.get_index()); break; 3119 case Bytecodes::_fstore : store_local(floatType , s.get_index()); break; 3120 case Bytecodes::_dstore : store_local(doubleType, s.get_index()); break; 3121 case Bytecodes::_astore : store_local(objectType, s.get_index()); break; 3122 case Bytecodes::_istore_0 : store_local(intType , 0); break; 3123 case Bytecodes::_istore_1 : store_local(intType , 1); break; 3124 case Bytecodes::_istore_2 : store_local(intType , 2); break; 3125 case Bytecodes::_istore_3 : store_local(intType , 3); break; 3126 case Bytecodes::_lstore_0 : store_local(longType , 0); break; 3127 case Bytecodes::_lstore_1 : store_local(longType , 1); break; 3128 case Bytecodes::_lstore_2 : store_local(longType , 2); break; 3129 case Bytecodes::_lstore_3 : store_local(longType , 3); break; 3130 case Bytecodes::_fstore_0 : store_local(floatType , 0); break; 3131 case Bytecodes::_fstore_1 : store_local(floatType , 1); break; 3132 case Bytecodes::_fstore_2 : store_local(floatType , 2); break; 3133 case Bytecodes::_fstore_3 : store_local(floatType , 3); break; 3134 case Bytecodes::_dstore_0 : store_local(doubleType, 0); break; 3135 case Bytecodes::_dstore_1 : store_local(doubleType, 1); break; 3136 case Bytecodes::_dstore_2 : store_local(doubleType, 2); break; 3137 case Bytecodes::_dstore_3 : store_local(doubleType, 3); break; 3138 case Bytecodes::_astore_0 : store_local(objectType, 0); break; 3139 case Bytecodes::_astore_1 : store_local(objectType, 1); break; 3140 case Bytecodes::_astore_2 : store_local(objectType, 2); break; 3141 case Bytecodes::_astore_3 : store_local(objectType, 3); break; 3142 case Bytecodes::_iastore : store_indexed(T_INT ); break; 3143 case Bytecodes::_lastore : store_indexed(T_LONG ); break; 3144 case Bytecodes::_fastore : store_indexed(T_FLOAT ); break; 3145 case Bytecodes::_dastore : store_indexed(T_DOUBLE); break; 3146 case Bytecodes::_aastore : store_indexed(T_OBJECT); break; 3147 case Bytecodes::_bastore : store_indexed(T_BYTE ); break; 3148 case Bytecodes::_castore : store_indexed(T_CHAR ); break; 3149 case Bytecodes::_sastore : store_indexed(T_SHORT ); break; 3150 case Bytecodes::_pop : // fall through 3151 case Bytecodes::_pop2 : // fall through 3152 case Bytecodes::_dup : // fall through 3153 case Bytecodes::_dup_x1 : // fall through 3154 case Bytecodes::_dup_x2 : // fall through 3155 case Bytecodes::_dup2 : // fall through 3156 case Bytecodes::_dup2_x1 : // fall through 3157 case Bytecodes::_dup2_x2 : // fall through 3158 case Bytecodes::_swap : stack_op(code); break; 3159 case Bytecodes::_iadd : arithmetic_op(intType , code); break; 3160 case Bytecodes::_ladd : arithmetic_op(longType , code); break; 3161 case Bytecodes::_fadd : arithmetic_op(floatType , code); break; 3162 case Bytecodes::_dadd : arithmetic_op(doubleType, code); break; 3163 case Bytecodes::_isub : arithmetic_op(intType , code); break; 3164 case Bytecodes::_lsub : arithmetic_op(longType , code); break; 3165 case Bytecodes::_fsub : arithmetic_op(floatType , code); break; 3166 case Bytecodes::_dsub : arithmetic_op(doubleType, code); break; 3167 case Bytecodes::_imul : arithmetic_op(intType , code); break; 3168 case Bytecodes::_lmul : arithmetic_op(longType , code); break; 3169 case Bytecodes::_fmul : arithmetic_op(floatType , code); break; 3170 case Bytecodes::_dmul : arithmetic_op(doubleType, code); break; 3171 case Bytecodes::_idiv : arithmetic_op(intType , code, copy_state_for_exception()); break; 3172 case Bytecodes::_ldiv : arithmetic_op(longType , code, copy_state_for_exception()); break; 3173 case Bytecodes::_fdiv : arithmetic_op(floatType , code); break; 3174 case Bytecodes::_ddiv : arithmetic_op(doubleType, code); break; 3175 case Bytecodes::_irem : arithmetic_op(intType , code, copy_state_for_exception()); break; 3176 case Bytecodes::_lrem : arithmetic_op(longType , code, copy_state_for_exception()); break; 3177 case Bytecodes::_frem : arithmetic_op(floatType , code); break; 3178 case Bytecodes::_drem : arithmetic_op(doubleType, code); break; 3179 case Bytecodes::_ineg : negate_op(intType ); break; 3180 case Bytecodes::_lneg : negate_op(longType ); break; 3181 case Bytecodes::_fneg : negate_op(floatType ); break; 3182 case Bytecodes::_dneg : negate_op(doubleType); break; 3183 case Bytecodes::_ishl : shift_op(intType , code); break; 3184 case Bytecodes::_lshl : shift_op(longType, code); break; 3185 case Bytecodes::_ishr : shift_op(intType , code); break; 3186 case Bytecodes::_lshr : shift_op(longType, code); break; 3187 case Bytecodes::_iushr : shift_op(intType , code); break; 3188 case Bytecodes::_lushr : shift_op(longType, code); break; 3189 case Bytecodes::_iand : logic_op(intType , code); break; 3190 case Bytecodes::_land : logic_op(longType, code); break; 3191 case Bytecodes::_ior : logic_op(intType , code); break; 3192 case Bytecodes::_lor : logic_op(longType, code); break; 3193 case Bytecodes::_ixor : logic_op(intType , code); break; 3194 case Bytecodes::_lxor : logic_op(longType, code); break; 3195 case Bytecodes::_iinc : increment(); break; 3196 case Bytecodes::_i2l : convert(code, T_INT , T_LONG ); break; 3197 case Bytecodes::_i2f : convert(code, T_INT , T_FLOAT ); break; 3198 case Bytecodes::_i2d : convert(code, T_INT , T_DOUBLE); break; 3199 case Bytecodes::_l2i : convert(code, T_LONG , T_INT ); break; 3200 case Bytecodes::_l2f : convert(code, T_LONG , T_FLOAT ); break; 3201 case Bytecodes::_l2d : convert(code, T_LONG , T_DOUBLE); break; 3202 case Bytecodes::_f2i : convert(code, T_FLOAT , T_INT ); break; 3203 case Bytecodes::_f2l : convert(code, T_FLOAT , T_LONG ); break; 3204 case Bytecodes::_f2d : convert(code, T_FLOAT , T_DOUBLE); break; 3205 case Bytecodes::_d2i : convert(code, T_DOUBLE, T_INT ); break; 3206 case Bytecodes::_d2l : convert(code, T_DOUBLE, T_LONG ); break; 3207 case Bytecodes::_d2f : convert(code, T_DOUBLE, T_FLOAT ); break; 3208 case Bytecodes::_i2b : convert(code, T_INT , T_BYTE ); break; 3209 case Bytecodes::_i2c : convert(code, T_INT , T_CHAR ); break; 3210 case Bytecodes::_i2s : convert(code, T_INT , T_SHORT ); break; 3211 case Bytecodes::_lcmp : compare_op(longType , code); break; 3212 case Bytecodes::_fcmpl : compare_op(floatType , code); break; 3213 case Bytecodes::_fcmpg : compare_op(floatType , code); break; 3214 case Bytecodes::_dcmpl : compare_op(doubleType, code); break; 3215 case Bytecodes::_dcmpg : compare_op(doubleType, code); break; 3216 case Bytecodes::_ifeq : if_zero(intType , If::eql); break; 3217 case Bytecodes::_ifne : if_zero(intType , If::neq); break; 3218 case Bytecodes::_iflt : if_zero(intType , If::lss); break; 3219 case Bytecodes::_ifge : if_zero(intType , If::geq); break; 3220 case Bytecodes::_ifgt : if_zero(intType , If::gtr); break; 3221 case Bytecodes::_ifle : if_zero(intType , If::leq); break; 3222 case Bytecodes::_if_icmpeq : if_same(intType , If::eql); break; 3223 case Bytecodes::_if_icmpne : if_same(intType , If::neq); break; 3224 case Bytecodes::_if_icmplt : if_same(intType , If::lss); break; 3225 case Bytecodes::_if_icmpge : if_same(intType , If::geq); break; 3226 case Bytecodes::_if_icmpgt : if_same(intType , If::gtr); break; 3227 case Bytecodes::_if_icmple : if_same(intType , If::leq); break; 3228 case Bytecodes::_if_acmpeq : if_same(objectType, If::eql); break; 3229 case Bytecodes::_if_acmpne : if_same(objectType, If::neq); break; 3230 case Bytecodes::_goto : _goto(s.cur_bci(), s.get_dest()); break; 3231 case Bytecodes::_jsr : jsr(s.get_dest()); break; 3232 case Bytecodes::_ret : ret(s.get_index()); break; 3233 case Bytecodes::_tableswitch : table_switch(); break; 3234 case Bytecodes::_lookupswitch : lookup_switch(); break; 3235 case Bytecodes::_ireturn : method_return(ipop(), ignore_return); break; 3236 case Bytecodes::_lreturn : method_return(lpop(), ignore_return); break; 3237 case Bytecodes::_freturn : method_return(fpop(), ignore_return); break; 3238 case Bytecodes::_dreturn : method_return(dpop(), ignore_return); break; 3239 case Bytecodes::_areturn : method_return(apop(), ignore_return); break; 3240 case Bytecodes::_return : method_return(nullptr, ignore_return); break; 3241 case Bytecodes::_getstatic : // fall through 3242 case Bytecodes::_putstatic : // fall through 3243 case Bytecodes::_getfield : // fall through 3244 case Bytecodes::_putfield : access_field(code); break; 3245 case Bytecodes::_invokevirtual : // fall through 3246 case Bytecodes::_invokespecial : // fall through 3247 case Bytecodes::_invokestatic : // fall through 3248 case Bytecodes::_invokedynamic : // fall through 3249 case Bytecodes::_invokeinterface: invoke(code); break; 3250 case Bytecodes::_new : new_instance(s.get_index_u2()); break; 3251 case Bytecodes::_newarray : new_type_array(); break; 3252 case Bytecodes::_anewarray : new_object_array(); break; 3253 case Bytecodes::_arraylength : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; } 3254 case Bytecodes::_athrow : throw_op(s.cur_bci()); break; 3255 case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break; 3256 case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break; 3257 case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break; 3258 case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break; 3259 case Bytecodes::_wide : ShouldNotReachHere(); break; 3260 case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break; 3261 case Bytecodes::_ifnull : if_null(objectType, If::eql); break; 3262 case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break; 3263 case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break; 3264 case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break; 3265 case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", nullptr); 3266 default : ShouldNotReachHere(); break; 3267 } 3268 3269 if (log != nullptr) 3270 log->clear_context(); // skip marker if nothing was printed 3271 3272 // save current bci to setup Goto at the end 3273 prev_bci = s.cur_bci(); 3274 3275 } 3276 CHECK_BAILOUT_(nullptr); 3277 // stop processing of this block (see try_inline_full) 3278 if (_skip_block) { 3279 _skip_block = false; 3280 assert(_last && _last->as_BlockEnd(), ""); 3281 return _last->as_BlockEnd(); 3282 } 3283 // if there are any, check if last instruction is a BlockEnd instruction 3284 BlockEnd* end = last()->as_BlockEnd(); 3285 if (end == nullptr) { 3286 // all blocks must end with a BlockEnd instruction => add a Goto 3287 end = new Goto(block_at(s.cur_bci()), false); 3288 append(end); 3289 } 3290 assert(end == last()->as_BlockEnd(), "inconsistency"); 3291 3292 assert(end->state() != nullptr, "state must already be present"); 3293 assert(end->as_Return() == nullptr || end->as_Throw() == nullptr || end->state()->stack_size() == 0, "stack not needed for return and throw"); 3294 3295 // connect to begin & set state 3296 // NOTE that inlining may have changed the block we are parsing 3297 block()->set_end(end); 3298 // propagate state 3299 for (int i = end->number_of_sux() - 1; i >= 0; i--) { 3300 BlockBegin* sux = end->sux_at(i); 3301 assert(sux->is_predecessor(block()), "predecessor missing"); 3302 // be careful, bailout if bytecodes are strange 3303 if (!sux->try_merge(end->state(), compilation()->has_irreducible_loops())) BAILOUT_("block join failed", nullptr); 3304 scope_data()->add_to_work_list(end->sux_at(i)); 3305 } 3306 3307 scope_data()->set_stream(nullptr); 3308 3309 // done 3310 return end; 3311 } 3312 3313 3314 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) { 3315 do { 3316 if (start_in_current_block_for_inlining && !bailed_out()) { 3317 iterate_bytecodes_for_block(0); 3318 start_in_current_block_for_inlining = false; 3319 } else { 3320 BlockBegin* b; 3321 while ((b = scope_data()->remove_from_work_list()) != nullptr) { 3322 if (!b->is_set(BlockBegin::was_visited_flag)) { 3323 if (b->is_set(BlockBegin::osr_entry_flag)) { 3324 // we're about to parse the osr entry block, so make sure 3325 // we setup the OSR edge leading into this block so that 3326 // Phis get setup correctly. 3327 setup_osr_entry_block(); 3328 // this is no longer the osr entry block, so clear it. 3329 b->clear(BlockBegin::osr_entry_flag); 3330 } 3331 b->set(BlockBegin::was_visited_flag); 3332 connect_to_end(b); 3333 } 3334 } 3335 } 3336 } while (!bailed_out() && !scope_data()->is_work_list_empty()); 3337 } 3338 3339 3340 bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes]; 3341 3342 void GraphBuilder::initialize() { 3343 // the following bytecodes are assumed to potentially 3344 // throw exceptions in compiled code - note that e.g. 3345 // monitorexit & the return bytecodes do not throw 3346 // exceptions since monitor pairing proved that they 3347 // succeed (if monitor pairing succeeded) 3348 Bytecodes::Code can_trap_list[] = 3349 { Bytecodes::_ldc 3350 , Bytecodes::_ldc_w 3351 , Bytecodes::_ldc2_w 3352 , Bytecodes::_iaload 3353 , Bytecodes::_laload 3354 , Bytecodes::_faload 3355 , Bytecodes::_daload 3356 , Bytecodes::_aaload 3357 , Bytecodes::_baload 3358 , Bytecodes::_caload 3359 , Bytecodes::_saload 3360 , Bytecodes::_iastore 3361 , Bytecodes::_lastore 3362 , Bytecodes::_fastore 3363 , Bytecodes::_dastore 3364 , Bytecodes::_aastore 3365 , Bytecodes::_bastore 3366 , Bytecodes::_castore 3367 , Bytecodes::_sastore 3368 , Bytecodes::_idiv 3369 , Bytecodes::_ldiv 3370 , Bytecodes::_irem 3371 , Bytecodes::_lrem 3372 , Bytecodes::_getstatic 3373 , Bytecodes::_putstatic 3374 , Bytecodes::_getfield 3375 , Bytecodes::_putfield 3376 , Bytecodes::_invokevirtual 3377 , Bytecodes::_invokespecial 3378 , Bytecodes::_invokestatic 3379 , Bytecodes::_invokedynamic 3380 , Bytecodes::_invokeinterface 3381 , Bytecodes::_new 3382 , Bytecodes::_newarray 3383 , Bytecodes::_anewarray 3384 , Bytecodes::_arraylength 3385 , Bytecodes::_athrow 3386 , Bytecodes::_checkcast 3387 , Bytecodes::_instanceof 3388 , Bytecodes::_monitorenter 3389 , Bytecodes::_multianewarray 3390 }; 3391 3392 // inititialize trap tables 3393 for (int i = 0; i < Bytecodes::number_of_java_codes; i++) { 3394 _can_trap[i] = false; 3395 } 3396 // set standard trap info 3397 for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) { 3398 _can_trap[can_trap_list[j]] = true; 3399 } 3400 } 3401 3402 3403 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) { 3404 assert(entry->is_set(f), "entry/flag mismatch"); 3405 // create header block 3406 BlockBegin* h = new BlockBegin(entry->bci()); 3407 h->set_depth_first_number(0); 3408 3409 Value l = h; 3410 BlockEnd* g = new Goto(entry, false); 3411 l->set_next(g, entry->bci()); 3412 h->set_end(g); 3413 h->set(f); 3414 // setup header block end state 3415 ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis) 3416 assert(s->stack_is_empty(), "must have empty stack at entry point"); 3417 g->set_state(s); 3418 return h; 3419 } 3420 3421 3422 3423 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) { 3424 BlockBegin* start = new BlockBegin(0); 3425 3426 // This code eliminates the empty start block at the beginning of 3427 // each method. Previously, each method started with the 3428 // start-block created below, and this block was followed by the 3429 // header block that was always empty. This header block is only 3430 // necessary if std_entry is also a backward branch target because 3431 // then phi functions may be necessary in the header block. It's 3432 // also necessary when profiling so that there's a single block that 3433 // can increment the counters. 3434 // In addition, with range check elimination, we may need a valid block 3435 // that dominates all the rest to insert range predicates. 3436 BlockBegin* new_header_block; 3437 if (std_entry->number_of_preds() > 0 || is_profiling() || RangeCheckElimination) { 3438 new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state); 3439 } else { 3440 new_header_block = std_entry; 3441 } 3442 3443 // setup start block (root for the IR graph) 3444 Base* base = 3445 new Base( 3446 new_header_block, 3447 osr_entry 3448 ); 3449 start->set_next(base, 0); 3450 start->set_end(base); 3451 // create & setup state for start block 3452 start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci())); 3453 base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci())); 3454 3455 if (base->std_entry()->state() == nullptr) { 3456 // setup states for header blocks 3457 base->std_entry()->merge(state, compilation()->has_irreducible_loops()); 3458 } 3459 3460 assert(base->std_entry()->state() != nullptr, ""); 3461 return start; 3462 } 3463 3464 3465 void GraphBuilder::setup_osr_entry_block() { 3466 assert(compilation()->is_osr_compile(), "only for osrs"); 3467 3468 int osr_bci = compilation()->osr_bci(); 3469 ciBytecodeStream s(method()); 3470 s.reset_to_bci(osr_bci); 3471 s.next(); 3472 scope_data()->set_stream(&s); 3473 3474 // create a new block to be the osr setup code 3475 _osr_entry = new BlockBegin(osr_bci); 3476 _osr_entry->set(BlockBegin::osr_entry_flag); 3477 _osr_entry->set_depth_first_number(0); 3478 BlockBegin* target = bci2block()->at(osr_bci); 3479 assert(target != nullptr && target->is_set(BlockBegin::osr_entry_flag), "must be there"); 3480 // the osr entry has no values for locals 3481 ValueStack* state = target->state()->copy(); 3482 _osr_entry->set_state(state); 3483 3484 kill_all(); 3485 _block = _osr_entry; 3486 _state = _osr_entry->state()->copy(); 3487 assert(_state->bci() == osr_bci, "mismatch"); 3488 _last = _osr_entry; 3489 Value e = append(new OsrEntry()); 3490 e->set_needs_null_check(false); 3491 3492 // OSR buffer is 3493 // 3494 // locals[nlocals-1..0] 3495 // monitors[number_of_locks-1..0] 3496 // 3497 // locals is a direct copy of the interpreter frame so in the osr buffer 3498 // so first slot in the local array is the last local from the interpreter 3499 // and last slot is local[0] (receiver) from the interpreter 3500 // 3501 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 3502 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock 3503 // in the interpreter frame (the method lock if a sync method) 3504 3505 // Initialize monitors in the compiled activation. 3506 3507 int index; 3508 Value local; 3509 3510 // find all the locals that the interpreter thinks contain live oops 3511 const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci); 3512 3513 // compute the offset into the locals so that we can treat the buffer 3514 // as if the locals were still in the interpreter frame 3515 int locals_offset = BytesPerWord * (method()->max_locals() - 1); 3516 for_each_local_value(state, index, local) { 3517 int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord; 3518 Value get; 3519 if (local->type()->is_object_kind() && !live_oops.at(index)) { 3520 // The interpreter thinks this local is dead but the compiler 3521 // doesn't so pretend that the interpreter passed in null. 3522 get = append(new Constant(objectNull)); 3523 } else { 3524 Value off_val = append(new Constant(new IntConstant(offset))); 3525 get = append(new UnsafeGet(as_BasicType(local->type()), e, 3526 off_val, 3527 false/*is_volatile*/, 3528 true/*is_raw*/)); 3529 } 3530 _state->store_local(index, get); 3531 } 3532 3533 // the storage for the OSR buffer is freed manually in the LIRGenerator. 3534 3535 assert(state->caller_state() == nullptr, "should be top scope"); 3536 state->clear_locals(); 3537 Goto* g = new Goto(target, false); 3538 append(g); 3539 _osr_entry->set_end(g); 3540 target->merge(_osr_entry->end()->state(), compilation()->has_irreducible_loops()); 3541 3542 scope_data()->set_stream(nullptr); 3543 } 3544 3545 3546 ValueStack* GraphBuilder::state_at_entry() { 3547 ValueStack* state = new ValueStack(scope(), nullptr); 3548 3549 // Set up locals for receiver 3550 int idx = 0; 3551 if (!method()->is_static()) { 3552 // we should always see the receiver 3553 state->store_local(idx, new Local(method()->holder(), objectType, idx, 3554 /*receiver*/ true, /*null_free*/ method()->holder()->is_flat_array_klass())); 3555 idx = 1; 3556 } 3557 3558 // Set up locals for incoming arguments 3559 ciSignature* sig = method()->signature(); 3560 for (int i = 0; i < sig->count(); i++) { 3561 ciType* type = sig->type_at(i); 3562 BasicType basic_type = type->basic_type(); 3563 // don't allow T_ARRAY to propagate into locals types 3564 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 3565 ValueType* vt = as_ValueType(basic_type); 3566 state->store_local(idx, new Local(type, vt, idx, false, false)); 3567 idx += type->size(); 3568 } 3569 3570 // lock synchronized method 3571 if (method()->is_synchronized()) { 3572 state->lock(nullptr); 3573 } 3574 3575 return state; 3576 } 3577 3578 3579 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope) 3580 : _scope_data(nullptr) 3581 , _compilation(compilation) 3582 , _memory(new MemoryBuffer()) 3583 , _inline_bailout_msg(nullptr) 3584 , _instruction_count(0) 3585 , _osr_entry(nullptr) 3586 , _pending_field_access(nullptr) 3587 , _pending_load_indexed(nullptr) 3588 { 3589 int osr_bci = compilation->osr_bci(); 3590 3591 // determine entry points and bci2block mapping 3592 BlockListBuilder blm(compilation, scope, osr_bci); 3593 CHECK_BAILOUT(); 3594 3595 BlockList* bci2block = blm.bci2block(); 3596 BlockBegin* start_block = bci2block->at(0); 3597 3598 push_root_scope(scope, bci2block, start_block); 3599 3600 // setup state for std entry 3601 _initial_state = state_at_entry(); 3602 start_block->merge(_initial_state, compilation->has_irreducible_loops()); 3603 3604 // End nulls still exist here 3605 3606 // complete graph 3607 _vmap = new ValueMap(); 3608 switch (scope->method()->intrinsic_id()) { 3609 case vmIntrinsics::_dabs : // fall through 3610 case vmIntrinsics::_dsqrt : // fall through 3611 case vmIntrinsics::_dsqrt_strict : // fall through 3612 case vmIntrinsics::_dsin : // fall through 3613 case vmIntrinsics::_dcos : // fall through 3614 case vmIntrinsics::_dtan : // fall through 3615 case vmIntrinsics::_dtanh : // fall through 3616 case vmIntrinsics::_dlog : // fall through 3617 case vmIntrinsics::_dlog10 : // fall through 3618 case vmIntrinsics::_dexp : // fall through 3619 case vmIntrinsics::_dpow : // fall through 3620 { 3621 // Compiles where the root method is an intrinsic need a special 3622 // compilation environment because the bytecodes for the method 3623 // shouldn't be parsed during the compilation, only the special 3624 // Intrinsic node should be emitted. If this isn't done the 3625 // code for the inlined version will be different than the root 3626 // compiled version which could lead to monotonicity problems on 3627 // intel. 3628 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) { 3629 BAILOUT("failed to inline intrinsic, method not annotated"); 3630 } 3631 3632 // Set up a stream so that appending instructions works properly. 3633 ciBytecodeStream s(scope->method()); 3634 s.reset_to_bci(0); 3635 scope_data()->set_stream(&s); 3636 s.next(); 3637 3638 // setup the initial block state 3639 _block = start_block; 3640 _state = start_block->state()->copy_for_parsing(); 3641 _last = start_block; 3642 load_local(doubleType, 0); 3643 if (scope->method()->intrinsic_id() == vmIntrinsics::_dpow) { 3644 load_local(doubleType, 2); 3645 } 3646 3647 // Emit the intrinsic node. 3648 bool result = try_inline_intrinsics(scope->method()); 3649 if (!result) BAILOUT("failed to inline intrinsic"); 3650 method_return(dpop()); 3651 3652 // connect the begin and end blocks and we're all done. 3653 BlockEnd* end = last()->as_BlockEnd(); 3654 block()->set_end(end); 3655 break; 3656 } 3657 3658 case vmIntrinsics::_Reference_get: 3659 { 3660 { 3661 // With java.lang.ref.reference.get() we must go through the 3662 // intrinsic - when G1 is enabled - even when get() is the root 3663 // method of the compile so that, if necessary, the value in 3664 // the referent field of the reference object gets recorded by 3665 // the pre-barrier code. 3666 // Specifically, if G1 is enabled, the value in the referent 3667 // field is recorded by the G1 SATB pre barrier. This will 3668 // result in the referent being marked live and the reference 3669 // object removed from the list of discovered references during 3670 // reference processing. 3671 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) { 3672 BAILOUT("failed to inline intrinsic, method not annotated"); 3673 } 3674 3675 // Also we need intrinsic to prevent commoning reads from this field 3676 // across safepoint since GC can change its value. 3677 3678 // Set up a stream so that appending instructions works properly. 3679 ciBytecodeStream s(scope->method()); 3680 s.reset_to_bci(0); 3681 scope_data()->set_stream(&s); 3682 s.next(); 3683 3684 // setup the initial block state 3685 _block = start_block; 3686 _state = start_block->state()->copy_for_parsing(); 3687 _last = start_block; 3688 load_local(objectType, 0); 3689 3690 // Emit the intrinsic node. 3691 bool result = try_inline_intrinsics(scope->method()); 3692 if (!result) BAILOUT("failed to inline intrinsic"); 3693 method_return(apop()); 3694 3695 // connect the begin and end blocks and we're all done. 3696 BlockEnd* end = last()->as_BlockEnd(); 3697 block()->set_end(end); 3698 break; 3699 } 3700 // Otherwise, fall thru 3701 } 3702 3703 default: 3704 scope_data()->add_to_work_list(start_block); 3705 iterate_all_blocks(); 3706 break; 3707 } 3708 CHECK_BAILOUT(); 3709 3710 # ifdef ASSERT 3711 // For all blocks reachable from start_block: _end must be non-null 3712 { 3713 BlockList processed; 3714 BlockList to_go; 3715 to_go.append(start_block); 3716 while(to_go.length() > 0) { 3717 BlockBegin* current = to_go.pop(); 3718 assert(current != nullptr, "Should not happen."); 3719 assert(current->end() != nullptr, "All blocks reachable from start_block should have end() != nullptr."); 3720 processed.append(current); 3721 for(int i = 0; i < current->number_of_sux(); i++) { 3722 BlockBegin* s = current->sux_at(i); 3723 if (!processed.contains(s)) { 3724 to_go.append(s); 3725 } 3726 } 3727 } 3728 } 3729 #endif // ASSERT 3730 3731 _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state); 3732 3733 eliminate_redundant_phis(_start); 3734 3735 NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats()); 3736 // for osr compile, bailout if some requirements are not fulfilled 3737 if (osr_bci != -1) { 3738 BlockBegin* osr_block = blm.bci2block()->at(osr_bci); 3739 if (!osr_block->is_set(BlockBegin::was_visited_flag)) { 3740 BAILOUT("osr entry must have been visited for osr compile"); 3741 } 3742 3743 // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points 3744 if (!osr_block->state()->stack_is_empty()) { 3745 BAILOUT("stack not empty at OSR entry point"); 3746 } 3747 } 3748 #ifndef PRODUCT 3749 if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count); 3750 #endif 3751 } 3752 3753 3754 ValueStack* GraphBuilder::copy_state_before() { 3755 return copy_state_before_with_bci(bci()); 3756 } 3757 3758 ValueStack* GraphBuilder::copy_state_exhandling() { 3759 return copy_state_exhandling_with_bci(bci()); 3760 } 3761 3762 ValueStack* GraphBuilder::copy_state_for_exception() { 3763 return copy_state_for_exception_with_bci(bci()); 3764 } 3765 3766 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) { 3767 return state()->copy(ValueStack::StateBefore, bci); 3768 } 3769 3770 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) { 3771 if (!has_handler()) return nullptr; 3772 return state()->copy(ValueStack::StateBefore, bci); 3773 } 3774 3775 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) { 3776 ValueStack* s = copy_state_exhandling_with_bci(bci); 3777 if (s == nullptr) { 3778 // no handler, no need to retain locals 3779 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(); 3780 s = state()->copy(exc_kind, bci); 3781 } 3782 return s; 3783 } 3784 3785 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const { 3786 int recur_level = 0; 3787 for (IRScope* s = scope(); s != nullptr; s = s->caller()) { 3788 if (s->method() == cur_callee) { 3789 ++recur_level; 3790 } 3791 } 3792 return recur_level; 3793 } 3794 3795 static void set_flags_for_inlined_callee(Compilation* compilation, ciMethod* callee) { 3796 if (callee->has_reserved_stack_access()) { 3797 compilation->set_has_reserved_stack_access(true); 3798 } 3799 if (callee->is_synchronized() || callee->has_monitor_bytecodes()) { 3800 compilation->set_has_monitors(true); 3801 } 3802 if (callee->is_scoped()) { 3803 compilation->set_has_scoped_access(true); 3804 } 3805 } 3806 3807 bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 3808 const char* msg = nullptr; 3809 3810 // clear out any existing inline bailout condition 3811 clear_inline_bailout(); 3812 3813 // exclude methods we don't want to inline 3814 msg = should_not_inline(callee); 3815 if (msg != nullptr) { 3816 print_inlining(callee, msg, /*success*/ false); 3817 return false; 3818 } 3819 3820 // method handle invokes 3821 if (callee->is_method_handle_intrinsic()) { 3822 if (try_method_handle_inline(callee, ignore_return)) { 3823 set_flags_for_inlined_callee(compilation(), callee); 3824 return true; 3825 } 3826 return false; 3827 } 3828 3829 // handle intrinsics 3830 if (callee->intrinsic_id() != vmIntrinsics::_none && 3831 callee->check_intrinsic_candidate()) { 3832 if (try_inline_intrinsics(callee, ignore_return)) { 3833 print_inlining(callee, "intrinsic"); 3834 set_flags_for_inlined_callee(compilation(), callee); 3835 return true; 3836 } 3837 // try normal inlining 3838 } 3839 3840 // certain methods cannot be parsed at all 3841 msg = check_can_parse(callee); 3842 if (msg != nullptr) { 3843 print_inlining(callee, msg, /*success*/ false); 3844 return false; 3845 } 3846 3847 // If bytecode not set use the current one. 3848 if (bc == Bytecodes::_illegal) { 3849 bc = code(); 3850 } 3851 if (try_inline_full(callee, holder_known, ignore_return, bc, receiver)) { 3852 set_flags_for_inlined_callee(compilation(), callee); 3853 return true; 3854 } 3855 3856 // Entire compilation could fail during try_inline_full call. 3857 // In that case printing inlining decision info is useless. 3858 if (!bailed_out()) 3859 print_inlining(callee, _inline_bailout_msg, /*success*/ false); 3860 3861 return false; 3862 } 3863 3864 3865 const char* GraphBuilder::check_can_parse(ciMethod* callee) const { 3866 // Certain methods cannot be parsed at all: 3867 if ( callee->is_native()) return "native method"; 3868 if ( callee->is_abstract()) return "abstract method"; 3869 if (!callee->can_be_parsed()) return "cannot be parsed"; 3870 return nullptr; 3871 } 3872 3873 // negative filter: should callee NOT be inlined? returns null, ok to inline, or rejection msg 3874 const char* GraphBuilder::should_not_inline(ciMethod* callee) const { 3875 if ( compilation()->directive()->should_not_inline(callee)) return "disallowed by CompileCommand"; 3876 if ( callee->dont_inline()) return "don't inline by annotation"; 3877 return nullptr; 3878 } 3879 3880 void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_return) { 3881 vmIntrinsics::ID id = callee->intrinsic_id(); 3882 assert(id != vmIntrinsics::_none, "must be a VM intrinsic"); 3883 3884 // Some intrinsics need special IR nodes. 3885 switch(id) { 3886 case vmIntrinsics::_getReference : append_unsafe_get(callee, T_OBJECT, false); return; 3887 case vmIntrinsics::_getBoolean : append_unsafe_get(callee, T_BOOLEAN, false); return; 3888 case vmIntrinsics::_getByte : append_unsafe_get(callee, T_BYTE, false); return; 3889 case vmIntrinsics::_getShort : append_unsafe_get(callee, T_SHORT, false); return; 3890 case vmIntrinsics::_getChar : append_unsafe_get(callee, T_CHAR, false); return; 3891 case vmIntrinsics::_getInt : append_unsafe_get(callee, T_INT, false); return; 3892 case vmIntrinsics::_getLong : append_unsafe_get(callee, T_LONG, false); return; 3893 case vmIntrinsics::_getFloat : append_unsafe_get(callee, T_FLOAT, false); return; 3894 case vmIntrinsics::_getDouble : append_unsafe_get(callee, T_DOUBLE, false); return; 3895 case vmIntrinsics::_putReference : append_unsafe_put(callee, T_OBJECT, false); return; 3896 case vmIntrinsics::_putBoolean : append_unsafe_put(callee, T_BOOLEAN, false); return; 3897 case vmIntrinsics::_putByte : append_unsafe_put(callee, T_BYTE, false); return; 3898 case vmIntrinsics::_putShort : append_unsafe_put(callee, T_SHORT, false); return; 3899 case vmIntrinsics::_putChar : append_unsafe_put(callee, T_CHAR, false); return; 3900 case vmIntrinsics::_putInt : append_unsafe_put(callee, T_INT, false); return; 3901 case vmIntrinsics::_putLong : append_unsafe_put(callee, T_LONG, false); return; 3902 case vmIntrinsics::_putFloat : append_unsafe_put(callee, T_FLOAT, false); return; 3903 case vmIntrinsics::_putDouble : append_unsafe_put(callee, T_DOUBLE, false); return; 3904 case vmIntrinsics::_getShortUnaligned : append_unsafe_get(callee, T_SHORT, false); return; 3905 case vmIntrinsics::_getCharUnaligned : append_unsafe_get(callee, T_CHAR, false); return; 3906 case vmIntrinsics::_getIntUnaligned : append_unsafe_get(callee, T_INT, false); return; 3907 case vmIntrinsics::_getLongUnaligned : append_unsafe_get(callee, T_LONG, false); return; 3908 case vmIntrinsics::_putShortUnaligned : append_unsafe_put(callee, T_SHORT, false); return; 3909 case vmIntrinsics::_putCharUnaligned : append_unsafe_put(callee, T_CHAR, false); return; 3910 case vmIntrinsics::_putIntUnaligned : append_unsafe_put(callee, T_INT, false); return; 3911 case vmIntrinsics::_putLongUnaligned : append_unsafe_put(callee, T_LONG, false); return; 3912 case vmIntrinsics::_getReferenceVolatile : append_unsafe_get(callee, T_OBJECT, true); return; 3913 case vmIntrinsics::_getBooleanVolatile : append_unsafe_get(callee, T_BOOLEAN, true); return; 3914 case vmIntrinsics::_getByteVolatile : append_unsafe_get(callee, T_BYTE, true); return; 3915 case vmIntrinsics::_getShortVolatile : append_unsafe_get(callee, T_SHORT, true); return; 3916 case vmIntrinsics::_getCharVolatile : append_unsafe_get(callee, T_CHAR, true); return; 3917 case vmIntrinsics::_getIntVolatile : append_unsafe_get(callee, T_INT, true); return; 3918 case vmIntrinsics::_getLongVolatile : append_unsafe_get(callee, T_LONG, true); return; 3919 case vmIntrinsics::_getFloatVolatile : append_unsafe_get(callee, T_FLOAT, true); return; 3920 case vmIntrinsics::_getDoubleVolatile : append_unsafe_get(callee, T_DOUBLE, true); return; 3921 case vmIntrinsics::_putReferenceVolatile : append_unsafe_put(callee, T_OBJECT, true); return; 3922 case vmIntrinsics::_putBooleanVolatile : append_unsafe_put(callee, T_BOOLEAN, true); return; 3923 case vmIntrinsics::_putByteVolatile : append_unsafe_put(callee, T_BYTE, true); return; 3924 case vmIntrinsics::_putShortVolatile : append_unsafe_put(callee, T_SHORT, true); return; 3925 case vmIntrinsics::_putCharVolatile : append_unsafe_put(callee, T_CHAR, true); return; 3926 case vmIntrinsics::_putIntVolatile : append_unsafe_put(callee, T_INT, true); return; 3927 case vmIntrinsics::_putLongVolatile : append_unsafe_put(callee, T_LONG, true); return; 3928 case vmIntrinsics::_putFloatVolatile : append_unsafe_put(callee, T_FLOAT, true); return; 3929 case vmIntrinsics::_putDoubleVolatile : append_unsafe_put(callee, T_DOUBLE, true); return; 3930 case vmIntrinsics::_compareAndSetLong: 3931 case vmIntrinsics::_compareAndSetInt: 3932 case vmIntrinsics::_compareAndSetReference : append_unsafe_CAS(callee); return; 3933 case vmIntrinsics::_getAndAddInt: 3934 case vmIntrinsics::_getAndAddLong : append_unsafe_get_and_set(callee, true); return; 3935 case vmIntrinsics::_getAndSetInt : 3936 case vmIntrinsics::_getAndSetLong : 3937 case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set(callee, false); return; 3938 case vmIntrinsics::_getCharStringU : append_char_access(callee, false); return; 3939 case vmIntrinsics::_putCharStringU : append_char_access(callee, true); return; 3940 case vmIntrinsics::_clone : append_alloc_array_copy(callee); return; 3941 default: 3942 break; 3943 } 3944 if (_inline_bailout_msg != nullptr) { 3945 return; 3946 } 3947 3948 // create intrinsic node 3949 const bool has_receiver = !callee->is_static(); 3950 ValueType* result_type = as_ValueType(callee->return_type()); 3951 ValueStack* state_before = copy_state_for_exception(); 3952 3953 Values* args = state()->pop_arguments(callee->arg_size()); 3954 3955 if (is_profiling()) { 3956 // Don't profile in the special case where the root method 3957 // is the intrinsic 3958 if (callee != method()) { 3959 // Note that we'd collect profile data in this method if we wanted it. 3960 compilation()->set_would_profile(true); 3961 if (profile_calls()) { 3962 Value recv = nullptr; 3963 if (has_receiver) { 3964 recv = args->at(0); 3965 null_check(recv); 3966 } 3967 profile_call(callee, recv, nullptr, collect_args_for_profiling(args, callee, true), true); 3968 } 3969 } 3970 } 3971 3972 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), 3973 args, has_receiver, state_before, 3974 vmIntrinsics::preserves_state(id), 3975 vmIntrinsics::can_trap(id)); 3976 // append instruction & push result 3977 Value value = append_split(result); 3978 if (result_type != voidType && !ignore_return) { 3979 push(result_type, value); 3980 } 3981 3982 if (callee != method() && profile_return() && result_type->is_object_kind()) { 3983 profile_return_type(result, callee); 3984 } 3985 } 3986 3987 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) { 3988 // For calling is_intrinsic_available we need to transition to 3989 // the '_thread_in_vm' state because is_intrinsic_available() 3990 // accesses critical VM-internal data. 3991 bool is_available = false; 3992 { 3993 VM_ENTRY_MARK; 3994 methodHandle mh(THREAD, callee->get_Method()); 3995 is_available = _compilation->compiler()->is_intrinsic_available(mh, _compilation->directive()); 3996 } 3997 3998 if (!is_available) { 3999 if (!InlineNatives) { 4000 // Return false and also set message that the inlining of 4001 // intrinsics has been disabled in general. 4002 INLINE_BAILOUT("intrinsic method inlining disabled"); 4003 } else { 4004 return false; 4005 } 4006 } 4007 build_graph_for_intrinsic(callee, ignore_return); 4008 if (_inline_bailout_msg != nullptr) { 4009 return false; 4010 } 4011 return true; 4012 } 4013 4014 4015 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) { 4016 // Introduce a new callee continuation point - all Ret instructions 4017 // will be replaced with Gotos to this point. 4018 if (next_bci() >= method()->code_size()) { 4019 return false; 4020 } 4021 BlockBegin* cont = block_at(next_bci()); 4022 assert(cont != nullptr, "continuation must exist (BlockListBuilder starts a new block after a jsr"); 4023 4024 // Note: can not assign state to continuation yet, as we have to 4025 // pick up the state from the Ret instructions. 4026 4027 // Push callee scope 4028 push_scope_for_jsr(cont, jsr_dest_bci); 4029 4030 // Temporarily set up bytecode stream so we can append instructions 4031 // (only using the bci of this stream) 4032 scope_data()->set_stream(scope_data()->parent()->stream()); 4033 4034 BlockBegin* jsr_start_block = block_at(jsr_dest_bci); 4035 assert(jsr_start_block != nullptr, "jsr start block must exist"); 4036 assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet"); 4037 Goto* goto_sub = new Goto(jsr_start_block, false); 4038 // Must copy state to avoid wrong sharing when parsing bytecodes 4039 assert(jsr_start_block->state() == nullptr, "should have fresh jsr starting block"); 4040 jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci)); 4041 append(goto_sub); 4042 _block->set_end(goto_sub); 4043 _last = _block = jsr_start_block; 4044 4045 // Clear out bytecode stream 4046 scope_data()->set_stream(nullptr); 4047 4048 scope_data()->add_to_work_list(jsr_start_block); 4049 4050 // Ready to resume parsing in subroutine 4051 iterate_all_blocks(); 4052 4053 // If we bailed out during parsing, return immediately (this is bad news) 4054 CHECK_BAILOUT_(false); 4055 4056 // Detect whether the continuation can actually be reached. If not, 4057 // it has not had state set by the join() operations in 4058 // iterate_bytecodes_for_block()/ret() and we should not touch the 4059 // iteration state. The calling activation of 4060 // iterate_bytecodes_for_block will then complete normally. 4061 if (cont->state() != nullptr) { 4062 if (!cont->is_set(BlockBegin::was_visited_flag)) { 4063 // add continuation to work list instead of parsing it immediately 4064 scope_data()->parent()->add_to_work_list(cont); 4065 } 4066 } 4067 4068 assert(jsr_continuation() == cont, "continuation must not have changed"); 4069 assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) || 4070 jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag), 4071 "continuation can only be visited in case of backward branches"); 4072 assert(_last && _last->as_BlockEnd(), "block must have end"); 4073 4074 // continuation is in work list, so end iteration of current block 4075 _skip_block = true; 4076 pop_scope_for_jsr(); 4077 4078 return true; 4079 } 4080 4081 4082 // Inline the entry of a synchronized method as a monitor enter and 4083 // register the exception handler which releases the monitor if an 4084 // exception is thrown within the callee. Note that the monitor enter 4085 // cannot throw an exception itself, because the receiver is 4086 // guaranteed to be non-null by the explicit null check at the 4087 // beginning of inlining. 4088 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) { 4089 assert(lock != nullptr && sync_handler != nullptr, "lock or handler missing"); 4090 4091 monitorenter(lock, SynchronizationEntryBCI); 4092 assert(_last->as_MonitorEnter() != nullptr, "monitor enter expected"); 4093 _last->set_needs_null_check(false); 4094 4095 sync_handler->set(BlockBegin::exception_entry_flag); 4096 sync_handler->set(BlockBegin::is_on_work_list_flag); 4097 4098 ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0); 4099 XHandler* h = new XHandler(desc); 4100 h->set_entry_block(sync_handler); 4101 scope_data()->xhandlers()->append(h); 4102 scope_data()->set_has_handler(); 4103 } 4104 4105 4106 // If an exception is thrown and not handled within an inlined 4107 // synchronized method, the monitor must be released before the 4108 // exception is rethrown in the outer scope. Generate the appropriate 4109 // instructions here. 4110 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) { 4111 BlockBegin* orig_block = _block; 4112 ValueStack* orig_state = _state; 4113 Instruction* orig_last = _last; 4114 _last = _block = sync_handler; 4115 _state = sync_handler->state()->copy(); 4116 4117 assert(sync_handler != nullptr, "handler missing"); 4118 assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here"); 4119 4120 assert(lock != nullptr || default_handler, "lock or handler missing"); 4121 4122 XHandler* h = scope_data()->xhandlers()->remove_last(); 4123 assert(h->entry_block() == sync_handler, "corrupt list of handlers"); 4124 4125 block()->set(BlockBegin::was_visited_flag); 4126 Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI); 4127 assert(exception->is_pinned(), "must be"); 4128 4129 int bci = SynchronizationEntryBCI; 4130 if (compilation()->env()->dtrace_method_probes()) { 4131 // Report exit from inline methods. We don't have a stream here 4132 // so pass an explicit bci of SynchronizationEntryBCI. 4133 Values* args = new Values(1); 4134 args->push(append_with_bci(new Constant(new MethodConstant(method())), bci)); 4135 append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci); 4136 } 4137 4138 if (lock) { 4139 assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing"); 4140 if (!lock->is_linked()) { 4141 lock = append_with_bci(lock, bci); 4142 } 4143 4144 // exit the monitor in the context of the synchronized method 4145 monitorexit(lock, bci); 4146 4147 // exit the context of the synchronized method 4148 if (!default_handler) { 4149 pop_scope(); 4150 bci = _state->caller_state()->bci(); 4151 _state = _state->caller_state()->copy_for_parsing(); 4152 } 4153 } 4154 4155 // perform the throw as if at the call site 4156 apush(exception); 4157 throw_op(bci); 4158 4159 BlockEnd* end = last()->as_BlockEnd(); 4160 block()->set_end(end); 4161 4162 _block = orig_block; 4163 _state = orig_state; 4164 _last = orig_last; 4165 } 4166 4167 4168 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 4169 assert(!callee->is_native(), "callee must not be native"); 4170 if (CompilationPolicy::should_not_inline(compilation()->env(), callee)) { 4171 INLINE_BAILOUT("inlining prohibited by policy"); 4172 } 4173 // first perform tests of things it's not possible to inline 4174 if (callee->has_exception_handlers() && 4175 !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers"); 4176 if (callee->is_synchronized() && 4177 !InlineSynchronizedMethods ) INLINE_BAILOUT("callee is synchronized"); 4178 if (!callee->holder()->is_linked()) INLINE_BAILOUT("callee's klass not linked yet"); 4179 if (bc == Bytecodes::_invokestatic && 4180 !callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet"); 4181 if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match"); 4182 4183 // Proper inlining of methods with jsrs requires a little more work. 4184 if (callee->has_jsrs() ) INLINE_BAILOUT("jsrs not handled properly by inliner yet"); 4185 4186 if (is_profiling() && !callee->ensure_method_data()) { 4187 INLINE_BAILOUT("mdo allocation failed"); 4188 } 4189 4190 const bool is_invokedynamic = (bc == Bytecodes::_invokedynamic); 4191 const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic); 4192 4193 const int args_base = state()->stack_size() - callee->arg_size(); 4194 assert(args_base >= 0, "stack underflow during inlining"); 4195 4196 Value recv = nullptr; 4197 if (has_receiver) { 4198 assert(!callee->is_static(), "callee must not be static"); 4199 assert(callee->arg_size() > 0, "must have at least a receiver"); 4200 4201 recv = state()->stack_at(args_base); 4202 if (recv->is_null_obj()) { 4203 INLINE_BAILOUT("receiver is always null"); 4204 } 4205 } 4206 4207 // now perform tests that are based on flag settings 4208 bool inlinee_by_directive = compilation()->directive()->should_inline(callee); 4209 if (callee->force_inline() || inlinee_by_directive) { 4210 if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel"); 4211 if (recursive_inline_level(callee) > C1MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep"); 4212 4213 const char* msg = ""; 4214 if (callee->force_inline()) msg = "force inline by annotation"; 4215 if (inlinee_by_directive) msg = "force inline by CompileCommand"; 4216 print_inlining(callee, msg); 4217 } else { 4218 // use heuristic controls on inlining 4219 if (inline_level() > C1MaxInlineLevel ) INLINE_BAILOUT("inlining too deep"); 4220 int callee_recursive_level = recursive_inline_level(callee); 4221 if (callee_recursive_level > C1MaxRecursiveInlineLevel ) INLINE_BAILOUT("recursive inlining too deep"); 4222 if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); 4223 // Additional condition to limit stack usage for non-recursive calls. 4224 if ((callee_recursive_level == 0) && 4225 (callee->max_stack() + callee->max_locals() - callee->size_of_parameters() > C1InlineStackLimit)) { 4226 INLINE_BAILOUT("callee uses too much stack"); 4227 } 4228 4229 // don't inline throwable methods unless the inlining tree is rooted in a throwable class 4230 if (callee->name() == ciSymbols::object_initializer_name() && 4231 callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 4232 // Throwable constructor call 4233 IRScope* top = scope(); 4234 while (top->caller() != nullptr) { 4235 top = top->caller(); 4236 } 4237 if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 4238 INLINE_BAILOUT("don't inline Throwable constructors"); 4239 } 4240 } 4241 4242 if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) { 4243 INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); 4244 } 4245 // printing 4246 print_inlining(callee, "inline", /*success*/ true); 4247 } 4248 4249 assert(bc != Bytecodes::_invokestatic || callee->holder()->is_initialized(), "required"); 4250 4251 // NOTE: Bailouts from this point on, which occur at the 4252 // GraphBuilder level, do not cause bailout just of the inlining but 4253 // in fact of the entire compilation. 4254 4255 BlockBegin* orig_block = block(); 4256 4257 // Insert null check if necessary 4258 if (has_receiver) { 4259 // note: null check must happen even if first instruction of callee does 4260 // an implicit null check since the callee is in a different scope 4261 // and we must make sure exception handling does the right thing 4262 null_check(recv); 4263 } 4264 4265 if (is_profiling()) { 4266 // Note that we'd collect profile data in this method if we wanted it. 4267 // this may be redundant here... 4268 compilation()->set_would_profile(true); 4269 4270 if (profile_calls()) { 4271 int start = 0; 4272 Values* obj_args = args_list_for_profiling(callee, start, has_receiver); 4273 if (obj_args != nullptr) { 4274 int s = obj_args->capacity(); 4275 // if called through method handle invoke, some arguments may have been popped 4276 for (int i = args_base+start, j = 0; j < obj_args->capacity() && i < state()->stack_size(); ) { 4277 Value v = state()->stack_at_inc(i); 4278 if (v->type()->is_object_kind()) { 4279 obj_args->push(v); 4280 j++; 4281 } 4282 } 4283 check_args_for_profiling(obj_args, s); 4284 } 4285 profile_call(callee, recv, holder_known ? callee->holder() : nullptr, obj_args, true); 4286 } 4287 } 4288 4289 // Introduce a new callee continuation point - if the callee has 4290 // more than one return instruction or the return does not allow 4291 // fall-through of control flow, all return instructions of the 4292 // callee will need to be replaced by Goto's pointing to this 4293 // continuation point. 4294 BlockBegin* cont = block_at(next_bci()); 4295 bool continuation_existed = true; 4296 if (cont == nullptr) { 4297 cont = new BlockBegin(next_bci()); 4298 // low number so that continuation gets parsed as early as possible 4299 cont->set_depth_first_number(0); 4300 if (PrintInitialBlockList) { 4301 tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d", 4302 cont->block_id(), cont->bci(), bci()); 4303 } 4304 continuation_existed = false; 4305 } 4306 // Record number of predecessors of continuation block before 4307 // inlining, to detect if inlined method has edges to its 4308 // continuation after inlining. 4309 int continuation_preds = cont->number_of_preds(); 4310 4311 // Push callee scope 4312 push_scope(callee, cont); 4313 4314 // the BlockListBuilder for the callee could have bailed out 4315 if (bailed_out()) 4316 return false; 4317 4318 // Temporarily set up bytecode stream so we can append instructions 4319 // (only using the bci of this stream) 4320 scope_data()->set_stream(scope_data()->parent()->stream()); 4321 4322 // Pass parameters into callee state: add assignments 4323 // note: this will also ensure that all arguments are computed before being passed 4324 ValueStack* callee_state = state(); 4325 ValueStack* caller_state = state()->caller_state(); 4326 for (int i = args_base; i < caller_state->stack_size(); ) { 4327 const int arg_no = i - args_base; 4328 Value arg = caller_state->stack_at_inc(i); 4329 store_local(callee_state, arg, arg_no); 4330 } 4331 4332 // Remove args from stack. 4333 // Note that we preserve locals state in case we can use it later 4334 // (see use of pop_scope() below) 4335 caller_state->truncate_stack(args_base); 4336 assert(callee_state->stack_size() == 0, "callee stack must be empty"); 4337 4338 Value lock = nullptr; 4339 BlockBegin* sync_handler = nullptr; 4340 4341 // Inline the locking of the receiver if the callee is synchronized 4342 if (callee->is_synchronized()) { 4343 lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror()))) 4344 : state()->local_at(0); 4345 sync_handler = new BlockBegin(SynchronizationEntryBCI); 4346 inline_sync_entry(lock, sync_handler); 4347 } 4348 4349 if (compilation()->env()->dtrace_method_probes()) { 4350 Values* args = new Values(1); 4351 args->push(append(new Constant(new MethodConstant(method())))); 4352 append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args)); 4353 } 4354 4355 if (profile_inlined_calls()) { 4356 profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI)); 4357 } 4358 4359 BlockBegin* callee_start_block = block_at(0); 4360 if (callee_start_block != nullptr) { 4361 assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header"); 4362 Goto* goto_callee = new Goto(callee_start_block, false); 4363 // The state for this goto is in the scope of the callee, so use 4364 // the entry bci for the callee instead of the call site bci. 4365 append_with_bci(goto_callee, 0); 4366 _block->set_end(goto_callee); 4367 callee_start_block->merge(callee_state, compilation()->has_irreducible_loops()); 4368 4369 _last = _block = callee_start_block; 4370 4371 scope_data()->add_to_work_list(callee_start_block); 4372 } 4373 4374 // Clear out bytecode stream 4375 scope_data()->set_stream(nullptr); 4376 scope_data()->set_ignore_return(ignore_return); 4377 4378 CompileLog* log = compilation()->log(); 4379 if (log != nullptr) log->head("parse method='%d'", log->identify(callee)); 4380 4381 // Ready to resume parsing in callee (either in the same block we 4382 // were in before or in the callee's start block) 4383 iterate_all_blocks(callee_start_block == nullptr); 4384 4385 if (log != nullptr) log->done("parse"); 4386 4387 // If we bailed out during parsing, return immediately (this is bad news) 4388 if (bailed_out()) 4389 return false; 4390 4391 // iterate_all_blocks theoretically traverses in random order; in 4392 // practice, we have only traversed the continuation if we are 4393 // inlining into a subroutine 4394 assert(continuation_existed || 4395 !continuation()->is_set(BlockBegin::was_visited_flag), 4396 "continuation should not have been parsed yet if we created it"); 4397 4398 // At this point we are almost ready to return and resume parsing of 4399 // the caller back in the GraphBuilder. The only thing we want to do 4400 // first is an optimization: during parsing of the callee we 4401 // generated at least one Goto to the continuation block. If we 4402 // generated exactly one, and if the inlined method spanned exactly 4403 // one block (and we didn't have to Goto its entry), then we snip 4404 // off the Goto to the continuation, allowing control to fall 4405 // through back into the caller block and effectively performing 4406 // block merging. This allows load elimination and CSE to take place 4407 // across multiple callee scopes if they are relatively simple, and 4408 // is currently essential to making inlining profitable. 4409 if (num_returns() == 1 4410 && block() == orig_block 4411 && block() == inline_cleanup_block()) { 4412 _last = inline_cleanup_return_prev(); 4413 _state = inline_cleanup_state(); 4414 } else if (continuation_preds == cont->number_of_preds()) { 4415 // Inlining caused that the instructions after the invoke in the 4416 // caller are not reachable any more. So skip filling this block 4417 // with instructions! 4418 assert(cont == continuation(), ""); 4419 assert(_last && _last->as_BlockEnd(), ""); 4420 _skip_block = true; 4421 } else { 4422 // Resume parsing in continuation block unless it was already parsed. 4423 // Note that if we don't change _last here, iteration in 4424 // iterate_bytecodes_for_block will stop when we return. 4425 if (!continuation()->is_set(BlockBegin::was_visited_flag)) { 4426 // add continuation to work list instead of parsing it immediately 4427 assert(_last && _last->as_BlockEnd(), ""); 4428 scope_data()->parent()->add_to_work_list(continuation()); 4429 _skip_block = true; 4430 } 4431 } 4432 4433 // Fill the exception handler for synchronized methods with instructions 4434 if (callee->is_synchronized() && sync_handler->state() != nullptr) { 4435 fill_sync_handler(lock, sync_handler); 4436 } else { 4437 pop_scope(); 4438 } 4439 4440 compilation()->notice_inlined_method(callee); 4441 4442 return true; 4443 } 4444 4445 4446 bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return) { 4447 ValueStack* state_before = copy_state_before(); 4448 vmIntrinsics::ID iid = callee->intrinsic_id(); 4449 switch (iid) { 4450 case vmIntrinsics::_invokeBasic: 4451 { 4452 // get MethodHandle receiver 4453 const int args_base = state()->stack_size() - callee->arg_size(); 4454 ValueType* type = state()->stack_at(args_base)->type(); 4455 if (type->is_constant()) { 4456 ciObject* mh = type->as_ObjectType()->constant_value(); 4457 if (mh->is_method_handle()) { 4458 ciMethod* target = mh->as_method_handle()->get_vmtarget(); 4459 4460 // We don't do CHA here so only inline static and statically bindable methods. 4461 if (target->is_static() || target->can_be_statically_bound()) { 4462 if (ciMethod::is_consistent_info(callee, target)) { 4463 Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual; 4464 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); 4465 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) { 4466 return true; 4467 } 4468 } else { 4469 print_inlining(target, "signatures mismatch", /*success*/ false); 4470 } 4471 } else { 4472 assert(false, "no inlining through MH::invokeBasic"); // missing optimization opportunity due to suboptimal LF shape 4473 print_inlining(target, "not static or statically bindable", /*success*/ false); 4474 } 4475 } else { 4476 assert(mh->is_null_object(), "not a null"); 4477 print_inlining(callee, "receiver is always null", /*success*/ false); 4478 } 4479 } else { 4480 print_inlining(callee, "receiver not constant", /*success*/ false); 4481 } 4482 } 4483 break; 4484 4485 case vmIntrinsics::_linkToVirtual: 4486 case vmIntrinsics::_linkToStatic: 4487 case vmIntrinsics::_linkToSpecial: 4488 case vmIntrinsics::_linkToInterface: 4489 { 4490 // pop MemberName argument 4491 const int args_base = state()->stack_size() - callee->arg_size(); 4492 ValueType* type = apop()->type(); 4493 if (type->is_constant()) { 4494 ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget(); 4495 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); 4496 // If the target is another method handle invoke, try to recursively get 4497 // a better target. 4498 if (target->is_method_handle_intrinsic()) { 4499 if (try_method_handle_inline(target, ignore_return)) { 4500 return true; 4501 } 4502 } else if (!ciMethod::is_consistent_info(callee, target)) { 4503 print_inlining(target, "signatures mismatch", /*success*/ false); 4504 } else { 4505 ciSignature* signature = target->signature(); 4506 const int receiver_skip = target->is_static() ? 0 : 1; 4507 // Cast receiver to its type. 4508 if (!target->is_static()) { 4509 ciKlass* tk = signature->accessing_klass(); 4510 Value obj = state()->stack_at(args_base); 4511 if (obj->exact_type() == nullptr && 4512 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { 4513 TypeCast* c = new TypeCast(tk, obj, state_before); 4514 append(c); 4515 state()->stack_at_put(args_base, c); 4516 } 4517 } 4518 // Cast reference arguments to its type. 4519 for (int i = 0, j = 0; i < signature->count(); i++) { 4520 ciType* t = signature->type_at(i); 4521 if (t->is_klass()) { 4522 ciKlass* tk = t->as_klass(); 4523 Value obj = state()->stack_at(args_base + receiver_skip + j); 4524 if (obj->exact_type() == nullptr && 4525 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { 4526 TypeCast* c = new TypeCast(t, obj, state_before); 4527 append(c); 4528 state()->stack_at_put(args_base + receiver_skip + j, c); 4529 } 4530 } 4531 j += t->size(); // long and double take two slots 4532 } 4533 // We don't do CHA here so only inline static and statically bindable methods. 4534 if (target->is_static() || target->can_be_statically_bound()) { 4535 Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual; 4536 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) { 4537 return true; 4538 } 4539 } else { 4540 print_inlining(target, "not static or statically bindable", /*success*/ false); 4541 } 4542 } 4543 } else { 4544 print_inlining(callee, "MemberName not constant", /*success*/ false); 4545 } 4546 } 4547 break; 4548 4549 case vmIntrinsics::_linkToNative: 4550 print_inlining(callee, "native call", /*success*/ false); 4551 break; 4552 4553 default: 4554 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid)); 4555 break; 4556 } 4557 set_state(state_before->copy_for_parsing()); 4558 return false; 4559 } 4560 4561 4562 void GraphBuilder::inline_bailout(const char* msg) { 4563 assert(msg != nullptr, "inline bailout msg must exist"); 4564 _inline_bailout_msg = msg; 4565 } 4566 4567 4568 void GraphBuilder::clear_inline_bailout() { 4569 _inline_bailout_msg = nullptr; 4570 } 4571 4572 4573 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) { 4574 ScopeData* data = new ScopeData(nullptr); 4575 data->set_scope(scope); 4576 data->set_bci2block(bci2block); 4577 _scope_data = data; 4578 _block = start; 4579 } 4580 4581 4582 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) { 4583 IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false); 4584 scope()->add_callee(callee_scope); 4585 4586 BlockListBuilder blb(compilation(), callee_scope, -1); 4587 CHECK_BAILOUT(); 4588 4589 if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) { 4590 // this scope can be inlined directly into the caller so remove 4591 // the block at bci 0. 4592 blb.bci2block()->at_put(0, nullptr); 4593 } 4594 4595 set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci()))); 4596 4597 ScopeData* data = new ScopeData(scope_data()); 4598 data->set_scope(callee_scope); 4599 data->set_bci2block(blb.bci2block()); 4600 data->set_continuation(continuation); 4601 _scope_data = data; 4602 } 4603 4604 4605 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) { 4606 ScopeData* data = new ScopeData(scope_data()); 4607 data->set_parsing_jsr(); 4608 data->set_jsr_entry_bci(jsr_dest_bci); 4609 data->set_jsr_return_address_local(-1); 4610 // Must clone bci2block list as we will be mutating it in order to 4611 // properly clone all blocks in jsr region as well as exception 4612 // handlers containing rets 4613 BlockList* new_bci2block = new BlockList(bci2block()->length()); 4614 new_bci2block->appendAll(bci2block()); 4615 data->set_bci2block(new_bci2block); 4616 data->set_scope(scope()); 4617 data->setup_jsr_xhandlers(); 4618 data->set_continuation(continuation()); 4619 data->set_jsr_continuation(jsr_continuation); 4620 _scope_data = data; 4621 } 4622 4623 4624 void GraphBuilder::pop_scope() { 4625 int number_of_locks = scope()->number_of_locks(); 4626 _scope_data = scope_data()->parent(); 4627 // accumulate minimum number of monitor slots to be reserved 4628 scope()->set_min_number_of_locks(number_of_locks); 4629 } 4630 4631 4632 void GraphBuilder::pop_scope_for_jsr() { 4633 _scope_data = scope_data()->parent(); 4634 } 4635 4636 void GraphBuilder::append_unsafe_get(ciMethod* callee, BasicType t, bool is_volatile) { 4637 Values* args = state()->pop_arguments(callee->arg_size()); 4638 null_check(args->at(0)); 4639 Instruction* offset = args->at(2); 4640 #ifndef _LP64 4641 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4642 #endif 4643 Instruction* op = append(new UnsafeGet(t, args->at(1), offset, is_volatile)); 4644 push(op->type(), op); 4645 compilation()->set_has_unsafe_access(true); 4646 } 4647 4648 4649 void GraphBuilder::append_unsafe_put(ciMethod* callee, BasicType t, bool is_volatile) { 4650 Values* args = state()->pop_arguments(callee->arg_size()); 4651 null_check(args->at(0)); 4652 Instruction* offset = args->at(2); 4653 #ifndef _LP64 4654 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4655 #endif 4656 Value val = args->at(3); 4657 if (t == T_BOOLEAN) { 4658 Value mask = append(new Constant(new IntConstant(1))); 4659 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 4660 } 4661 Instruction* op = append(new UnsafePut(t, args->at(1), offset, val, is_volatile)); 4662 compilation()->set_has_unsafe_access(true); 4663 kill_all(); 4664 } 4665 4666 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) { 4667 ValueStack* state_before = copy_state_for_exception(); 4668 ValueType* result_type = as_ValueType(callee->return_type()); 4669 assert(result_type->is_int(), "int result"); 4670 Values* args = state()->pop_arguments(callee->arg_size()); 4671 4672 // Pop off some args to specially handle, then push back 4673 Value newval = args->pop(); 4674 Value cmpval = args->pop(); 4675 Value offset = args->pop(); 4676 Value src = args->pop(); 4677 Value unsafe_obj = args->pop(); 4678 4679 // Separately handle the unsafe arg. It is not needed for code 4680 // generation, but must be null checked 4681 null_check(unsafe_obj); 4682 4683 #ifndef _LP64 4684 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4685 #endif 4686 4687 args->push(src); 4688 args->push(offset); 4689 args->push(cmpval); 4690 args->push(newval); 4691 4692 // An unsafe CAS can alias with other field accesses, but we don't 4693 // know which ones so mark the state as no preserved. This will 4694 // cause CSE to invalidate memory across it. 4695 bool preserves_state = false; 4696 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state); 4697 append_split(result); 4698 push(result_type, result); 4699 compilation()->set_has_unsafe_access(true); 4700 } 4701 4702 void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) { 4703 // This intrinsic accesses byte[] array as char[] array. Computing the offsets 4704 // correctly requires matched array shapes. 4705 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE), 4706 "sanity: byte[] and char[] bases agree"); 4707 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2, 4708 "sanity: byte[] and char[] scales agree"); 4709 4710 ValueStack* state_before = copy_state_indexed_access(); 4711 compilation()->set_has_access_indexed(true); 4712 Values* args = state()->pop_arguments(callee->arg_size()); 4713 Value array = args->at(0); 4714 Value index = args->at(1); 4715 if (is_store) { 4716 Value value = args->at(2); 4717 Instruction* store = append(new StoreIndexed(array, index, nullptr, T_CHAR, value, state_before, false, true)); 4718 store->set_flag(Instruction::NeedsRangeCheckFlag, false); 4719 _memory->store_value(value); 4720 } else { 4721 Instruction* load = append(new LoadIndexed(array, index, nullptr, T_CHAR, state_before, true)); 4722 load->set_flag(Instruction::NeedsRangeCheckFlag, false); 4723 push(load->type(), load); 4724 } 4725 } 4726 4727 void GraphBuilder::append_alloc_array_copy(ciMethod* callee) { 4728 const int args_base = state()->stack_size() - callee->arg_size(); 4729 ciType* receiver_type = state()->stack_at(args_base)->exact_type(); 4730 if (receiver_type == nullptr) { 4731 inline_bailout("must have a receiver"); 4732 return; 4733 } 4734 if (!receiver_type->is_type_array_klass()) { 4735 inline_bailout("clone array not primitive"); 4736 return; 4737 } 4738 4739 ValueStack* state_before = copy_state_before(); 4740 state_before->set_force_reexecute(); 4741 Value src = apop(); 4742 BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type(); 4743 Value length = append(new ArrayLength(src, state_before)); 4744 Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false)); 4745 4746 ValueType* result_type = as_ValueType(callee->return_type()); 4747 vmIntrinsics::ID id = vmIntrinsics::_arraycopy; 4748 Values* args = new Values(5); 4749 args->push(src); 4750 args->push(append(new Constant(new IntConstant(0)))); 4751 args->push(new_array); 4752 args->push(append(new Constant(new IntConstant(0)))); 4753 args->push(length); 4754 const bool has_receiver = true; 4755 Intrinsic* array_copy = new Intrinsic(result_type, id, 4756 args, has_receiver, state_before, 4757 vmIntrinsics::preserves_state(id), 4758 vmIntrinsics::can_trap(id)); 4759 array_copy->set_flag(Instruction::OmitChecksFlag, true); 4760 append_split(array_copy); 4761 apush(new_array); 4762 append(new MemBar(lir_membar_storestore)); 4763 } 4764 4765 void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { 4766 CompileLog* log = compilation()->log(); 4767 if (log != nullptr) { 4768 assert(msg != nullptr, "inlining msg should not be null!"); 4769 if (success) { 4770 log->inline_success(msg); 4771 } else { 4772 log->inline_fail(msg); 4773 } 4774 } 4775 EventCompilerInlining event; 4776 if (event.should_commit()) { 4777 CompilerEvent::InlineEvent::post(event, compilation()->env()->task()->compile_id(), method()->get_Method(), callee, success, msg, bci()); 4778 } 4779 4780 CompileTask::print_inlining_ul(callee, scope()->level(), bci(), inlining_result_of(success), msg); 4781 4782 if (!compilation()->directive()->PrintInliningOption) { 4783 return; 4784 } 4785 CompileTask::print_inlining_tty(callee, scope()->level(), bci(), inlining_result_of(success), msg); 4786 if (success && CIPrintMethodCodes) { 4787 callee->print_codes(); 4788 } 4789 } 4790 4791 void GraphBuilder::append_unsafe_get_and_set(ciMethod* callee, bool is_add) { 4792 Values* args = state()->pop_arguments(callee->arg_size()); 4793 BasicType t = callee->return_type()->basic_type(); 4794 null_check(args->at(0)); 4795 Instruction* offset = args->at(2); 4796 #ifndef _LP64 4797 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4798 #endif 4799 Instruction* op = append(new UnsafeGetAndSet(t, args->at(1), offset, args->at(3), is_add)); 4800 compilation()->set_has_unsafe_access(true); 4801 kill_all(); 4802 push(op->type(), op); 4803 } 4804 4805 #ifndef PRODUCT 4806 void GraphBuilder::print_stats() { 4807 if (UseLocalValueNumbering) { 4808 vmap()->print(); 4809 } 4810 } 4811 #endif // PRODUCT 4812 4813 void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) { 4814 assert(known_holder == nullptr || (known_holder->is_instance_klass() && 4815 (!known_holder->is_interface() || 4816 ((ciInstanceKlass*)known_holder)->has_nonstatic_concrete_methods())), "should be non-static concrete method"); 4817 if (known_holder != nullptr) { 4818 if (known_holder->exact_klass() == nullptr) { 4819 known_holder = compilation()->cha_exact_type(known_holder); 4820 } 4821 } 4822 4823 append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined)); 4824 } 4825 4826 void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) { 4827 assert((m == nullptr) == (invoke_bci < 0), "invalid method and invalid bci together"); 4828 if (m == nullptr) { 4829 m = method(); 4830 } 4831 if (invoke_bci < 0) { 4832 invoke_bci = bci(); 4833 } 4834 ciMethodData* md = m->method_data_or_null(); 4835 ciProfileData* data = md->bci_to_data(invoke_bci); 4836 if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 4837 bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return(); 4838 if (has_return) { 4839 append(new ProfileReturnType(m , invoke_bci, callee, ret)); 4840 } 4841 } 4842 } 4843 4844 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) { 4845 append(new ProfileInvoke(callee, state)); 4846 }