1 /* 2 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "c1/c1_CFGPrinter.hpp" 26 #include "c1/c1_Canonicalizer.hpp" 27 #include "c1/c1_Compilation.hpp" 28 #include "c1/c1_GraphBuilder.hpp" 29 #include "c1/c1_InstructionPrinter.hpp" 30 #include "ci/ciCallSite.hpp" 31 #include "ci/ciField.hpp" 32 #include "ci/ciKlass.hpp" 33 #include "ci/ciMemberName.hpp" 34 #include "ci/ciSymbols.hpp" 35 #include "ci/ciUtilities.inline.hpp" 36 #include "classfile/javaClasses.hpp" 37 #include "compiler/compilationPolicy.hpp" 38 #include "compiler/compileBroker.hpp" 39 #include "compiler/compilerEvent.hpp" 40 #include "interpreter/bytecode.hpp" 41 #include "jfr/jfrEvents.hpp" 42 #include "memory/resourceArea.hpp" 43 #include "oops/oop.inline.hpp" 44 #include "runtime/runtimeUpcalls.hpp" 45 #include "runtime/sharedRuntime.hpp" 46 #include "utilities/checkedCast.hpp" 47 #include "utilities/macros.hpp" 48 #if INCLUDE_JFR 49 #include "jfr/jfr.hpp" 50 #endif 51 52 class BlockListBuilder { 53 private: 54 Compilation* _compilation; 55 IRScope* _scope; 56 57 BlockList _blocks; // internal list of all blocks 58 BlockList* _bci2block; // mapping from bci to blocks for GraphBuilder 59 GrowableArray<BlockList> _bci2block_successors; // Mapping bcis to their blocks successors while we dont have a blockend 60 61 // fields used by mark_loops 62 ResourceBitMap _active; // for iteration of control flow graph 63 ResourceBitMap _visited; // for iteration of control flow graph 64 GrowableArray<ResourceBitMap> _loop_map; // caches the information if a block is contained in a loop 65 int _next_loop_index; // next free loop number 66 int _next_block_number; // for reverse postorder numbering of blocks 67 int _block_id_start; 68 69 int bit_number(int block_id) const { return block_id - _block_id_start; } 70 // accessors 71 Compilation* compilation() const { return _compilation; } 72 IRScope* scope() const { return _scope; } 73 ciMethod* method() const { return scope()->method(); } 74 XHandlers* xhandlers() const { return scope()->xhandlers(); } 75 76 // unified bailout support 77 void bailout(const char* msg) const { compilation()->bailout(msg); } 78 bool bailed_out() const { return compilation()->bailed_out(); } 79 80 // helper functions 81 BlockBegin* make_block_at(int bci, BlockBegin* predecessor); 82 void handle_exceptions(BlockBegin* current, int cur_bci); 83 void handle_jsr(BlockBegin* current, int sr_bci, int next_bci); 84 void store_one(BlockBegin* current, int local); 85 void store_two(BlockBegin* current, int local); 86 void set_entries(int osr_bci); 87 void set_leaders(); 88 89 void make_loop_header(BlockBegin* block); 90 void mark_loops(); 91 BitMap& mark_loops(BlockBegin* b, bool in_subroutine); 92 93 // debugging 94 #ifndef PRODUCT 95 void print(); 96 #endif 97 98 int number_of_successors(BlockBegin* block); 99 BlockBegin* successor_at(BlockBegin* block, int i); 100 void add_successor(BlockBegin* block, BlockBegin* sux); 101 bool is_successor(BlockBegin* block, BlockBegin* sux); 102 103 public: 104 // creation 105 BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci); 106 107 // accessors for GraphBuilder 108 BlockList* bci2block() const { return _bci2block; } 109 }; 110 111 112 // Implementation of BlockListBuilder 113 114 BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci) 115 : _compilation(compilation) 116 , _scope(scope) 117 , _blocks(16) 118 , _bci2block(new BlockList(scope->method()->code_size(), nullptr)) 119 , _bci2block_successors(scope->method()->code_size()) 120 , _active() // size not known yet 121 , _visited() // size not known yet 122 , _loop_map() // size not known yet 123 , _next_loop_index(0) 124 , _next_block_number(0) 125 , _block_id_start(0) 126 { 127 set_entries(osr_bci); 128 set_leaders(); 129 CHECK_BAILOUT(); 130 131 mark_loops(); 132 NOT_PRODUCT(if (PrintInitialBlockList) print()); 133 134 // _bci2block still contains blocks with _end == null and > 0 sux in _bci2block_successors. 135 136 #ifndef PRODUCT 137 if (PrintCFGToFile) { 138 stringStream title; 139 title.print("BlockListBuilder "); 140 scope->method()->print_name(&title); 141 CFGPrinter::print_cfg(_bci2block, title.freeze(), false, false); 142 } 143 #endif 144 } 145 146 147 void BlockListBuilder::set_entries(int osr_bci) { 148 // generate start blocks 149 BlockBegin* std_entry = make_block_at(0, nullptr); 150 if (scope()->caller() == nullptr) { 151 std_entry->set(BlockBegin::std_entry_flag); 152 } 153 if (osr_bci != -1) { 154 BlockBegin* osr_entry = make_block_at(osr_bci, nullptr); 155 osr_entry->set(BlockBegin::osr_entry_flag); 156 } 157 158 // generate exception entry blocks 159 XHandlers* list = xhandlers(); 160 const int n = list->length(); 161 for (int i = 0; i < n; i++) { 162 XHandler* h = list->handler_at(i); 163 BlockBegin* entry = make_block_at(h->handler_bci(), nullptr); 164 entry->set(BlockBegin::exception_entry_flag); 165 h->set_entry_block(entry); 166 } 167 } 168 169 170 BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) { 171 assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer"); 172 173 BlockBegin* block = _bci2block->at(cur_bci); 174 if (block == nullptr) { 175 block = new BlockBegin(cur_bci); 176 block->init_stores_to_locals(method()->max_locals()); 177 _bci2block->at_put(cur_bci, block); 178 _bci2block_successors.at_put_grow(cur_bci, BlockList()); 179 _blocks.append(block); 180 181 assert(predecessor == nullptr || predecessor->bci() < cur_bci, "targets for backward branches must already exist"); 182 } 183 184 if (predecessor != nullptr) { 185 if (block->is_set(BlockBegin::exception_entry_flag)) { 186 BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block); 187 } 188 189 add_successor(predecessor, block); 190 block->increment_total_preds(); 191 } 192 193 return block; 194 } 195 196 197 inline void BlockListBuilder::store_one(BlockBegin* current, int local) { 198 current->stores_to_locals().set_bit(local); 199 } 200 inline void BlockListBuilder::store_two(BlockBegin* current, int local) { 201 store_one(current, local); 202 store_one(current, local + 1); 203 } 204 205 206 void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) { 207 // Draws edges from a block to its exception handlers 208 XHandlers* list = xhandlers(); 209 const int n = list->length(); 210 211 for (int i = 0; i < n; i++) { 212 XHandler* h = list->handler_at(i); 213 214 if (h->covers(cur_bci)) { 215 BlockBegin* entry = h->entry_block(); 216 assert(entry != nullptr && entry == _bci2block->at(h->handler_bci()), "entry must be set"); 217 assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set"); 218 219 // add each exception handler only once 220 if(!is_successor(current, entry)) { 221 add_successor(current, entry); 222 entry->increment_total_preds(); 223 } 224 225 // stop when reaching catchall 226 if (h->catch_type() == 0) break; 227 } 228 } 229 } 230 231 void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) { 232 if (next_bci < method()->code_size()) { 233 // start a new block after jsr-bytecode and link this block into cfg 234 make_block_at(next_bci, current); 235 } 236 237 // start a new block at the subroutine entry at mark it with special flag 238 BlockBegin* sr_block = make_block_at(sr_bci, current); 239 if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) { 240 sr_block->set(BlockBegin::subroutine_entry_flag); 241 } 242 } 243 244 245 void BlockListBuilder::set_leaders() { 246 bool has_xhandlers = xhandlers()->has_handlers(); 247 BlockBegin* current = nullptr; 248 249 // The information which bci starts a new block simplifies the analysis 250 // Without it, backward branches could jump to a bci where no block was created 251 // during bytecode iteration. This would require the creation of a new block at the 252 // branch target and a modification of the successor lists. 253 const BitMap& bci_block_start = method()->bci_block_start(); 254 255 int end_bci = method()->code_size(); 256 257 ciBytecodeStream s(method()); 258 while (s.next() != ciBytecodeStream::EOBC()) { 259 int cur_bci = s.cur_bci(); 260 261 if (bci_block_start.at(cur_bci)) { 262 current = make_block_at(cur_bci, current); 263 } 264 assert(current != nullptr, "must have current block"); 265 266 if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) { 267 handle_exceptions(current, cur_bci); 268 } 269 270 switch (s.cur_bc()) { 271 // track stores to local variables for selective creation of phi functions 272 case Bytecodes::_iinc: store_one(current, s.get_index()); break; 273 case Bytecodes::_istore: store_one(current, s.get_index()); break; 274 case Bytecodes::_lstore: store_two(current, s.get_index()); break; 275 case Bytecodes::_fstore: store_one(current, s.get_index()); break; 276 case Bytecodes::_dstore: store_two(current, s.get_index()); break; 277 case Bytecodes::_astore: store_one(current, s.get_index()); break; 278 case Bytecodes::_istore_0: store_one(current, 0); break; 279 case Bytecodes::_istore_1: store_one(current, 1); break; 280 case Bytecodes::_istore_2: store_one(current, 2); break; 281 case Bytecodes::_istore_3: store_one(current, 3); break; 282 case Bytecodes::_lstore_0: store_two(current, 0); break; 283 case Bytecodes::_lstore_1: store_two(current, 1); break; 284 case Bytecodes::_lstore_2: store_two(current, 2); break; 285 case Bytecodes::_lstore_3: store_two(current, 3); break; 286 case Bytecodes::_fstore_0: store_one(current, 0); break; 287 case Bytecodes::_fstore_1: store_one(current, 1); break; 288 case Bytecodes::_fstore_2: store_one(current, 2); break; 289 case Bytecodes::_fstore_3: store_one(current, 3); break; 290 case Bytecodes::_dstore_0: store_two(current, 0); break; 291 case Bytecodes::_dstore_1: store_two(current, 1); break; 292 case Bytecodes::_dstore_2: store_two(current, 2); break; 293 case Bytecodes::_dstore_3: store_two(current, 3); break; 294 case Bytecodes::_astore_0: store_one(current, 0); break; 295 case Bytecodes::_astore_1: store_one(current, 1); break; 296 case Bytecodes::_astore_2: store_one(current, 2); break; 297 case Bytecodes::_astore_3: store_one(current, 3); break; 298 299 // track bytecodes that affect the control flow 300 case Bytecodes::_athrow: // fall through 301 case Bytecodes::_ret: // fall through 302 case Bytecodes::_ireturn: // fall through 303 case Bytecodes::_lreturn: // fall through 304 case Bytecodes::_freturn: // fall through 305 case Bytecodes::_dreturn: // fall through 306 case Bytecodes::_areturn: // fall through 307 case Bytecodes::_return: 308 current = nullptr; 309 break; 310 311 case Bytecodes::_ifeq: // fall through 312 case Bytecodes::_ifne: // fall through 313 case Bytecodes::_iflt: // fall through 314 case Bytecodes::_ifge: // fall through 315 case Bytecodes::_ifgt: // fall through 316 case Bytecodes::_ifle: // fall through 317 case Bytecodes::_if_icmpeq: // fall through 318 case Bytecodes::_if_icmpne: // fall through 319 case Bytecodes::_if_icmplt: // fall through 320 case Bytecodes::_if_icmpge: // fall through 321 case Bytecodes::_if_icmpgt: // fall through 322 case Bytecodes::_if_icmple: // fall through 323 case Bytecodes::_if_acmpeq: // fall through 324 case Bytecodes::_if_acmpne: // fall through 325 case Bytecodes::_ifnull: // fall through 326 case Bytecodes::_ifnonnull: 327 if (s.next_bci() < end_bci) { 328 make_block_at(s.next_bci(), current); 329 } 330 make_block_at(s.get_dest(), current); 331 current = nullptr; 332 break; 333 334 case Bytecodes::_goto: 335 make_block_at(s.get_dest(), current); 336 current = nullptr; 337 break; 338 339 case Bytecodes::_goto_w: 340 make_block_at(s.get_far_dest(), current); 341 current = nullptr; 342 break; 343 344 case Bytecodes::_jsr: 345 handle_jsr(current, s.get_dest(), s.next_bci()); 346 current = nullptr; 347 break; 348 349 case Bytecodes::_jsr_w: 350 handle_jsr(current, s.get_far_dest(), s.next_bci()); 351 current = nullptr; 352 break; 353 354 case Bytecodes::_tableswitch: { 355 // set block for each case 356 Bytecode_tableswitch sw(&s); 357 int l = sw.length(); 358 for (int i = 0; i < l; i++) { 359 make_block_at(cur_bci + sw.dest_offset_at(i), current); 360 } 361 make_block_at(cur_bci + sw.default_offset(), current); 362 current = nullptr; 363 break; 364 } 365 366 case Bytecodes::_lookupswitch: { 367 // set block for each case 368 Bytecode_lookupswitch sw(&s); 369 int l = sw.number_of_pairs(); 370 for (int i = 0; i < l; i++) { 371 make_block_at(cur_bci + sw.pair_at(i).offset(), current); 372 } 373 make_block_at(cur_bci + sw.default_offset(), current); 374 current = nullptr; 375 break; 376 } 377 378 default: 379 break; 380 } 381 } 382 } 383 384 385 void BlockListBuilder::mark_loops() { 386 ResourceMark rm; 387 388 const int number_of_blocks = _blocks.length(); 389 _active.initialize(number_of_blocks); 390 _visited.initialize(number_of_blocks); 391 _loop_map = GrowableArray<ResourceBitMap>(number_of_blocks, number_of_blocks, ResourceBitMap()); 392 for (int i = 0; i < number_of_blocks; i++) { 393 _loop_map.at(i).initialize(number_of_blocks); 394 } 395 _next_loop_index = 0; 396 _next_block_number = _blocks.length(); 397 398 // The loop detection algorithm works as follows: 399 // - We maintain the _loop_map, where for each block we have a bitmap indicating which loops contain this block. 400 // - The CFG is recursively traversed (depth-first) and if we detect a loop, we assign the loop a unique number that is stored 401 // in the bitmap associated with the loop header block. Until we return back through that loop header the bitmap contains 402 // only a single bit corresponding to the loop number. 403 // - The bit is then propagated for all the blocks in the loop after we exit them (post-order). There could be multiple bits 404 // of course in case of nested loops. 405 // - When we exit the loop header we remove that single bit and assign the real loop state for it. 406 // - Now, the tricky part here is how we detect irreducible loops. In the algorithm above the loop state bits 407 // are propagated to the predecessors. If we encounter an irreducible loop (a loop with multiple heads) we would see 408 // a node with some loop bit set that would then propagate back and be never cleared because we would 409 // never go back through the original loop header. Therefore if there are any irreducible loops the bits in the states 410 // for these loops are going to propagate back to the root. 411 BlockBegin* start = _bci2block->at(0); 412 _block_id_start = start->block_id(); 413 BitMap& loop_state = mark_loops(start, false); 414 if (!loop_state.is_empty()) { 415 compilation()->set_has_irreducible_loops(true); 416 } 417 assert(_next_block_number >= 0, "invalid block numbers"); 418 419 // Remove dangling Resource pointers before the ResourceMark goes out-of-scope. 420 _active.resize(0); 421 _visited.resize(0); 422 _loop_map.clear(); 423 } 424 425 void BlockListBuilder::make_loop_header(BlockBegin* block) { 426 int block_id = block->block_id(); 427 int block_bit = bit_number(block_id); 428 if (block->is_set(BlockBegin::exception_entry_flag)) { 429 // exception edges may look like loops but don't mark them as such 430 // since it screws up block ordering. 431 return; 432 } 433 if (!block->is_set(BlockBegin::parser_loop_header_flag)) { 434 block->set(BlockBegin::parser_loop_header_flag); 435 436 assert(_loop_map.at(block_bit).is_empty(), "must not be set yet"); 437 assert(0 <= _next_loop_index && _next_loop_index < _loop_map.length(), "_next_loop_index is too large"); 438 _loop_map.at(block_bit).set_bit(_next_loop_index++); 439 } else { 440 // block already marked as loop header 441 assert(_loop_map.at(block_bit).count_one_bits() == 1, "exactly one bit must be set"); 442 } 443 } 444 445 BitMap& BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) { 446 int block_id = block->block_id(); 447 int block_bit = bit_number(block_id); 448 if (_visited.at(block_bit)) { 449 if (_active.at(block_bit)) { 450 // reached block via backward branch 451 make_loop_header(block); 452 } 453 // return cached loop information for this block 454 return _loop_map.at(block_bit); 455 } 456 457 if (block->is_set(BlockBegin::subroutine_entry_flag)) { 458 in_subroutine = true; 459 } 460 461 // set active and visited bits before successors are processed 462 _visited.set_bit(block_bit); 463 _active.set_bit(block_bit); 464 465 ResourceMark rm; 466 ResourceBitMap loop_state(_loop_map.length()); 467 for (int i = number_of_successors(block) - 1; i >= 0; i--) { 468 BlockBegin* sux = successor_at(block, i); 469 // recursively process all successors 470 loop_state.set_union(mark_loops(sux, in_subroutine)); 471 } 472 473 // clear active-bit after all successors are processed 474 _active.clear_bit(block_bit); 475 476 // reverse-post-order numbering of all blocks 477 block->set_depth_first_number(_next_block_number); 478 _next_block_number--; 479 480 if (!loop_state.is_empty() || in_subroutine ) { 481 // block is contained at least in one loop, so phi functions are necessary 482 // phi functions are also necessary for all locals stored in a subroutine 483 scope()->requires_phi_function().set_union(block->stores_to_locals()); 484 } 485 486 if (block->is_set(BlockBegin::parser_loop_header_flag)) { 487 BitMap& header_loop_state = _loop_map.at(block_bit); 488 assert(header_loop_state.count_one_bits() == 1, "exactly one bit must be set"); 489 // remove the bit with the loop number for the state (header is outside of the loop) 490 loop_state.set_difference(header_loop_state); 491 } 492 493 // cache and return loop information for this block 494 _loop_map.at(block_bit).set_from(loop_state); 495 return _loop_map.at(block_bit); 496 } 497 498 inline int BlockListBuilder::number_of_successors(BlockBegin* block) 499 { 500 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 501 return _bci2block_successors.at(block->bci()).length(); 502 } 503 504 inline BlockBegin* BlockListBuilder::successor_at(BlockBegin* block, int i) 505 { 506 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 507 return _bci2block_successors.at(block->bci()).at(i); 508 } 509 510 inline void BlockListBuilder::add_successor(BlockBegin* block, BlockBegin* sux) 511 { 512 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 513 _bci2block_successors.at(block->bci()).append(sux); 514 } 515 516 inline bool BlockListBuilder::is_successor(BlockBegin* block, BlockBegin* sux) { 517 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 518 return _bci2block_successors.at(block->bci()).contains(sux); 519 } 520 521 #ifndef PRODUCT 522 523 static int compare_depth_first(BlockBegin** a, BlockBegin** b) { 524 return (*a)->depth_first_number() - (*b)->depth_first_number(); 525 } 526 527 void BlockListBuilder::print() { 528 tty->print("----- initial block list of BlockListBuilder for method "); 529 method()->print_short_name(); 530 tty->cr(); 531 532 // better readability if blocks are sorted in processing order 533 _blocks.sort(compare_depth_first); 534 535 for (int i = 0; i < _blocks.length(); i++) { 536 BlockBegin* cur = _blocks.at(i); 537 tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds()); 538 539 tty->print(cur->is_set(BlockBegin::std_entry_flag) ? " std" : " "); 540 tty->print(cur->is_set(BlockBegin::osr_entry_flag) ? " osr" : " "); 541 tty->print(cur->is_set(BlockBegin::exception_entry_flag) ? " ex" : " "); 542 tty->print(cur->is_set(BlockBegin::subroutine_entry_flag) ? " sr" : " "); 543 tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh" : " "); 544 545 if (number_of_successors(cur) > 0) { 546 tty->print(" sux: "); 547 for (int j = 0; j < number_of_successors(cur); j++) { 548 BlockBegin* sux = successor_at(cur, j); 549 tty->print("B%d ", sux->block_id()); 550 } 551 } 552 tty->cr(); 553 } 554 } 555 556 #endif 557 558 559 // A simple growable array of Values indexed by ciFields 560 class FieldBuffer: public CompilationResourceObj { 561 private: 562 GrowableArray<Value> _values; 563 564 public: 565 FieldBuffer() {} 566 567 void kill() { 568 _values.trunc_to(0); 569 } 570 571 Value at(ciField* field) { 572 assert(field->holder()->is_loaded(), "must be a loaded field"); 573 int offset = field->offset_in_bytes(); 574 if (offset < _values.length()) { 575 return _values.at(offset); 576 } else { 577 return nullptr; 578 } 579 } 580 581 void at_put(ciField* field, Value value) { 582 assert(field->holder()->is_loaded(), "must be a loaded field"); 583 int offset = field->offset_in_bytes(); 584 _values.at_put_grow(offset, value, nullptr); 585 } 586 587 }; 588 589 590 // MemoryBuffer is fairly simple model of the current state of memory. 591 // It partitions memory into several pieces. The first piece is 592 // generic memory where little is known about the owner of the memory. 593 // This is conceptually represented by the tuple <O, F, V> which says 594 // that the field F of object O has value V. This is flattened so 595 // that F is represented by the offset of the field and the parallel 596 // arrays _objects and _values are used for O and V. Loads of O.F can 597 // simply use V. Newly allocated objects are kept in a separate list 598 // along with a parallel array for each object which represents the 599 // current value of its fields. Stores of the default value to fields 600 // which have never been stored to before are eliminated since they 601 // are redundant. Once newly allocated objects are stored into 602 // another object or they are passed out of the current compile they 603 // are treated like generic memory. 604 605 class MemoryBuffer: public CompilationResourceObj { 606 private: 607 FieldBuffer _values; 608 GrowableArray<Value> _objects; 609 GrowableArray<Value> _newobjects; 610 GrowableArray<FieldBuffer*> _fields; 611 612 public: 613 MemoryBuffer() {} 614 615 StoreField* store(StoreField* st) { 616 if (!EliminateFieldAccess) { 617 return st; 618 } 619 620 Value object = st->obj(); 621 Value value = st->value(); 622 ciField* field = st->field(); 623 if (field->holder()->is_loaded()) { 624 int offset = field->offset_in_bytes(); 625 int index = _newobjects.find(object); 626 if (index != -1) { 627 // newly allocated object with no other stores performed on this field 628 FieldBuffer* buf = _fields.at(index); 629 if (buf->at(field) == nullptr && is_default_value(value)) { 630 #ifndef PRODUCT 631 if (PrintIRDuringConstruction && Verbose) { 632 tty->print_cr("Eliminated store for object %d:", index); 633 st->print_line(); 634 } 635 #endif 636 return nullptr; 637 } else { 638 buf->at_put(field, value); 639 } 640 } else { 641 _objects.at_put_grow(offset, object, nullptr); 642 _values.at_put(field, value); 643 } 644 645 store_value(value); 646 } else { 647 // if we held onto field names we could alias based on names but 648 // we don't know what's being stored to so kill it all. 649 kill(); 650 } 651 return st; 652 } 653 654 655 // return true if this value correspond to the default value of a field. 656 bool is_default_value(Value value) { 657 Constant* con = value->as_Constant(); 658 if (con) { 659 switch (con->type()->tag()) { 660 case intTag: return con->type()->as_IntConstant()->value() == 0; 661 case longTag: return con->type()->as_LongConstant()->value() == 0; 662 case floatTag: return jint_cast(con->type()->as_FloatConstant()->value()) == 0; 663 case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0); 664 case objectTag: return con->type() == objectNull; 665 default: ShouldNotReachHere(); 666 } 667 } 668 return false; 669 } 670 671 672 // return either the actual value of a load or the load itself 673 Value load(LoadField* load) { 674 if (!EliminateFieldAccess) { 675 return load; 676 } 677 678 if (strict_fp_requires_explicit_rounding && load->type()->is_float_kind()) { 679 #ifdef IA32 680 if (UseSSE < 2) { 681 // can't skip load since value might get rounded as a side effect 682 return load; 683 } 684 #else 685 Unimplemented(); 686 #endif // IA32 687 } 688 689 ciField* field = load->field(); 690 Value object = load->obj(); 691 if (field->holder()->is_loaded() && !field->is_volatile()) { 692 int offset = field->offset_in_bytes(); 693 Value result = nullptr; 694 int index = _newobjects.find(object); 695 if (index != -1) { 696 result = _fields.at(index)->at(field); 697 } else if (_objects.at_grow(offset, nullptr) == object) { 698 result = _values.at(field); 699 } 700 if (result != nullptr) { 701 #ifndef PRODUCT 702 if (PrintIRDuringConstruction && Verbose) { 703 tty->print_cr("Eliminated load: "); 704 load->print_line(); 705 } 706 #endif 707 assert(result->type()->tag() == load->type()->tag(), "wrong types"); 708 return result; 709 } 710 } 711 return load; 712 } 713 714 // Record this newly allocated object 715 void new_instance(NewInstance* object) { 716 int index = _newobjects.length(); 717 _newobjects.append(object); 718 if (_fields.at_grow(index, nullptr) == nullptr) { 719 _fields.at_put(index, new FieldBuffer()); 720 } else { 721 _fields.at(index)->kill(); 722 } 723 } 724 725 void store_value(Value value) { 726 int index = _newobjects.find(value); 727 if (index != -1) { 728 // stored a newly allocated object into another object. 729 // Assume we've lost track of it as separate slice of memory. 730 // We could do better by keeping track of whether individual 731 // fields could alias each other. 732 _newobjects.remove_at(index); 733 // pull out the field info and store it at the end up the list 734 // of field info list to be reused later. 735 _fields.append(_fields.at(index)); 736 _fields.remove_at(index); 737 } 738 } 739 740 void kill() { 741 _newobjects.trunc_to(0); 742 _objects.trunc_to(0); 743 _values.kill(); 744 } 745 }; 746 747 748 // Implementation of GraphBuilder's ScopeData 749 750 GraphBuilder::ScopeData::ScopeData(ScopeData* parent) 751 : _parent(parent) 752 , _bci2block(nullptr) 753 , _scope(nullptr) 754 , _has_handler(false) 755 , _stream(nullptr) 756 , _work_list(nullptr) 757 , _caller_stack_size(-1) 758 , _continuation(nullptr) 759 , _parsing_jsr(false) 760 , _jsr_xhandlers(nullptr) 761 , _num_returns(0) 762 , _cleanup_block(nullptr) 763 , _cleanup_return_prev(nullptr) 764 , _cleanup_state(nullptr) 765 , _ignore_return(false) 766 { 767 if (parent != nullptr) { 768 _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f); 769 } else { 770 _max_inline_size = C1MaxInlineSize; 771 } 772 if (_max_inline_size < C1MaxTrivialSize) { 773 _max_inline_size = C1MaxTrivialSize; 774 } 775 } 776 777 778 void GraphBuilder::kill_all() { 779 if (UseLocalValueNumbering) { 780 vmap()->kill_all(); 781 } 782 _memory->kill(); 783 } 784 785 786 BlockBegin* GraphBuilder::ScopeData::block_at(int bci) { 787 if (parsing_jsr()) { 788 // It is necessary to clone all blocks associated with a 789 // subroutine, including those for exception handlers in the scope 790 // of the method containing the jsr (because those exception 791 // handlers may contain ret instructions in some cases). 792 BlockBegin* block = bci2block()->at(bci); 793 if (block != nullptr && block == parent()->bci2block()->at(bci)) { 794 BlockBegin* new_block = new BlockBegin(block->bci()); 795 if (PrintInitialBlockList) { 796 tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr", 797 block->block_id(), block->bci(), new_block->block_id()); 798 } 799 // copy data from cloned blocked 800 new_block->set_depth_first_number(block->depth_first_number()); 801 if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag); 802 // Preserve certain flags for assertion checking 803 if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag); 804 if (block->is_set(BlockBegin::exception_entry_flag)) new_block->set(BlockBegin::exception_entry_flag); 805 806 // copy was_visited_flag to allow early detection of bailouts 807 // if a block that is used in a jsr has already been visited before, 808 // it is shared between the normal control flow and a subroutine 809 // BlockBegin::try_merge returns false when the flag is set, this leads 810 // to a compilation bailout 811 if (block->is_set(BlockBegin::was_visited_flag)) new_block->set(BlockBegin::was_visited_flag); 812 813 bci2block()->at_put(bci, new_block); 814 block = new_block; 815 } 816 return block; 817 } else { 818 return bci2block()->at(bci); 819 } 820 } 821 822 823 XHandlers* GraphBuilder::ScopeData::xhandlers() const { 824 if (_jsr_xhandlers == nullptr) { 825 assert(!parsing_jsr(), ""); 826 return scope()->xhandlers(); 827 } 828 assert(parsing_jsr(), ""); 829 return _jsr_xhandlers; 830 } 831 832 833 void GraphBuilder::ScopeData::set_scope(IRScope* scope) { 834 _scope = scope; 835 bool parent_has_handler = false; 836 if (parent() != nullptr) { 837 parent_has_handler = parent()->has_handler(); 838 } 839 _has_handler = parent_has_handler || scope->xhandlers()->has_handlers(); 840 } 841 842 843 void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block, 844 Instruction* return_prev, 845 ValueStack* return_state) { 846 _cleanup_block = block; 847 _cleanup_return_prev = return_prev; 848 _cleanup_state = return_state; 849 } 850 851 852 void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) { 853 if (_work_list == nullptr) { 854 _work_list = new BlockList(); 855 } 856 857 if (!block->is_set(BlockBegin::is_on_work_list_flag)) { 858 // Do not start parsing the continuation block while in a 859 // sub-scope 860 if (parsing_jsr()) { 861 if (block == jsr_continuation()) { 862 return; 863 } 864 } else { 865 if (block == continuation()) { 866 return; 867 } 868 } 869 block->set(BlockBegin::is_on_work_list_flag); 870 _work_list->push(block); 871 872 sort_top_into_worklist(_work_list, block); 873 } 874 } 875 876 877 void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) { 878 assert(worklist->top() == top, ""); 879 // sort block descending into work list 880 const int dfn = top->depth_first_number(); 881 assert(dfn != -1, "unknown depth first number"); 882 int i = worklist->length()-2; 883 while (i >= 0) { 884 BlockBegin* b = worklist->at(i); 885 if (b->depth_first_number() < dfn) { 886 worklist->at_put(i+1, b); 887 } else { 888 break; 889 } 890 i --; 891 } 892 if (i >= -1) worklist->at_put(i + 1, top); 893 } 894 895 896 BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() { 897 if (is_work_list_empty()) { 898 return nullptr; 899 } 900 return _work_list->pop(); 901 } 902 903 904 bool GraphBuilder::ScopeData::is_work_list_empty() const { 905 return (_work_list == nullptr || _work_list->length() == 0); 906 } 907 908 909 void GraphBuilder::ScopeData::setup_jsr_xhandlers() { 910 assert(parsing_jsr(), ""); 911 // clone all the exception handlers from the scope 912 XHandlers* handlers = new XHandlers(scope()->xhandlers()); 913 const int n = handlers->length(); 914 for (int i = 0; i < n; i++) { 915 // The XHandlers need to be adjusted to dispatch to the cloned 916 // handler block instead of the default one but the synthetic 917 // unlocker needs to be handled specially. The synthetic unlocker 918 // should be left alone since there can be only one and all code 919 // should dispatch to the same one. 920 XHandler* h = handlers->handler_at(i); 921 assert(h->handler_bci() != SynchronizationEntryBCI, "must be real"); 922 h->set_entry_block(block_at(h->handler_bci())); 923 } 924 _jsr_xhandlers = handlers; 925 } 926 927 928 int GraphBuilder::ScopeData::num_returns() { 929 if (parsing_jsr()) { 930 return parent()->num_returns(); 931 } 932 return _num_returns; 933 } 934 935 936 void GraphBuilder::ScopeData::incr_num_returns() { 937 if (parsing_jsr()) { 938 parent()->incr_num_returns(); 939 } else { 940 ++_num_returns; 941 } 942 } 943 944 945 // Implementation of GraphBuilder 946 947 #define INLINE_BAILOUT(msg) { inline_bailout(msg); return false; } 948 949 950 void GraphBuilder::load_constant() { 951 ciConstant con = stream()->get_constant(); 952 if (con.is_valid()) { 953 ValueType* t = illegalType; 954 ValueStack* patch_state = nullptr; 955 switch (con.basic_type()) { 956 case T_BOOLEAN: t = new IntConstant (con.as_boolean()); break; 957 case T_BYTE : t = new IntConstant (con.as_byte ()); break; 958 case T_CHAR : t = new IntConstant (con.as_char ()); break; 959 case T_SHORT : t = new IntConstant (con.as_short ()); break; 960 case T_INT : t = new IntConstant (con.as_int ()); break; 961 case T_LONG : t = new LongConstant (con.as_long ()); break; 962 case T_FLOAT : t = new FloatConstant (con.as_float ()); break; 963 case T_DOUBLE : t = new DoubleConstant(con.as_double ()); break; 964 case T_ARRAY : // fall-through 965 case T_OBJECT : { 966 ciObject* obj = con.as_object(); 967 if (!obj->is_loaded() || (PatchALot && !stream()->is_string_constant())) { 968 // A Class, MethodType, MethodHandle, Dynamic, or String. 969 patch_state = copy_state_before(); 970 t = new ObjectConstant(obj); 971 } else { 972 // Might be a Class, MethodType, MethodHandle, or Dynamic constant 973 // result, which might turn out to be an array. 974 if (obj->is_null_object()) { 975 t = objectNull; 976 } else if (obj->is_array()) { 977 t = new ArrayConstant(obj->as_array()); 978 } else { 979 t = new InstanceConstant(obj->as_instance()); 980 } 981 } 982 break; 983 } 984 default: ShouldNotReachHere(); 985 } 986 Value x; 987 if (patch_state != nullptr) { 988 // Arbitrary memory effects from running BSM or class loading (using custom loader) during linkage. 989 bool kills_memory = stream()->is_dynamic_constant() || 990 (!stream()->is_string_constant() && !method()->holder()->has_trusted_loader()); 991 x = new Constant(t, patch_state, kills_memory); 992 } else { 993 x = new Constant(t); 994 } 995 996 // Unbox the value at runtime, if needed. 997 // ConstantDynamic entry can be of a primitive type, but it is cached in boxed form. 998 if (patch_state != nullptr) { 999 int cp_index = stream()->get_constant_pool_index(); 1000 BasicType type = stream()->get_basic_type_for_constant_at(cp_index); 1001 if (is_java_primitive(type)) { 1002 ciInstanceKlass* box_klass = ciEnv::current()->get_box_klass_for_primitive_type(type); 1003 assert(box_klass->is_loaded(), "sanity"); 1004 int offset = java_lang_boxing_object::value_offset(type); 1005 ciField* value_field = box_klass->get_field_by_offset(offset, false /*is_static*/); 1006 x = new LoadField(append(x), offset, value_field, false /*is_static*/, patch_state, false /*needs_patching*/); 1007 t = as_ValueType(type); 1008 } else { 1009 assert(is_reference_type(type), "not a reference: %s", type2name(type)); 1010 } 1011 } 1012 1013 push(t, append(x)); 1014 } else { 1015 BAILOUT("could not resolve a constant"); 1016 } 1017 } 1018 1019 1020 void GraphBuilder::load_local(ValueType* type, int index) { 1021 Value x = state()->local_at(index); 1022 assert(x != nullptr && !x->type()->is_illegal(), "access of illegal local variable"); 1023 push(type, x); 1024 } 1025 1026 1027 void GraphBuilder::store_local(ValueType* type, int index) { 1028 Value x = pop(type); 1029 store_local(state(), x, index); 1030 } 1031 1032 1033 void GraphBuilder::store_local(ValueStack* state, Value x, int index) { 1034 if (parsing_jsr()) { 1035 // We need to do additional tracking of the location of the return 1036 // address for jsrs since we don't handle arbitrary jsr/ret 1037 // constructs. Here we are figuring out in which circumstances we 1038 // need to bail out. 1039 if (x->type()->is_address()) { 1040 scope_data()->set_jsr_return_address_local(index); 1041 1042 // Also check parent jsrs (if any) at this time to see whether 1043 // they are using this local. We don't handle skipping over a 1044 // ret. 1045 for (ScopeData* cur_scope_data = scope_data()->parent(); 1046 cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope(); 1047 cur_scope_data = cur_scope_data->parent()) { 1048 if (cur_scope_data->jsr_return_address_local() == index) { 1049 BAILOUT("subroutine overwrites return address from previous subroutine"); 1050 } 1051 } 1052 } else if (index == scope_data()->jsr_return_address_local()) { 1053 scope_data()->set_jsr_return_address_local(-1); 1054 } 1055 } 1056 1057 state->store_local(index, round_fp(x)); 1058 } 1059 1060 1061 void GraphBuilder::load_indexed(BasicType type) { 1062 // In case of in block code motion in range check elimination 1063 ValueStack* state_before = copy_state_indexed_access(); 1064 compilation()->set_has_access_indexed(true); 1065 Value index = ipop(); 1066 Value array = apop(); 1067 Value length = nullptr; 1068 if (CSEArrayLength || 1069 (array->as_Constant() != nullptr) || 1070 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || 1071 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) || 1072 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) { 1073 length = append(new ArrayLength(array, state_before)); 1074 } 1075 push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before))); 1076 } 1077 1078 1079 void GraphBuilder::store_indexed(BasicType type) { 1080 // In case of in block code motion in range check elimination 1081 ValueStack* state_before = copy_state_indexed_access(); 1082 compilation()->set_has_access_indexed(true); 1083 Value value = pop(as_ValueType(type)); 1084 Value index = ipop(); 1085 Value array = apop(); 1086 Value length = nullptr; 1087 if (CSEArrayLength || 1088 (array->as_Constant() != nullptr) || 1089 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || 1090 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) || 1091 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) { 1092 length = append(new ArrayLength(array, state_before)); 1093 } 1094 ciType* array_type = array->declared_type(); 1095 bool check_boolean = false; 1096 if (array_type != nullptr) { 1097 if (array_type->is_loaded() && 1098 array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) { 1099 assert(type == T_BYTE, "boolean store uses bastore"); 1100 Value mask = append(new Constant(new IntConstant(1))); 1101 value = append(new LogicOp(Bytecodes::_iand, value, mask)); 1102 } 1103 } else if (type == T_BYTE) { 1104 check_boolean = true; 1105 } 1106 StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean); 1107 append(result); 1108 _memory->store_value(value); 1109 1110 if (type == T_OBJECT && is_profiling()) { 1111 // Note that we'd collect profile data in this method if we wanted it. 1112 compilation()->set_would_profile(true); 1113 1114 if (profile_checkcasts()) { 1115 result->set_profiled_method(method()); 1116 result->set_profiled_bci(bci()); 1117 result->set_should_profile(true); 1118 } 1119 } 1120 } 1121 1122 1123 void GraphBuilder::stack_op(Bytecodes::Code code) { 1124 switch (code) { 1125 case Bytecodes::_pop: 1126 { state()->raw_pop(); 1127 } 1128 break; 1129 case Bytecodes::_pop2: 1130 { state()->raw_pop(); 1131 state()->raw_pop(); 1132 } 1133 break; 1134 case Bytecodes::_dup: 1135 { Value w = state()->raw_pop(); 1136 state()->raw_push(w); 1137 state()->raw_push(w); 1138 } 1139 break; 1140 case Bytecodes::_dup_x1: 1141 { Value w1 = state()->raw_pop(); 1142 Value w2 = state()->raw_pop(); 1143 state()->raw_push(w1); 1144 state()->raw_push(w2); 1145 state()->raw_push(w1); 1146 } 1147 break; 1148 case Bytecodes::_dup_x2: 1149 { Value w1 = state()->raw_pop(); 1150 Value w2 = state()->raw_pop(); 1151 Value w3 = state()->raw_pop(); 1152 state()->raw_push(w1); 1153 state()->raw_push(w3); 1154 state()->raw_push(w2); 1155 state()->raw_push(w1); 1156 } 1157 break; 1158 case Bytecodes::_dup2: 1159 { Value w1 = state()->raw_pop(); 1160 Value w2 = state()->raw_pop(); 1161 state()->raw_push(w2); 1162 state()->raw_push(w1); 1163 state()->raw_push(w2); 1164 state()->raw_push(w1); 1165 } 1166 break; 1167 case Bytecodes::_dup2_x1: 1168 { Value w1 = state()->raw_pop(); 1169 Value w2 = state()->raw_pop(); 1170 Value w3 = state()->raw_pop(); 1171 state()->raw_push(w2); 1172 state()->raw_push(w1); 1173 state()->raw_push(w3); 1174 state()->raw_push(w2); 1175 state()->raw_push(w1); 1176 } 1177 break; 1178 case Bytecodes::_dup2_x2: 1179 { Value w1 = state()->raw_pop(); 1180 Value w2 = state()->raw_pop(); 1181 Value w3 = state()->raw_pop(); 1182 Value w4 = state()->raw_pop(); 1183 state()->raw_push(w2); 1184 state()->raw_push(w1); 1185 state()->raw_push(w4); 1186 state()->raw_push(w3); 1187 state()->raw_push(w2); 1188 state()->raw_push(w1); 1189 } 1190 break; 1191 case Bytecodes::_swap: 1192 { Value w1 = state()->raw_pop(); 1193 Value w2 = state()->raw_pop(); 1194 state()->raw_push(w1); 1195 state()->raw_push(w2); 1196 } 1197 break; 1198 default: 1199 ShouldNotReachHere(); 1200 break; 1201 } 1202 } 1203 1204 1205 void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) { 1206 Value y = pop(type); 1207 Value x = pop(type); 1208 Value res = new ArithmeticOp(code, x, y, state_before); 1209 // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level 1210 res = append(res); 1211 res = round_fp(res); 1212 push(type, res); 1213 } 1214 1215 1216 void GraphBuilder::negate_op(ValueType* type) { 1217 push(type, append(new NegateOp(pop(type)))); 1218 } 1219 1220 1221 void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) { 1222 Value s = ipop(); 1223 Value x = pop(type); 1224 // try to simplify 1225 // Note: This code should go into the canonicalizer as soon as it can 1226 // can handle canonicalized forms that contain more than one node. 1227 if (CanonicalizeNodes && code == Bytecodes::_iushr) { 1228 // pattern: x >>> s 1229 IntConstant* s1 = s->type()->as_IntConstant(); 1230 if (s1 != nullptr) { 1231 // pattern: x >>> s1, with s1 constant 1232 ShiftOp* l = x->as_ShiftOp(); 1233 if (l != nullptr && l->op() == Bytecodes::_ishl) { 1234 // pattern: (a << b) >>> s1 1235 IntConstant* s0 = l->y()->type()->as_IntConstant(); 1236 if (s0 != nullptr) { 1237 // pattern: (a << s0) >>> s1 1238 const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts 1239 const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts 1240 if (s0c == s1c) { 1241 if (s0c == 0) { 1242 // pattern: (a << 0) >>> 0 => simplify to: a 1243 ipush(l->x()); 1244 } else { 1245 // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant 1246 assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases"); 1247 const int m = checked_cast<int>(right_n_bits(BitsPerInt - s0c)); 1248 Value s = append(new Constant(new IntConstant(m))); 1249 ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s))); 1250 } 1251 return; 1252 } 1253 } 1254 } 1255 } 1256 } 1257 // could not simplify 1258 push(type, append(new ShiftOp(code, x, s))); 1259 } 1260 1261 1262 void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) { 1263 Value y = pop(type); 1264 Value x = pop(type); 1265 push(type, append(new LogicOp(code, x, y))); 1266 } 1267 1268 1269 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) { 1270 ValueStack* state_before = copy_state_before(); 1271 Value y = pop(type); 1272 Value x = pop(type); 1273 ipush(append(new CompareOp(code, x, y, state_before))); 1274 } 1275 1276 1277 void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) { 1278 push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to)))); 1279 } 1280 1281 1282 void GraphBuilder::increment() { 1283 int index = stream()->get_index(); 1284 int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]); 1285 load_local(intType, index); 1286 ipush(append(new Constant(new IntConstant(delta)))); 1287 arithmetic_op(intType, Bytecodes::_iadd); 1288 store_local(intType, index); 1289 } 1290 1291 1292 void GraphBuilder::_goto(int from_bci, int to_bci) { 1293 Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci); 1294 if (is_profiling()) { 1295 compilation()->set_would_profile(true); 1296 x->set_profiled_bci(bci()); 1297 if (profile_branches()) { 1298 x->set_profiled_method(method()); 1299 x->set_should_profile(true); 1300 } 1301 } 1302 append(x); 1303 } 1304 1305 1306 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) { 1307 BlockBegin* tsux = block_at(stream()->get_dest()); 1308 BlockBegin* fsux = block_at(stream()->next_bci()); 1309 bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci(); 1310 // In case of loop invariant code motion or predicate insertion 1311 // before the body of a loop the state is needed 1312 Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : nullptr, is_bb)); 1313 1314 assert(i->as_Goto() == nullptr || 1315 (i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) || 1316 (i->as_Goto()->sux_at(0) == fsux && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())), 1317 "safepoint state of Goto returned by canonicalizer incorrect"); 1318 1319 if (is_profiling()) { 1320 If* if_node = i->as_If(); 1321 if (if_node != nullptr) { 1322 // Note that we'd collect profile data in this method if we wanted it. 1323 compilation()->set_would_profile(true); 1324 // At level 2 we need the proper bci to count backedges 1325 if_node->set_profiled_bci(bci()); 1326 if (profile_branches()) { 1327 // Successors can be rotated by the canonicalizer, check for this case. 1328 if_node->set_profiled_method(method()); 1329 if_node->set_should_profile(true); 1330 if (if_node->tsux() == fsux) { 1331 if_node->set_swapped(true); 1332 } 1333 } 1334 return; 1335 } 1336 1337 // Check if this If was reduced to Goto. 1338 Goto *goto_node = i->as_Goto(); 1339 if (goto_node != nullptr) { 1340 compilation()->set_would_profile(true); 1341 goto_node->set_profiled_bci(bci()); 1342 if (profile_branches()) { 1343 goto_node->set_profiled_method(method()); 1344 goto_node->set_should_profile(true); 1345 // Find out which successor is used. 1346 if (goto_node->default_sux() == tsux) { 1347 goto_node->set_direction(Goto::taken); 1348 } else if (goto_node->default_sux() == fsux) { 1349 goto_node->set_direction(Goto::not_taken); 1350 } else { 1351 ShouldNotReachHere(); 1352 } 1353 } 1354 return; 1355 } 1356 } 1357 } 1358 1359 1360 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) { 1361 Value y = append(new Constant(intZero)); 1362 ValueStack* state_before = copy_state_before(); 1363 Value x = ipop(); 1364 if_node(x, cond, y, state_before); 1365 } 1366 1367 1368 void GraphBuilder::if_null(ValueType* type, If::Condition cond) { 1369 Value y = append(new Constant(objectNull)); 1370 ValueStack* state_before = copy_state_before(); 1371 Value x = apop(); 1372 if_node(x, cond, y, state_before); 1373 } 1374 1375 1376 void GraphBuilder::if_same(ValueType* type, If::Condition cond) { 1377 ValueStack* state_before = copy_state_before(); 1378 Value y = pop(type); 1379 Value x = pop(type); 1380 if_node(x, cond, y, state_before); 1381 } 1382 1383 1384 void GraphBuilder::jsr(int dest) { 1385 // We only handle well-formed jsrs (those which are "block-structured"). 1386 // If the bytecodes are strange (jumping out of a jsr block) then we 1387 // might end up trying to re-parse a block containing a jsr which 1388 // has already been activated. Watch for this case and bail out. 1389 if (next_bci() >= method()->code_size()) { 1390 // This can happen if the subroutine does not terminate with a ret, 1391 // effectively turning the jsr into a goto. 1392 BAILOUT("too-complicated jsr/ret structure"); 1393 } 1394 for (ScopeData* cur_scope_data = scope_data(); 1395 cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope(); 1396 cur_scope_data = cur_scope_data->parent()) { 1397 if (cur_scope_data->jsr_entry_bci() == dest) { 1398 BAILOUT("too-complicated jsr/ret structure"); 1399 } 1400 } 1401 1402 push(addressType, append(new Constant(new AddressConstant(next_bci())))); 1403 if (!try_inline_jsr(dest)) { 1404 return; // bailed out while parsing and inlining subroutine 1405 } 1406 } 1407 1408 1409 void GraphBuilder::ret(int local_index) { 1410 if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine"); 1411 1412 if (local_index != scope_data()->jsr_return_address_local()) { 1413 BAILOUT("can not handle complicated jsr/ret constructs"); 1414 } 1415 1416 // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation 1417 append(new Goto(scope_data()->jsr_continuation(), false)); 1418 } 1419 1420 1421 void GraphBuilder::table_switch() { 1422 Bytecode_tableswitch sw(stream()); 1423 const int l = sw.length(); 1424 if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) { 1425 // total of 2 successors => use If instead of switch 1426 // Note: This code should go into the canonicalizer as soon as it can 1427 // can handle canonicalized forms that contain more than one node. 1428 Value key = append(new Constant(new IntConstant(sw.low_key()))); 1429 BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0)); 1430 BlockBegin* fsux = block_at(bci() + sw.default_offset()); 1431 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); 1432 // In case of loop invariant code motion or predicate insertion 1433 // before the body of a loop the state is needed 1434 ValueStack* state_before = copy_state_if_bb(is_bb); 1435 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); 1436 } else { 1437 // collect successors 1438 BlockList* sux = new BlockList(l + 1, nullptr); 1439 int i; 1440 bool has_bb = false; 1441 for (i = 0; i < l; i++) { 1442 sux->at_put(i, block_at(bci() + sw.dest_offset_at(i))); 1443 if (sw.dest_offset_at(i) < 0) has_bb = true; 1444 } 1445 // add default successor 1446 if (sw.default_offset() < 0) has_bb = true; 1447 sux->at_put(i, block_at(bci() + sw.default_offset())); 1448 // In case of loop invariant code motion or predicate insertion 1449 // before the body of a loop the state is needed 1450 ValueStack* state_before = copy_state_if_bb(has_bb); 1451 Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb)); 1452 #ifdef ASSERT 1453 if (res->as_Goto()) { 1454 for (i = 0; i < l; i++) { 1455 if (sux->at(i) == res->as_Goto()->sux_at(0)) { 1456 assert(res->as_Goto()->is_safepoint() == (sw.dest_offset_at(i) < 0), "safepoint state of Goto returned by canonicalizer incorrect"); 1457 } 1458 } 1459 } 1460 #endif 1461 } 1462 } 1463 1464 1465 void GraphBuilder::lookup_switch() { 1466 Bytecode_lookupswitch sw(stream()); 1467 const int l = sw.number_of_pairs(); 1468 if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) { 1469 // total of 2 successors => use If instead of switch 1470 // Note: This code should go into the canonicalizer as soon as it can 1471 // can handle canonicalized forms that contain more than one node. 1472 // simplify to If 1473 LookupswitchPair pair = sw.pair_at(0); 1474 Value key = append(new Constant(new IntConstant(pair.match()))); 1475 BlockBegin* tsux = block_at(bci() + pair.offset()); 1476 BlockBegin* fsux = block_at(bci() + sw.default_offset()); 1477 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); 1478 // In case of loop invariant code motion or predicate insertion 1479 // before the body of a loop the state is needed 1480 ValueStack* state_before = copy_state_if_bb(is_bb);; 1481 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); 1482 } else { 1483 // collect successors & keys 1484 BlockList* sux = new BlockList(l + 1, nullptr); 1485 intArray* keys = new intArray(l, l, 0); 1486 int i; 1487 bool has_bb = false; 1488 for (i = 0; i < l; i++) { 1489 LookupswitchPair pair = sw.pair_at(i); 1490 if (pair.offset() < 0) has_bb = true; 1491 sux->at_put(i, block_at(bci() + pair.offset())); 1492 keys->at_put(i, pair.match()); 1493 } 1494 // add default successor 1495 if (sw.default_offset() < 0) has_bb = true; 1496 sux->at_put(i, block_at(bci() + sw.default_offset())); 1497 // In case of loop invariant code motion or predicate insertion 1498 // before the body of a loop the state is needed 1499 ValueStack* state_before = copy_state_if_bb(has_bb); 1500 Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb)); 1501 #ifdef ASSERT 1502 if (res->as_Goto()) { 1503 for (i = 0; i < l; i++) { 1504 if (sux->at(i) == res->as_Goto()->sux_at(0)) { 1505 assert(res->as_Goto()->is_safepoint() == (sw.pair_at(i).offset() < 0), "safepoint state of Goto returned by canonicalizer incorrect"); 1506 } 1507 } 1508 } 1509 #endif 1510 } 1511 } 1512 1513 void GraphBuilder::call_register_finalizer() { 1514 // If the receiver requires finalization then emit code to perform 1515 // the registration on return. 1516 1517 // Gather some type information about the receiver 1518 Value receiver = state()->local_at(0); 1519 assert(receiver != nullptr, "must have a receiver"); 1520 ciType* declared_type = receiver->declared_type(); 1521 ciType* exact_type = receiver->exact_type(); 1522 if (exact_type == nullptr && 1523 receiver->as_Local() && 1524 receiver->as_Local()->java_index() == 0) { 1525 ciInstanceKlass* ik = compilation()->method()->holder(); 1526 if (ik->is_final()) { 1527 exact_type = ik; 1528 } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) { 1529 // test class is leaf class 1530 compilation()->dependency_recorder()->assert_leaf_type(ik); 1531 exact_type = ik; 1532 } else { 1533 declared_type = ik; 1534 } 1535 } 1536 1537 // see if we know statically that registration isn't required 1538 bool needs_check = true; 1539 if (exact_type != nullptr) { 1540 needs_check = exact_type->as_instance_klass()->has_finalizer(); 1541 } else if (declared_type != nullptr) { 1542 ciInstanceKlass* ik = declared_type->as_instance_klass(); 1543 if (!Dependencies::has_finalizable_subclass(ik)) { 1544 compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik); 1545 needs_check = false; 1546 } 1547 } 1548 1549 if (needs_check) { 1550 // Perform the registration of finalizable objects. 1551 ValueStack* state_before = copy_state_for_exception(); 1552 load_local(objectType, 0); 1553 append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init, 1554 state()->pop_arguments(1), 1555 true, state_before, true)); 1556 } 1557 } 1558 1559 1560 void GraphBuilder::method_return(Value x, bool ignore_return) { 1561 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) { 1562 call_register_finalizer(); 1563 } 1564 1565 // The conditions for a memory barrier are described in Parse::do_exits(). 1566 bool need_mem_bar = false; 1567 if (method()->name() == ciSymbols::object_initializer_name() && 1568 (scope()->wrote_final() || scope()->wrote_stable() || 1569 (AlwaysSafeConstructors && scope()->wrote_fields()) || 1570 (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) { 1571 need_mem_bar = true; 1572 } 1573 1574 BasicType bt = method()->return_type()->basic_type(); 1575 switch (bt) { 1576 case T_BYTE: 1577 { 1578 Value shift = append(new Constant(new IntConstant(24))); 1579 x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); 1580 x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); 1581 break; 1582 } 1583 case T_SHORT: 1584 { 1585 Value shift = append(new Constant(new IntConstant(16))); 1586 x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); 1587 x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); 1588 break; 1589 } 1590 case T_CHAR: 1591 { 1592 Value mask = append(new Constant(new IntConstant(0xFFFF))); 1593 x = append(new LogicOp(Bytecodes::_iand, x, mask)); 1594 break; 1595 } 1596 case T_BOOLEAN: 1597 { 1598 Value mask = append(new Constant(new IntConstant(1))); 1599 x = append(new LogicOp(Bytecodes::_iand, x, mask)); 1600 break; 1601 } 1602 default: 1603 break; 1604 } 1605 1606 // Check to see whether we are inlining. If so, Return 1607 // instructions become Gotos to the continuation point. 1608 if (continuation() != nullptr) { 1609 1610 int invoke_bci = state()->caller_state()->bci(); 1611 1612 if (x != nullptr && !ignore_return) { 1613 ciMethod* caller = state()->scope()->caller()->method(); 1614 Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci); 1615 if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) { 1616 ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type(); 1617 if (declared_ret_type->is_klass() && x->exact_type() == nullptr && 1618 x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) { 1619 x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before())); 1620 } 1621 } 1622 } 1623 1624 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet"); 1625 1626 if (compilation()->env()->dtrace_method_probes()) { 1627 // Report exit from inline methods 1628 Values* args = new Values(1); 1629 args->push(append(new Constant(new MethodConstant(method())))); 1630 append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args)); 1631 } 1632 1633 // If the inlined method is synchronized, the monitor must be 1634 // released before we jump to the continuation block. 1635 if (method()->is_synchronized()) { 1636 assert(state()->locks_size() == 1, "receiver must be locked here"); 1637 monitorexit(state()->lock_at(0), SynchronizationEntryBCI); 1638 } 1639 1640 if (need_mem_bar) { 1641 append(new MemBar(lir_membar_storestore)); 1642 } 1643 1644 // State at end of inlined method is the state of the caller 1645 // without the method parameters on stack, including the 1646 // return value, if any, of the inlined method on operand stack. 1647 set_state(state()->caller_state()->copy_for_parsing()); 1648 if (x != nullptr) { 1649 if (!ignore_return) { 1650 state()->push(x->type(), x); 1651 } 1652 if (profile_return() && x->type()->is_object_kind()) { 1653 ciMethod* caller = state()->scope()->method(); 1654 profile_return_type(x, method(), caller, invoke_bci); 1655 } 1656 } 1657 Goto* goto_callee = new Goto(continuation(), false); 1658 1659 // See whether this is the first return; if so, store off some 1660 // of the state for later examination 1661 if (num_returns() == 0) { 1662 set_inline_cleanup_info(); 1663 } 1664 1665 // The current bci() is in the wrong scope, so use the bci() of 1666 // the continuation point. 1667 append_with_bci(goto_callee, scope_data()->continuation()->bci()); 1668 incr_num_returns(); 1669 return; 1670 } 1671 1672 state()->truncate_stack(0); 1673 if (method()->is_synchronized()) { 1674 // perform the unlocking before exiting the method 1675 Value receiver; 1676 if (!method()->is_static()) { 1677 receiver = _initial_state->local_at(0); 1678 } else { 1679 receiver = append(new Constant(new ClassConstant(method()->holder()))); 1680 } 1681 append_split(new MonitorExit(receiver, state()->unlock())); 1682 } 1683 1684 if (need_mem_bar) { 1685 append(new MemBar(lir_membar_storestore)); 1686 } 1687 1688 assert(!ignore_return, "Ignoring return value works only for inlining"); 1689 append(new Return(x)); 1690 } 1691 1692 Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) { 1693 if (!field_value.is_valid()) return nullptr; 1694 1695 BasicType field_type = field_value.basic_type(); 1696 ValueType* value = as_ValueType(field_value); 1697 1698 // Attach dimension info to stable arrays. 1699 if (FoldStableValues && 1700 field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) { 1701 ciArray* array = field_value.as_object()->as_array(); 1702 jint dimension = field->type()->as_array_klass()->dimension(); 1703 value = new StableArrayConstant(array, dimension); 1704 } 1705 1706 switch (field_type) { 1707 case T_ARRAY: 1708 case T_OBJECT: 1709 if (field_value.as_object()->should_be_constant()) { 1710 return new Constant(value); 1711 } 1712 return nullptr; // Not a constant. 1713 default: 1714 return new Constant(value); 1715 } 1716 } 1717 1718 void GraphBuilder::access_field(Bytecodes::Code code) { 1719 bool will_link; 1720 ciField* field = stream()->get_field(will_link); 1721 ciInstanceKlass* holder = field->holder(); 1722 BasicType field_type = field->type()->basic_type(); 1723 ValueType* type = as_ValueType(field_type); 1724 // call will_link again to determine if the field is valid. 1725 const bool needs_patching = !holder->is_loaded() || 1726 !field->will_link(method(), code) || 1727 PatchALot; 1728 1729 ValueStack* state_before = nullptr; 1730 if (!holder->is_initialized() || needs_patching) { 1731 // save state before instruction for debug info when 1732 // deoptimization happens during patching 1733 state_before = copy_state_before(); 1734 } 1735 1736 Value obj = nullptr; 1737 if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) { 1738 if (state_before != nullptr) { 1739 // build a patching constant 1740 obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before); 1741 } else { 1742 obj = new Constant(new InstanceConstant(holder->java_mirror())); 1743 } 1744 } 1745 1746 if (code == Bytecodes::_putfield) { 1747 scope()->set_wrote_fields(); 1748 if (field->is_volatile()) { 1749 scope()->set_wrote_volatile(); 1750 } 1751 if (field->is_final()) { 1752 scope()->set_wrote_final(); 1753 } 1754 if (field->is_stable()) { 1755 scope()->set_wrote_stable(); 1756 } 1757 } 1758 1759 const int offset = !needs_patching ? field->offset_in_bytes() : -1; 1760 switch (code) { 1761 case Bytecodes::_getstatic: { 1762 // check for compile-time constants, i.e., initialized static final fields 1763 Value constant = nullptr; 1764 if (field->is_static_constant() && !PatchALot) { 1765 ciConstant field_value = field->constant_value(); 1766 assert(!field->is_stable() || !field_value.is_null_or_zero(), 1767 "stable static w/ default value shouldn't be a constant"); 1768 constant = make_constant(field_value, field); 1769 } 1770 if (constant != nullptr) { 1771 push(type, append(constant)); 1772 } else { 1773 if (state_before == nullptr) { 1774 state_before = copy_state_for_exception(); 1775 } 1776 push(type, append(new LoadField(append(obj), offset, field, true, 1777 state_before, needs_patching))); 1778 } 1779 break; 1780 } 1781 case Bytecodes::_putstatic: { 1782 Value val = pop(type); 1783 if (state_before == nullptr) { 1784 state_before = copy_state_for_exception(); 1785 } 1786 if (field->type()->basic_type() == T_BOOLEAN) { 1787 Value mask = append(new Constant(new IntConstant(1))); 1788 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 1789 } 1790 append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching)); 1791 break; 1792 } 1793 case Bytecodes::_getfield: { 1794 // Check for compile-time constants, i.e., trusted final non-static fields. 1795 Value constant = nullptr; 1796 obj = apop(); 1797 ObjectType* obj_type = obj->type()->as_ObjectType(); 1798 if (field->is_constant() && obj_type->is_constant() && !PatchALot) { 1799 ciObject* const_oop = obj_type->constant_value(); 1800 if (!const_oop->is_null_object() && const_oop->is_loaded()) { 1801 ciConstant field_value = field->constant_value_of(const_oop); 1802 if (field_value.is_valid()) { 1803 constant = make_constant(field_value, field); 1804 // For CallSite objects add a dependency for invalidation of the optimization. 1805 if (field->is_call_site_target()) { 1806 ciCallSite* call_site = const_oop->as_call_site(); 1807 if (!call_site->is_fully_initialized_constant_call_site()) { 1808 ciMethodHandle* target = field_value.as_object()->as_method_handle(); 1809 dependency_recorder()->assert_call_site_target_value(call_site, target); 1810 } 1811 } 1812 } 1813 } 1814 } 1815 if (constant != nullptr) { 1816 push(type, append(constant)); 1817 } else { 1818 if (state_before == nullptr) { 1819 state_before = copy_state_for_exception(); 1820 } 1821 LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); 1822 Value replacement = !needs_patching ? _memory->load(load) : load; 1823 if (replacement != load) { 1824 assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); 1825 // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing 1826 // conversion. Emit an explicit conversion here to get the correct field value after the write. 1827 BasicType bt = field->type()->basic_type(); 1828 switch (bt) { 1829 case T_BOOLEAN: 1830 case T_BYTE: 1831 replacement = append(new Convert(Bytecodes::_i2b, replacement, as_ValueType(bt))); 1832 break; 1833 case T_CHAR: 1834 replacement = append(new Convert(Bytecodes::_i2c, replacement, as_ValueType(bt))); 1835 break; 1836 case T_SHORT: 1837 replacement = append(new Convert(Bytecodes::_i2s, replacement, as_ValueType(bt))); 1838 break; 1839 default: 1840 break; 1841 } 1842 push(type, replacement); 1843 } else { 1844 push(type, append(load)); 1845 } 1846 } 1847 break; 1848 } 1849 case Bytecodes::_putfield: { 1850 Value val = pop(type); 1851 obj = apop(); 1852 if (state_before == nullptr) { 1853 state_before = copy_state_for_exception(); 1854 } 1855 if (field->type()->basic_type() == T_BOOLEAN) { 1856 Value mask = append(new Constant(new IntConstant(1))); 1857 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 1858 } 1859 StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); 1860 if (!needs_patching) store = _memory->store(store); 1861 if (store != nullptr) { 1862 append(store); 1863 } 1864 break; 1865 } 1866 default: 1867 ShouldNotReachHere(); 1868 break; 1869 } 1870 } 1871 1872 1873 Dependencies* GraphBuilder::dependency_recorder() const { 1874 assert(DeoptC1, "need debug information"); 1875 return compilation()->dependency_recorder(); 1876 } 1877 1878 // How many arguments do we want to profile? 1879 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) { 1880 int n = 0; 1881 bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci())); 1882 start = has_receiver ? 1 : 0; 1883 if (profile_arguments()) { 1884 ciProfileData* data = method()->method_data()->bci_to_data(bci()); 1885 if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 1886 n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments(); 1887 } 1888 } 1889 // If we are inlining then we need to collect arguments to profile parameters for the target 1890 if (profile_parameters() && target != nullptr) { 1891 if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) { 1892 // The receiver is profiled on method entry so it's included in 1893 // the number of parameters but here we're only interested in 1894 // actual arguments. 1895 n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start); 1896 } 1897 } 1898 if (n > 0) { 1899 return new Values(n); 1900 } 1901 return nullptr; 1902 } 1903 1904 void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) { 1905 #ifdef ASSERT 1906 bool ignored_will_link; 1907 ciSignature* declared_signature = nullptr; 1908 ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); 1909 assert(expected == obj_args->capacity() || real_target->is_method_handle_intrinsic(), "missed on arg?"); 1910 #endif 1911 } 1912 1913 // Collect arguments that we want to profile in a list 1914 Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) { 1915 int start = 0; 1916 Values* obj_args = args_list_for_profiling(target, start, may_have_receiver); 1917 if (obj_args == nullptr) { 1918 return nullptr; 1919 } 1920 int s = obj_args->capacity(); 1921 // if called through method handle invoke, some arguments may have been popped 1922 for (int i = start, j = 0; j < s && i < args->length(); i++) { 1923 if (args->at(i)->type()->is_object_kind()) { 1924 obj_args->push(args->at(i)); 1925 j++; 1926 } 1927 } 1928 check_args_for_profiling(obj_args, s); 1929 return obj_args; 1930 } 1931 1932 void GraphBuilder::invoke(Bytecodes::Code code) { 1933 bool will_link; 1934 ciSignature* declared_signature = nullptr; 1935 ciMethod* target = stream()->get_method(will_link, &declared_signature); 1936 ciKlass* holder = stream()->get_declared_method_holder(); 1937 const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); 1938 assert(declared_signature != nullptr, "cannot be null"); 1939 assert(will_link == target->is_loaded(), ""); 1940 JFR_ONLY(Jfr::on_resolution(this, holder, target); CHECK_BAILOUT();) 1941 1942 ciInstanceKlass* klass = target->holder(); 1943 assert(!target->is_loaded() || klass->is_loaded(), "loaded target must imply loaded klass"); 1944 1945 // check if CHA possible: if so, change the code to invoke_special 1946 ciInstanceKlass* calling_klass = method()->holder(); 1947 ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); 1948 ciInstanceKlass* actual_recv = callee_holder; 1949 1950 CompileLog* log = compilation()->log(); 1951 if (log != nullptr) 1952 log->elem("call method='%d' instr='%s'", 1953 log->identify(target), 1954 Bytecodes::name(code)); 1955 1956 // Some methods are obviously bindable without any type checks so 1957 // convert them directly to an invokespecial or invokestatic. 1958 if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { 1959 switch (bc_raw) { 1960 case Bytecodes::_invokeinterface: 1961 // convert to invokespecial if the target is the private interface method. 1962 if (target->is_private()) { 1963 assert(holder->is_interface(), "How did we get a non-interface method here!"); 1964 code = Bytecodes::_invokespecial; 1965 } 1966 break; 1967 case Bytecodes::_invokevirtual: 1968 code = Bytecodes::_invokespecial; 1969 break; 1970 case Bytecodes::_invokehandle: 1971 code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial; 1972 break; 1973 default: 1974 break; 1975 } 1976 } else { 1977 if (bc_raw == Bytecodes::_invokehandle) { 1978 assert(!will_link, "should come here only for unlinked call"); 1979 code = Bytecodes::_invokespecial; 1980 } 1981 } 1982 1983 if (code == Bytecodes::_invokespecial) { 1984 // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface. 1985 ciKlass* receiver_constraint = nullptr; 1986 1987 if (bc_raw == Bytecodes::_invokeinterface) { 1988 receiver_constraint = holder; 1989 } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer() && calling_klass->is_interface()) { 1990 receiver_constraint = calling_klass; 1991 } 1992 1993 if (receiver_constraint != nullptr) { 1994 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); 1995 Value receiver = state()->stack_at(index); 1996 CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before()); 1997 // go to uncommon_trap when checkcast fails 1998 c->set_invokespecial_receiver_check(); 1999 state()->stack_at_put(index, append_split(c)); 2000 } 2001 } 2002 2003 // Push appendix argument (MethodType, CallSite, etc.), if one. 2004 bool patch_for_appendix = false; 2005 int patching_appendix_arg = 0; 2006 if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) { 2007 Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before())); 2008 apush(arg); 2009 patch_for_appendix = true; 2010 patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1; 2011 } else if (stream()->has_appendix()) { 2012 ciObject* appendix = stream()->get_appendix(); 2013 Value arg = append(new Constant(new ObjectConstant(appendix))); 2014 apush(arg); 2015 } 2016 2017 ciMethod* cha_monomorphic_target = nullptr; 2018 ciMethod* exact_target = nullptr; 2019 Value better_receiver = nullptr; 2020 if (UseCHA && DeoptC1 && target->is_loaded() && 2021 !(// %%% FIXME: Are both of these relevant? 2022 target->is_method_handle_intrinsic() || 2023 target->is_compiled_lambda_form()) && 2024 !patch_for_appendix) { 2025 Value receiver = nullptr; 2026 ciInstanceKlass* receiver_klass = nullptr; 2027 bool type_is_exact = false; 2028 // try to find a precise receiver type 2029 if (will_link && !target->is_static()) { 2030 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); 2031 receiver = state()->stack_at(index); 2032 ciType* type = receiver->exact_type(); 2033 if (type != nullptr && type->is_loaded()) { 2034 assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface"); 2035 // Detects non-interface instances, primitive arrays, and some object arrays. 2036 // Array receivers can only call Object methods, so we should be able to allow 2037 // all object arrays here too, even those with unloaded types. 2038 receiver_klass = (ciInstanceKlass*) type; 2039 type_is_exact = true; 2040 } 2041 if (type == nullptr) { 2042 type = receiver->declared_type(); 2043 if (type != nullptr && type->is_loaded() && 2044 type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { 2045 receiver_klass = (ciInstanceKlass*) type; 2046 if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) { 2047 // Insert a dependency on this type since 2048 // find_monomorphic_target may assume it's already done. 2049 dependency_recorder()->assert_leaf_type(receiver_klass); 2050 type_is_exact = true; 2051 } 2052 } 2053 } 2054 } 2055 if (receiver_klass != nullptr && type_is_exact && 2056 receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) { 2057 // If we have the exact receiver type we can bind directly to 2058 // the method to call. 2059 exact_target = target->resolve_invoke(calling_klass, receiver_klass); 2060 if (exact_target != nullptr) { 2061 target = exact_target; 2062 code = Bytecodes::_invokespecial; 2063 } 2064 } 2065 if (receiver_klass != nullptr && 2066 receiver_klass->is_subtype_of(actual_recv) && 2067 actual_recv->is_initialized()) { 2068 actual_recv = receiver_klass; 2069 } 2070 2071 if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) || 2072 (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) { 2073 // Use CHA on the receiver to select a more precise method. 2074 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv); 2075 } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != nullptr) { 2076 assert(callee_holder->is_interface(), "invokeinterface to non interface?"); 2077 // If there is only one implementor of this interface then we 2078 // may be able bind this invoke directly to the implementing 2079 // klass but we need both a dependence on the single interface 2080 // and on the method we bind to. Additionally since all we know 2081 // about the receiver type is the it's supposed to implement the 2082 // interface we have to insert a check that it's the class we 2083 // expect. Interface types are not checked by the verifier so 2084 // they are roughly equivalent to Object. 2085 // The number of implementors for declared_interface is less or 2086 // equal to the number of implementors for target->holder() so 2087 // if number of implementors of target->holder() == 1 then 2088 // number of implementors for decl_interface is 0 or 1. If 2089 // it's 0 then no class implements decl_interface and there's 2090 // no point in inlining. 2091 ciInstanceKlass* declared_interface = callee_holder; 2092 ciInstanceKlass* singleton = declared_interface->unique_implementor(); 2093 if (singleton != nullptr) { 2094 assert(singleton != declared_interface, "not a unique implementor"); 2095 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, declared_interface, singleton); 2096 if (cha_monomorphic_target != nullptr) { 2097 ciInstanceKlass* holder = cha_monomorphic_target->holder(); 2098 ciInstanceKlass* constraint = (holder->is_subtype_of(singleton) ? holder : singleton); // avoid upcasts 2099 if (holder != compilation()->env()->Object_klass() && 2100 (!type_is_exact || receiver_klass->is_subtype_of(constraint))) { 2101 actual_recv = declared_interface; 2102 2103 // insert a check it's really the expected class. 2104 CheckCast* c = new CheckCast(constraint, receiver, copy_state_for_exception()); 2105 c->set_incompatible_class_change_check(); 2106 c->set_direct_compare(constraint->is_final()); 2107 // pass the result of the checkcast so that the compiler has 2108 // more accurate type info in the inlinee 2109 better_receiver = append_split(c); 2110 2111 dependency_recorder()->assert_unique_implementor(declared_interface, singleton); 2112 } else { 2113 cha_monomorphic_target = nullptr; 2114 } 2115 } 2116 } 2117 } 2118 } 2119 2120 if (cha_monomorphic_target != nullptr) { 2121 assert(!target->can_be_statically_bound() || target == cha_monomorphic_target, ""); 2122 assert(!cha_monomorphic_target->is_abstract(), ""); 2123 if (!cha_monomorphic_target->can_be_statically_bound(actual_recv)) { 2124 // If we inlined because CHA revealed only a single target method, 2125 // then we are dependent on that target method not getting overridden 2126 // by dynamic class loading. Be sure to test the "static" receiver 2127 // dest_method here, as opposed to the actual receiver, which may 2128 // falsely lead us to believe that the receiver is final or private. 2129 dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target, callee_holder, target); 2130 } 2131 code = Bytecodes::_invokespecial; 2132 } 2133 2134 // check if we could do inlining 2135 if (!PatchALot && Inline && target->is_loaded() && !patch_for_appendix && 2136 callee_holder->is_loaded()) { // the effect of symbolic reference resolution 2137 2138 // callee is known => check if we have static binding 2139 if ((code == Bytecodes::_invokestatic && klass->is_initialized()) || // invokestatic involves an initialization barrier on declaring class 2140 code == Bytecodes::_invokespecial || 2141 (code == Bytecodes::_invokevirtual && target->is_final_method()) || 2142 code == Bytecodes::_invokedynamic) { 2143 // static binding => check if callee is ok 2144 ciMethod* inline_target = (cha_monomorphic_target != nullptr) ? cha_monomorphic_target : target; 2145 bool holder_known = (cha_monomorphic_target != nullptr) || (exact_target != nullptr); 2146 bool success = try_inline(inline_target, holder_known, false /* ignore_return */, code, better_receiver); 2147 2148 CHECK_BAILOUT(); 2149 clear_inline_bailout(); 2150 2151 if (success) { 2152 // Register dependence if JVMTI has either breakpoint 2153 // setting or hotswapping of methods capabilities since they may 2154 // cause deoptimization. 2155 if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) { 2156 dependency_recorder()->assert_evol_method(inline_target); 2157 } 2158 return; 2159 } 2160 } else { 2161 print_inlining(target, "no static binding", /*success*/ false); 2162 } 2163 } else { 2164 print_inlining(target, "not inlineable", /*success*/ false); 2165 } 2166 2167 // If we attempted an inline which did not succeed because of a 2168 // bailout during construction of the callee graph, the entire 2169 // compilation has to be aborted. This is fairly rare and currently 2170 // seems to only occur for jasm-generated classes which contain 2171 // jsr/ret pairs which are not associated with finally clauses and 2172 // do not have exception handlers in the containing method, and are 2173 // therefore not caught early enough to abort the inlining without 2174 // corrupting the graph. (We currently bail out with a non-empty 2175 // stack at a ret in these situations.) 2176 CHECK_BAILOUT(); 2177 2178 // inlining not successful => standard invoke 2179 ValueType* result_type = as_ValueType(declared_signature->return_type()); 2180 ValueStack* state_before = copy_state_exhandling(); 2181 2182 // The bytecode (code) might change in this method so we are checking this very late. 2183 const bool has_receiver = 2184 code == Bytecodes::_invokespecial || 2185 code == Bytecodes::_invokevirtual || 2186 code == Bytecodes::_invokeinterface; 2187 Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg); 2188 Value recv = has_receiver ? apop() : nullptr; 2189 2190 // A null check is required here (when there is a receiver) for any of the following cases 2191 // - invokespecial, always need a null check. 2192 // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized 2193 // and require null checking. If the target is loaded a null check is emitted here. 2194 // If the target isn't loaded the null check must happen after the call resolution. We achieve that 2195 // by using the target methods unverified entry point (see CompiledIC::compute_monomorphic_entry). 2196 // (The JVM specification requires that LinkageError must be thrown before a NPE. An unloaded target may 2197 // potentially fail, and can't have the null check before the resolution.) 2198 // - A call that will be profiled. (But we can't add a null check when the target is unloaded, by the same 2199 // reason as above, so calls with a receiver to unloaded targets can't be profiled.) 2200 // 2201 // Normal invokevirtual will perform the null check during lookup 2202 2203 bool need_null_check = (code == Bytecodes::_invokespecial) || 2204 (target->is_loaded() && (target->is_final_method() || (is_profiling() && profile_calls()))); 2205 2206 if (need_null_check) { 2207 if (recv != nullptr) { 2208 null_check(recv); 2209 } 2210 2211 if (is_profiling()) { 2212 // Note that we'd collect profile data in this method if we wanted it. 2213 compilation()->set_would_profile(true); 2214 2215 if (profile_calls()) { 2216 assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both can not be set"); 2217 ciKlass* target_klass = nullptr; 2218 if (cha_monomorphic_target != nullptr) { 2219 target_klass = cha_monomorphic_target->holder(); 2220 } else if (exact_target != nullptr) { 2221 target_klass = exact_target->holder(); 2222 } 2223 profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false); 2224 } 2225 } 2226 } 2227 2228 Invoke* result = new Invoke(code, result_type, recv, args, target, state_before); 2229 // push result 2230 append_split(result); 2231 2232 if (result_type != voidType) { 2233 push(result_type, round_fp(result)); 2234 } 2235 if (profile_return() && result_type->is_object_kind()) { 2236 profile_return_type(result, target); 2237 } 2238 } 2239 2240 2241 void GraphBuilder::new_instance(int klass_index) { 2242 ValueStack* state_before = copy_state_exhandling(); 2243 ciKlass* klass = stream()->get_klass(); 2244 assert(klass->is_instance_klass(), "must be an instance klass"); 2245 NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass()); 2246 _memory->new_instance(new_instance); 2247 apush(append_split(new_instance)); 2248 } 2249 2250 2251 void GraphBuilder::new_type_array() { 2252 ValueStack* state_before = copy_state_exhandling(); 2253 apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true))); 2254 } 2255 2256 2257 void GraphBuilder::new_object_array() { 2258 ciKlass* klass = stream()->get_klass(); 2259 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2260 NewArray* n = new NewObjectArray(klass, ipop(), state_before); 2261 apush(append_split(n)); 2262 } 2263 2264 2265 bool GraphBuilder::direct_compare(ciKlass* k) { 2266 if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) { 2267 ciInstanceKlass* ik = k->as_instance_klass(); 2268 if (ik->is_final()) { 2269 return true; 2270 } else { 2271 if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) { 2272 // test class is leaf class 2273 dependency_recorder()->assert_leaf_type(ik); 2274 return true; 2275 } 2276 } 2277 } 2278 return false; 2279 } 2280 2281 2282 void GraphBuilder::check_cast(int klass_index) { 2283 ciKlass* klass = stream()->get_klass(); 2284 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception(); 2285 CheckCast* c = new CheckCast(klass, apop(), state_before); 2286 apush(append_split(c)); 2287 c->set_direct_compare(direct_compare(klass)); 2288 2289 if (is_profiling()) { 2290 // Note that we'd collect profile data in this method if we wanted it. 2291 compilation()->set_would_profile(true); 2292 2293 if (profile_checkcasts()) { 2294 c->set_profiled_method(method()); 2295 c->set_profiled_bci(bci()); 2296 c->set_should_profile(true); 2297 } 2298 } 2299 } 2300 2301 2302 void GraphBuilder::instance_of(int klass_index) { 2303 ciKlass* klass = stream()->get_klass(); 2304 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2305 InstanceOf* i = new InstanceOf(klass, apop(), state_before); 2306 ipush(append_split(i)); 2307 i->set_direct_compare(direct_compare(klass)); 2308 2309 if (is_profiling()) { 2310 // Note that we'd collect profile data in this method if we wanted it. 2311 compilation()->set_would_profile(true); 2312 2313 if (profile_checkcasts()) { 2314 i->set_profiled_method(method()); 2315 i->set_profiled_bci(bci()); 2316 i->set_should_profile(true); 2317 } 2318 } 2319 } 2320 2321 2322 void GraphBuilder::monitorenter(Value x, int bci) { 2323 // save state before locking in case of deoptimization after a NullPointerException 2324 ValueStack* state_before = copy_state_for_exception_with_bci(bci); 2325 append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci); 2326 kill_all(); 2327 } 2328 2329 2330 void GraphBuilder::monitorexit(Value x, int bci) { 2331 append_with_bci(new MonitorExit(x, state()->unlock()), bci); 2332 kill_all(); 2333 } 2334 2335 2336 void GraphBuilder::new_multi_array(int dimensions) { 2337 ciKlass* klass = stream()->get_klass(); 2338 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2339 2340 Values* dims = new Values(dimensions, dimensions, nullptr); 2341 // fill in all dimensions 2342 int i = dimensions; 2343 while (i-- > 0) dims->at_put(i, ipop()); 2344 // create array 2345 NewArray* n = new NewMultiArray(klass, dims, state_before); 2346 apush(append_split(n)); 2347 } 2348 2349 2350 void GraphBuilder::throw_op(int bci) { 2351 // We require that the debug info for a Throw be the "state before" 2352 // the Throw (i.e., exception oop is still on TOS) 2353 ValueStack* state_before = copy_state_before_with_bci(bci); 2354 Throw* t = new Throw(apop(), state_before); 2355 // operand stack not needed after a throw 2356 state()->truncate_stack(0); 2357 append_with_bci(t, bci); 2358 } 2359 2360 2361 Value GraphBuilder::round_fp(Value fp_value) { 2362 if (strict_fp_requires_explicit_rounding) { 2363 #ifdef IA32 2364 // no rounding needed if SSE2 is used 2365 if (UseSSE < 2) { 2366 // Must currently insert rounding node for doubleword values that 2367 // are results of expressions (i.e., not loads from memory or 2368 // constants) 2369 if (fp_value->type()->tag() == doubleTag && 2370 fp_value->as_Constant() == nullptr && 2371 fp_value->as_Local() == nullptr && // method parameters need no rounding 2372 fp_value->as_RoundFP() == nullptr) { 2373 return append(new RoundFP(fp_value)); 2374 } 2375 } 2376 #else 2377 Unimplemented(); 2378 #endif // IA32 2379 } 2380 return fp_value; 2381 } 2382 2383 2384 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) { 2385 Canonicalizer canon(compilation(), instr, bci); 2386 Instruction* i1 = canon.canonical(); 2387 if (i1->is_linked() || !i1->can_be_linked()) { 2388 // Canonicalizer returned an instruction which was already 2389 // appended so simply return it. 2390 return i1; 2391 } 2392 2393 if (UseLocalValueNumbering) { 2394 // Lookup the instruction in the ValueMap and add it to the map if 2395 // it's not found. 2396 Instruction* i2 = vmap()->find_insert(i1); 2397 if (i2 != i1) { 2398 // found an entry in the value map, so just return it. 2399 assert(i2->is_linked(), "should already be linked"); 2400 return i2; 2401 } 2402 ValueNumberingEffects vne(vmap()); 2403 i1->visit(&vne); 2404 } 2405 2406 // i1 was not eliminated => append it 2407 assert(i1->next() == nullptr, "shouldn't already be linked"); 2408 _last = _last->set_next(i1, canon.bci()); 2409 2410 if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) { 2411 // set the bailout state but complete normal processing. We 2412 // might do a little more work before noticing the bailout so we 2413 // want processing to continue normally until it's noticed. 2414 bailout("Method and/or inlining is too large"); 2415 } 2416 2417 #ifndef PRODUCT 2418 if (PrintIRDuringConstruction) { 2419 InstructionPrinter ip; 2420 ip.print_line(i1); 2421 if (Verbose) { 2422 state()->print(); 2423 } 2424 } 2425 #endif 2426 2427 // save state after modification of operand stack for StateSplit instructions 2428 StateSplit* s = i1->as_StateSplit(); 2429 if (s != nullptr) { 2430 if (EliminateFieldAccess) { 2431 Intrinsic* intrinsic = s->as_Intrinsic(); 2432 if (s->as_Invoke() != nullptr || (intrinsic && !intrinsic->preserves_state())) { 2433 _memory->kill(); 2434 } 2435 } 2436 s->set_state(state()->copy(ValueStack::StateAfter, canon.bci())); 2437 } 2438 2439 // set up exception handlers for this instruction if necessary 2440 if (i1->can_trap()) { 2441 i1->set_exception_handlers(handle_exception(i1)); 2442 assert(i1->exception_state() != nullptr || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state"); 2443 } 2444 return i1; 2445 } 2446 2447 2448 Instruction* GraphBuilder::append(Instruction* instr) { 2449 assert(instr->as_StateSplit() == nullptr || instr->as_BlockEnd() != nullptr, "wrong append used"); 2450 return append_with_bci(instr, bci()); 2451 } 2452 2453 2454 Instruction* GraphBuilder::append_split(StateSplit* instr) { 2455 return append_with_bci(instr, bci()); 2456 } 2457 2458 2459 void GraphBuilder::null_check(Value value) { 2460 if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) { 2461 return; 2462 } else { 2463 Constant* con = value->as_Constant(); 2464 if (con) { 2465 ObjectType* c = con->type()->as_ObjectType(); 2466 if (c && c->is_loaded()) { 2467 ObjectConstant* oc = c->as_ObjectConstant(); 2468 if (!oc || !oc->value()->is_null_object()) { 2469 return; 2470 } 2471 } 2472 } 2473 } 2474 append(new NullCheck(value, copy_state_for_exception())); 2475 } 2476 2477 2478 2479 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) { 2480 if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) { 2481 assert(instruction->exception_state() == nullptr 2482 || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState 2483 || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()), 2484 "exception_state should be of exception kind"); 2485 return new XHandlers(); 2486 } 2487 2488 XHandlers* exception_handlers = new XHandlers(); 2489 ScopeData* cur_scope_data = scope_data(); 2490 ValueStack* cur_state = instruction->state_before(); 2491 ValueStack* prev_state = nullptr; 2492 int scope_count = 0; 2493 2494 assert(cur_state != nullptr, "state_before must be set"); 2495 do { 2496 int cur_bci = cur_state->bci(); 2497 assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match"); 2498 assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci"); 2499 2500 // join with all potential exception handlers 2501 XHandlers* list = cur_scope_data->xhandlers(); 2502 const int n = list->length(); 2503 for (int i = 0; i < n; i++) { 2504 XHandler* h = list->handler_at(i); 2505 if (h->covers(cur_bci)) { 2506 // h is a potential exception handler => join it 2507 compilation()->set_has_exception_handlers(true); 2508 2509 BlockBegin* entry = h->entry_block(); 2510 if (entry == block()) { 2511 // It's acceptable for an exception handler to cover itself 2512 // but we don't handle that in the parser currently. It's 2513 // very rare so we bailout instead of trying to handle it. 2514 BAILOUT_("exception handler covers itself", exception_handlers); 2515 } 2516 assert(entry->bci() == h->handler_bci(), "must match"); 2517 assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond"); 2518 2519 // previously this was a BAILOUT, but this is not necessary 2520 // now because asynchronous exceptions are not handled this way. 2521 assert(entry->state() == nullptr || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match"); 2522 2523 // xhandler start with an empty expression stack 2524 if (cur_state->stack_size() != 0) { 2525 // locals are preserved 2526 // stack will be truncated 2527 cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci()); 2528 } 2529 if (instruction->exception_state() == nullptr) { 2530 instruction->set_exception_state(cur_state); 2531 } 2532 2533 // Note: Usually this join must work. However, very 2534 // complicated jsr-ret structures where we don't ret from 2535 // the subroutine can cause the objects on the monitor 2536 // stacks to not match because blocks can be parsed twice. 2537 // The only test case we've seen so far which exhibits this 2538 // problem is caught by the infinite recursion test in 2539 // GraphBuilder::jsr() if the join doesn't work. 2540 if (!entry->try_merge(cur_state, compilation()->has_irreducible_loops())) { 2541 BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers); 2542 } 2543 2544 // add current state for correct handling of phi functions at begin of xhandler 2545 int phi_operand = entry->add_exception_state(cur_state); 2546 2547 // add entry to the list of xhandlers of this block 2548 _block->add_exception_handler(entry); 2549 2550 // add back-edge from xhandler entry to this block 2551 if (!entry->is_predecessor(_block)) { 2552 entry->add_predecessor(_block); 2553 } 2554 2555 // clone XHandler because phi_operand and scope_count can not be shared 2556 XHandler* new_xhandler = new XHandler(h); 2557 new_xhandler->set_phi_operand(phi_operand); 2558 new_xhandler->set_scope_count(scope_count); 2559 exception_handlers->append(new_xhandler); 2560 2561 // fill in exception handler subgraph lazily 2562 assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet"); 2563 cur_scope_data->add_to_work_list(entry); 2564 2565 // stop when reaching catchall 2566 if (h->catch_type() == 0) { 2567 return exception_handlers; 2568 } 2569 } 2570 } 2571 2572 if (exception_handlers->length() == 0) { 2573 // This scope and all callees do not handle exceptions, so the local 2574 // variables of this scope are not needed. However, the scope itself is 2575 // required for a correct exception stack trace -> clear out the locals. 2576 // Stack and locals are invalidated but not truncated in caller state. 2577 if (prev_state != nullptr) { 2578 assert(instruction->exception_state() != nullptr, "missed set?"); 2579 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(true /* caller */); 2580 cur_state = cur_state->copy(exc_kind, cur_state->bci()); 2581 // reset caller exception state 2582 prev_state->set_caller_state(cur_state); 2583 } else { 2584 assert(instruction->exception_state() == nullptr, "already set"); 2585 // set instruction exception state 2586 // truncate stack 2587 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(); 2588 cur_state = cur_state->copy(exc_kind, cur_state->bci()); 2589 instruction->set_exception_state(cur_state); 2590 } 2591 } 2592 2593 // Set up iteration for next time. 2594 // If parsing a jsr, do not grab exception handlers from the 2595 // parent scopes for this method (already got them, and they 2596 // needed to be cloned) 2597 2598 while (cur_scope_data->parsing_jsr()) { 2599 cur_scope_data = cur_scope_data->parent(); 2600 } 2601 2602 assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match"); 2603 assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler"); 2604 2605 prev_state = cur_state; 2606 cur_state = cur_state->caller_state(); 2607 cur_scope_data = cur_scope_data->parent(); 2608 scope_count++; 2609 } while (cur_scope_data != nullptr); 2610 2611 return exception_handlers; 2612 } 2613 2614 2615 // Helper class for simplifying Phis. 2616 class PhiSimplifier : public BlockClosure { 2617 private: 2618 bool _has_substitutions; 2619 Value simplify(Value v); 2620 2621 public: 2622 PhiSimplifier(BlockBegin* start) : _has_substitutions(false) { 2623 start->iterate_preorder(this); 2624 if (_has_substitutions) { 2625 SubstitutionResolver sr(start); 2626 } 2627 } 2628 void block_do(BlockBegin* b); 2629 bool has_substitutions() const { return _has_substitutions; } 2630 }; 2631 2632 2633 Value PhiSimplifier::simplify(Value v) { 2634 Phi* phi = v->as_Phi(); 2635 2636 if (phi == nullptr) { 2637 // no phi function 2638 return v; 2639 } else if (v->has_subst()) { 2640 // already substituted; subst can be phi itself -> simplify 2641 return simplify(v->subst()); 2642 } else if (phi->is_set(Phi::cannot_simplify)) { 2643 // already tried to simplify phi before 2644 return phi; 2645 } else if (phi->is_set(Phi::visited)) { 2646 // break cycles in phi functions 2647 return phi; 2648 } else if (phi->type()->is_illegal()) { 2649 // illegal phi functions are ignored anyway 2650 return phi; 2651 2652 } else { 2653 // mark phi function as processed to break cycles in phi functions 2654 phi->set(Phi::visited); 2655 2656 // simplify x = [y, x] and x = [y, y] to y 2657 Value subst = nullptr; 2658 int opd_count = phi->operand_count(); 2659 for (int i = 0; i < opd_count; i++) { 2660 Value opd = phi->operand_at(i); 2661 assert(opd != nullptr, "Operand must exist!"); 2662 2663 if (opd->type()->is_illegal()) { 2664 // if one operand is illegal, the entire phi function is illegal 2665 phi->make_illegal(); 2666 phi->clear(Phi::visited); 2667 return phi; 2668 } 2669 2670 Value new_opd = simplify(opd); 2671 assert(new_opd != nullptr, "Simplified operand must exist!"); 2672 2673 if (new_opd != phi && new_opd != subst) { 2674 if (subst == nullptr) { 2675 subst = new_opd; 2676 } else { 2677 // no simplification possible 2678 phi->set(Phi::cannot_simplify); 2679 phi->clear(Phi::visited); 2680 return phi; 2681 } 2682 } 2683 } 2684 2685 // successfully simplified phi function 2686 assert(subst != nullptr, "illegal phi function"); 2687 _has_substitutions = true; 2688 phi->clear(Phi::visited); 2689 phi->set_subst(subst); 2690 2691 #ifndef PRODUCT 2692 if (PrintPhiFunctions) { 2693 tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id()); 2694 } 2695 #endif 2696 2697 return subst; 2698 } 2699 } 2700 2701 2702 void PhiSimplifier::block_do(BlockBegin* b) { 2703 for_each_phi_fun(b, phi, 2704 simplify(phi); 2705 ); 2706 2707 #ifdef ASSERT 2708 for_each_phi_fun(b, phi, 2709 assert(phi->operand_count() != 1 || phi->subst() != phi || phi->is_illegal(), "missed trivial simplification"); 2710 ); 2711 2712 ValueStack* state = b->state()->caller_state(); 2713 for_each_state_value(state, value, 2714 Phi* phi = value->as_Phi(); 2715 assert(phi == nullptr || phi->block() != b, "must not have phi function to simplify in caller state"); 2716 ); 2717 #endif 2718 } 2719 2720 // This method is called after all blocks are filled with HIR instructions 2721 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x] 2722 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) { 2723 PhiSimplifier simplifier(start); 2724 } 2725 2726 2727 void GraphBuilder::connect_to_end(BlockBegin* beg) { 2728 // setup iteration 2729 kill_all(); 2730 _block = beg; 2731 _state = beg->state()->copy_for_parsing(); 2732 _last = beg; 2733 iterate_bytecodes_for_block(beg->bci()); 2734 } 2735 2736 2737 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) { 2738 #ifndef PRODUCT 2739 if (PrintIRDuringConstruction) { 2740 tty->cr(); 2741 InstructionPrinter ip; 2742 ip.print_instr(_block); tty->cr(); 2743 ip.print_stack(_block->state()); tty->cr(); 2744 ip.print_inline_level(_block); 2745 ip.print_head(); 2746 tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size()); 2747 } 2748 #endif 2749 _skip_block = false; 2750 assert(state() != nullptr, "ValueStack missing!"); 2751 CompileLog* log = compilation()->log(); 2752 ciBytecodeStream s(method()); 2753 s.reset_to_bci(bci); 2754 int prev_bci = bci; 2755 scope_data()->set_stream(&s); 2756 // iterate 2757 Bytecodes::Code code = Bytecodes::_illegal; 2758 bool push_exception = false; 2759 2760 if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == nullptr) { 2761 // first thing in the exception entry block should be the exception object. 2762 push_exception = true; 2763 } 2764 2765 bool ignore_return = scope_data()->ignore_return(); 2766 2767 while (!bailed_out() && last()->as_BlockEnd() == nullptr && 2768 (code = stream()->next()) != ciBytecodeStream::EOBC() && 2769 (block_at(s.cur_bci()) == nullptr || block_at(s.cur_bci()) == block())) { 2770 assert(state()->kind() == ValueStack::Parsing, "invalid state kind"); 2771 2772 if (log != nullptr) 2773 log->set_context("bc code='%d' bci='%d'", (int)code, s.cur_bci()); 2774 2775 // Check for active jsr during OSR compilation 2776 if (compilation()->is_osr_compile() 2777 && scope()->is_top_scope() 2778 && parsing_jsr() 2779 && s.cur_bci() == compilation()->osr_bci()) { 2780 bailout("OSR not supported while a jsr is active"); 2781 } 2782 2783 if (push_exception) { 2784 apush(append(new ExceptionObject())); 2785 push_exception = false; 2786 } 2787 2788 // handle bytecode 2789 switch (code) { 2790 case Bytecodes::_nop : /* nothing to do */ break; 2791 case Bytecodes::_aconst_null : apush(append(new Constant(objectNull ))); break; 2792 case Bytecodes::_iconst_m1 : ipush(append(new Constant(new IntConstant (-1)))); break; 2793 case Bytecodes::_iconst_0 : ipush(append(new Constant(intZero ))); break; 2794 case Bytecodes::_iconst_1 : ipush(append(new Constant(intOne ))); break; 2795 case Bytecodes::_iconst_2 : ipush(append(new Constant(new IntConstant ( 2)))); break; 2796 case Bytecodes::_iconst_3 : ipush(append(new Constant(new IntConstant ( 3)))); break; 2797 case Bytecodes::_iconst_4 : ipush(append(new Constant(new IntConstant ( 4)))); break; 2798 case Bytecodes::_iconst_5 : ipush(append(new Constant(new IntConstant ( 5)))); break; 2799 case Bytecodes::_lconst_0 : lpush(append(new Constant(new LongConstant ( 0)))); break; 2800 case Bytecodes::_lconst_1 : lpush(append(new Constant(new LongConstant ( 1)))); break; 2801 case Bytecodes::_fconst_0 : fpush(append(new Constant(new FloatConstant ( 0)))); break; 2802 case Bytecodes::_fconst_1 : fpush(append(new Constant(new FloatConstant ( 1)))); break; 2803 case Bytecodes::_fconst_2 : fpush(append(new Constant(new FloatConstant ( 2)))); break; 2804 case Bytecodes::_dconst_0 : dpush(append(new Constant(new DoubleConstant( 0)))); break; 2805 case Bytecodes::_dconst_1 : dpush(append(new Constant(new DoubleConstant( 1)))); break; 2806 case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break; 2807 case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break; 2808 case Bytecodes::_ldc : // fall through 2809 case Bytecodes::_ldc_w : // fall through 2810 case Bytecodes::_ldc2_w : load_constant(); break; 2811 case Bytecodes::_iload : load_local(intType , s.get_index()); break; 2812 case Bytecodes::_lload : load_local(longType , s.get_index()); break; 2813 case Bytecodes::_fload : load_local(floatType , s.get_index()); break; 2814 case Bytecodes::_dload : load_local(doubleType , s.get_index()); break; 2815 case Bytecodes::_aload : load_local(instanceType, s.get_index()); break; 2816 case Bytecodes::_iload_0 : load_local(intType , 0); break; 2817 case Bytecodes::_iload_1 : load_local(intType , 1); break; 2818 case Bytecodes::_iload_2 : load_local(intType , 2); break; 2819 case Bytecodes::_iload_3 : load_local(intType , 3); break; 2820 case Bytecodes::_lload_0 : load_local(longType , 0); break; 2821 case Bytecodes::_lload_1 : load_local(longType , 1); break; 2822 case Bytecodes::_lload_2 : load_local(longType , 2); break; 2823 case Bytecodes::_lload_3 : load_local(longType , 3); break; 2824 case Bytecodes::_fload_0 : load_local(floatType , 0); break; 2825 case Bytecodes::_fload_1 : load_local(floatType , 1); break; 2826 case Bytecodes::_fload_2 : load_local(floatType , 2); break; 2827 case Bytecodes::_fload_3 : load_local(floatType , 3); break; 2828 case Bytecodes::_dload_0 : load_local(doubleType, 0); break; 2829 case Bytecodes::_dload_1 : load_local(doubleType, 1); break; 2830 case Bytecodes::_dload_2 : load_local(doubleType, 2); break; 2831 case Bytecodes::_dload_3 : load_local(doubleType, 3); break; 2832 case Bytecodes::_aload_0 : load_local(objectType, 0); break; 2833 case Bytecodes::_aload_1 : load_local(objectType, 1); break; 2834 case Bytecodes::_aload_2 : load_local(objectType, 2); break; 2835 case Bytecodes::_aload_3 : load_local(objectType, 3); break; 2836 case Bytecodes::_iaload : load_indexed(T_INT ); break; 2837 case Bytecodes::_laload : load_indexed(T_LONG ); break; 2838 case Bytecodes::_faload : load_indexed(T_FLOAT ); break; 2839 case Bytecodes::_daload : load_indexed(T_DOUBLE); break; 2840 case Bytecodes::_aaload : load_indexed(T_OBJECT); break; 2841 case Bytecodes::_baload : load_indexed(T_BYTE ); break; 2842 case Bytecodes::_caload : load_indexed(T_CHAR ); break; 2843 case Bytecodes::_saload : load_indexed(T_SHORT ); break; 2844 case Bytecodes::_istore : store_local(intType , s.get_index()); break; 2845 case Bytecodes::_lstore : store_local(longType , s.get_index()); break; 2846 case Bytecodes::_fstore : store_local(floatType , s.get_index()); break; 2847 case Bytecodes::_dstore : store_local(doubleType, s.get_index()); break; 2848 case Bytecodes::_astore : store_local(objectType, s.get_index()); break; 2849 case Bytecodes::_istore_0 : store_local(intType , 0); break; 2850 case Bytecodes::_istore_1 : store_local(intType , 1); break; 2851 case Bytecodes::_istore_2 : store_local(intType , 2); break; 2852 case Bytecodes::_istore_3 : store_local(intType , 3); break; 2853 case Bytecodes::_lstore_0 : store_local(longType , 0); break; 2854 case Bytecodes::_lstore_1 : store_local(longType , 1); break; 2855 case Bytecodes::_lstore_2 : store_local(longType , 2); break; 2856 case Bytecodes::_lstore_3 : store_local(longType , 3); break; 2857 case Bytecodes::_fstore_0 : store_local(floatType , 0); break; 2858 case Bytecodes::_fstore_1 : store_local(floatType , 1); break; 2859 case Bytecodes::_fstore_2 : store_local(floatType , 2); break; 2860 case Bytecodes::_fstore_3 : store_local(floatType , 3); break; 2861 case Bytecodes::_dstore_0 : store_local(doubleType, 0); break; 2862 case Bytecodes::_dstore_1 : store_local(doubleType, 1); break; 2863 case Bytecodes::_dstore_2 : store_local(doubleType, 2); break; 2864 case Bytecodes::_dstore_3 : store_local(doubleType, 3); break; 2865 case Bytecodes::_astore_0 : store_local(objectType, 0); break; 2866 case Bytecodes::_astore_1 : store_local(objectType, 1); break; 2867 case Bytecodes::_astore_2 : store_local(objectType, 2); break; 2868 case Bytecodes::_astore_3 : store_local(objectType, 3); break; 2869 case Bytecodes::_iastore : store_indexed(T_INT ); break; 2870 case Bytecodes::_lastore : store_indexed(T_LONG ); break; 2871 case Bytecodes::_fastore : store_indexed(T_FLOAT ); break; 2872 case Bytecodes::_dastore : store_indexed(T_DOUBLE); break; 2873 case Bytecodes::_aastore : store_indexed(T_OBJECT); break; 2874 case Bytecodes::_bastore : store_indexed(T_BYTE ); break; 2875 case Bytecodes::_castore : store_indexed(T_CHAR ); break; 2876 case Bytecodes::_sastore : store_indexed(T_SHORT ); break; 2877 case Bytecodes::_pop : // fall through 2878 case Bytecodes::_pop2 : // fall through 2879 case Bytecodes::_dup : // fall through 2880 case Bytecodes::_dup_x1 : // fall through 2881 case Bytecodes::_dup_x2 : // fall through 2882 case Bytecodes::_dup2 : // fall through 2883 case Bytecodes::_dup2_x1 : // fall through 2884 case Bytecodes::_dup2_x2 : // fall through 2885 case Bytecodes::_swap : stack_op(code); break; 2886 case Bytecodes::_iadd : arithmetic_op(intType , code); break; 2887 case Bytecodes::_ladd : arithmetic_op(longType , code); break; 2888 case Bytecodes::_fadd : arithmetic_op(floatType , code); break; 2889 case Bytecodes::_dadd : arithmetic_op(doubleType, code); break; 2890 case Bytecodes::_isub : arithmetic_op(intType , code); break; 2891 case Bytecodes::_lsub : arithmetic_op(longType , code); break; 2892 case Bytecodes::_fsub : arithmetic_op(floatType , code); break; 2893 case Bytecodes::_dsub : arithmetic_op(doubleType, code); break; 2894 case Bytecodes::_imul : arithmetic_op(intType , code); break; 2895 case Bytecodes::_lmul : arithmetic_op(longType , code); break; 2896 case Bytecodes::_fmul : arithmetic_op(floatType , code); break; 2897 case Bytecodes::_dmul : arithmetic_op(doubleType, code); break; 2898 case Bytecodes::_idiv : arithmetic_op(intType , code, copy_state_for_exception()); break; 2899 case Bytecodes::_ldiv : arithmetic_op(longType , code, copy_state_for_exception()); break; 2900 case Bytecodes::_fdiv : arithmetic_op(floatType , code); break; 2901 case Bytecodes::_ddiv : arithmetic_op(doubleType, code); break; 2902 case Bytecodes::_irem : arithmetic_op(intType , code, copy_state_for_exception()); break; 2903 case Bytecodes::_lrem : arithmetic_op(longType , code, copy_state_for_exception()); break; 2904 case Bytecodes::_frem : arithmetic_op(floatType , code); break; 2905 case Bytecodes::_drem : arithmetic_op(doubleType, code); break; 2906 case Bytecodes::_ineg : negate_op(intType ); break; 2907 case Bytecodes::_lneg : negate_op(longType ); break; 2908 case Bytecodes::_fneg : negate_op(floatType ); break; 2909 case Bytecodes::_dneg : negate_op(doubleType); break; 2910 case Bytecodes::_ishl : shift_op(intType , code); break; 2911 case Bytecodes::_lshl : shift_op(longType, code); break; 2912 case Bytecodes::_ishr : shift_op(intType , code); break; 2913 case Bytecodes::_lshr : shift_op(longType, code); break; 2914 case Bytecodes::_iushr : shift_op(intType , code); break; 2915 case Bytecodes::_lushr : shift_op(longType, code); break; 2916 case Bytecodes::_iand : logic_op(intType , code); break; 2917 case Bytecodes::_land : logic_op(longType, code); break; 2918 case Bytecodes::_ior : logic_op(intType , code); break; 2919 case Bytecodes::_lor : logic_op(longType, code); break; 2920 case Bytecodes::_ixor : logic_op(intType , code); break; 2921 case Bytecodes::_lxor : logic_op(longType, code); break; 2922 case Bytecodes::_iinc : increment(); break; 2923 case Bytecodes::_i2l : convert(code, T_INT , T_LONG ); break; 2924 case Bytecodes::_i2f : convert(code, T_INT , T_FLOAT ); break; 2925 case Bytecodes::_i2d : convert(code, T_INT , T_DOUBLE); break; 2926 case Bytecodes::_l2i : convert(code, T_LONG , T_INT ); break; 2927 case Bytecodes::_l2f : convert(code, T_LONG , T_FLOAT ); break; 2928 case Bytecodes::_l2d : convert(code, T_LONG , T_DOUBLE); break; 2929 case Bytecodes::_f2i : convert(code, T_FLOAT , T_INT ); break; 2930 case Bytecodes::_f2l : convert(code, T_FLOAT , T_LONG ); break; 2931 case Bytecodes::_f2d : convert(code, T_FLOAT , T_DOUBLE); break; 2932 case Bytecodes::_d2i : convert(code, T_DOUBLE, T_INT ); break; 2933 case Bytecodes::_d2l : convert(code, T_DOUBLE, T_LONG ); break; 2934 case Bytecodes::_d2f : convert(code, T_DOUBLE, T_FLOAT ); break; 2935 case Bytecodes::_i2b : convert(code, T_INT , T_BYTE ); break; 2936 case Bytecodes::_i2c : convert(code, T_INT , T_CHAR ); break; 2937 case Bytecodes::_i2s : convert(code, T_INT , T_SHORT ); break; 2938 case Bytecodes::_lcmp : compare_op(longType , code); break; 2939 case Bytecodes::_fcmpl : compare_op(floatType , code); break; 2940 case Bytecodes::_fcmpg : compare_op(floatType , code); break; 2941 case Bytecodes::_dcmpl : compare_op(doubleType, code); break; 2942 case Bytecodes::_dcmpg : compare_op(doubleType, code); break; 2943 case Bytecodes::_ifeq : if_zero(intType , If::eql); break; 2944 case Bytecodes::_ifne : if_zero(intType , If::neq); break; 2945 case Bytecodes::_iflt : if_zero(intType , If::lss); break; 2946 case Bytecodes::_ifge : if_zero(intType , If::geq); break; 2947 case Bytecodes::_ifgt : if_zero(intType , If::gtr); break; 2948 case Bytecodes::_ifle : if_zero(intType , If::leq); break; 2949 case Bytecodes::_if_icmpeq : if_same(intType , If::eql); break; 2950 case Bytecodes::_if_icmpne : if_same(intType , If::neq); break; 2951 case Bytecodes::_if_icmplt : if_same(intType , If::lss); break; 2952 case Bytecodes::_if_icmpge : if_same(intType , If::geq); break; 2953 case Bytecodes::_if_icmpgt : if_same(intType , If::gtr); break; 2954 case Bytecodes::_if_icmple : if_same(intType , If::leq); break; 2955 case Bytecodes::_if_acmpeq : if_same(objectType, If::eql); break; 2956 case Bytecodes::_if_acmpne : if_same(objectType, If::neq); break; 2957 case Bytecodes::_goto : _goto(s.cur_bci(), s.get_dest()); break; 2958 case Bytecodes::_jsr : jsr(s.get_dest()); break; 2959 case Bytecodes::_ret : ret(s.get_index()); break; 2960 case Bytecodes::_tableswitch : table_switch(); break; 2961 case Bytecodes::_lookupswitch : lookup_switch(); break; 2962 case Bytecodes::_ireturn : method_return(ipop(), ignore_return); break; 2963 case Bytecodes::_lreturn : method_return(lpop(), ignore_return); break; 2964 case Bytecodes::_freturn : method_return(fpop(), ignore_return); break; 2965 case Bytecodes::_dreturn : method_return(dpop(), ignore_return); break; 2966 case Bytecodes::_areturn : method_return(apop(), ignore_return); break; 2967 case Bytecodes::_return : method_return(nullptr, ignore_return); break; 2968 case Bytecodes::_getstatic : // fall through 2969 case Bytecodes::_putstatic : // fall through 2970 case Bytecodes::_getfield : // fall through 2971 case Bytecodes::_putfield : access_field(code); break; 2972 case Bytecodes::_invokevirtual : // fall through 2973 case Bytecodes::_invokespecial : // fall through 2974 case Bytecodes::_invokestatic : // fall through 2975 case Bytecodes::_invokedynamic : // fall through 2976 case Bytecodes::_invokeinterface: invoke(code); break; 2977 case Bytecodes::_new : new_instance(s.get_index_u2()); break; 2978 case Bytecodes::_newarray : new_type_array(); break; 2979 case Bytecodes::_anewarray : new_object_array(); break; 2980 case Bytecodes::_arraylength : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; } 2981 case Bytecodes::_athrow : throw_op(s.cur_bci()); break; 2982 case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break; 2983 case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break; 2984 case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break; 2985 case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break; 2986 case Bytecodes::_wide : ShouldNotReachHere(); break; 2987 case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break; 2988 case Bytecodes::_ifnull : if_null(objectType, If::eql); break; 2989 case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break; 2990 case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break; 2991 case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break; 2992 case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", nullptr); 2993 default : ShouldNotReachHere(); break; 2994 } 2995 2996 if (log != nullptr) 2997 log->clear_context(); // skip marker if nothing was printed 2998 2999 // save current bci to setup Goto at the end 3000 prev_bci = s.cur_bci(); 3001 3002 } 3003 CHECK_BAILOUT_(nullptr); 3004 // stop processing of this block (see try_inline_full) 3005 if (_skip_block) { 3006 _skip_block = false; 3007 assert(_last && _last->as_BlockEnd(), ""); 3008 return _last->as_BlockEnd(); 3009 } 3010 // if there are any, check if last instruction is a BlockEnd instruction 3011 BlockEnd* end = last()->as_BlockEnd(); 3012 if (end == nullptr) { 3013 // all blocks must end with a BlockEnd instruction => add a Goto 3014 end = new Goto(block_at(s.cur_bci()), false); 3015 append(end); 3016 } 3017 assert(end == last()->as_BlockEnd(), "inconsistency"); 3018 3019 assert(end->state() != nullptr, "state must already be present"); 3020 assert(end->as_Return() == nullptr || end->as_Throw() == nullptr || end->state()->stack_size() == 0, "stack not needed for return and throw"); 3021 3022 // connect to begin & set state 3023 // NOTE that inlining may have changed the block we are parsing 3024 block()->set_end(end); 3025 // propagate state 3026 for (int i = end->number_of_sux() - 1; i >= 0; i--) { 3027 BlockBegin* sux = end->sux_at(i); 3028 assert(sux->is_predecessor(block()), "predecessor missing"); 3029 // be careful, bailout if bytecodes are strange 3030 if (!sux->try_merge(end->state(), compilation()->has_irreducible_loops())) BAILOUT_("block join failed", nullptr); 3031 scope_data()->add_to_work_list(end->sux_at(i)); 3032 } 3033 3034 scope_data()->set_stream(nullptr); 3035 3036 // done 3037 return end; 3038 } 3039 3040 3041 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) { 3042 do { 3043 if (start_in_current_block_for_inlining && !bailed_out()) { 3044 iterate_bytecodes_for_block(0); 3045 start_in_current_block_for_inlining = false; 3046 } else { 3047 BlockBegin* b; 3048 while ((b = scope_data()->remove_from_work_list()) != nullptr) { 3049 if (!b->is_set(BlockBegin::was_visited_flag)) { 3050 if (b->is_set(BlockBegin::osr_entry_flag)) { 3051 // we're about to parse the osr entry block, so make sure 3052 // we setup the OSR edge leading into this block so that 3053 // Phis get setup correctly. 3054 setup_osr_entry_block(); 3055 // this is no longer the osr entry block, so clear it. 3056 b->clear(BlockBegin::osr_entry_flag); 3057 } 3058 b->set(BlockBegin::was_visited_flag); 3059 connect_to_end(b); 3060 } 3061 } 3062 } 3063 } while (!bailed_out() && !scope_data()->is_work_list_empty()); 3064 } 3065 3066 3067 bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes]; 3068 3069 void GraphBuilder::initialize() { 3070 // the following bytecodes are assumed to potentially 3071 // throw exceptions in compiled code - note that e.g. 3072 // monitorexit & the return bytecodes do not throw 3073 // exceptions since monitor pairing proved that they 3074 // succeed (if monitor pairing succeeded) 3075 Bytecodes::Code can_trap_list[] = 3076 { Bytecodes::_ldc 3077 , Bytecodes::_ldc_w 3078 , Bytecodes::_ldc2_w 3079 , Bytecodes::_iaload 3080 , Bytecodes::_laload 3081 , Bytecodes::_faload 3082 , Bytecodes::_daload 3083 , Bytecodes::_aaload 3084 , Bytecodes::_baload 3085 , Bytecodes::_caload 3086 , Bytecodes::_saload 3087 , Bytecodes::_iastore 3088 , Bytecodes::_lastore 3089 , Bytecodes::_fastore 3090 , Bytecodes::_dastore 3091 , Bytecodes::_aastore 3092 , Bytecodes::_bastore 3093 , Bytecodes::_castore 3094 , Bytecodes::_sastore 3095 , Bytecodes::_idiv 3096 , Bytecodes::_ldiv 3097 , Bytecodes::_irem 3098 , Bytecodes::_lrem 3099 , Bytecodes::_getstatic 3100 , Bytecodes::_putstatic 3101 , Bytecodes::_getfield 3102 , Bytecodes::_putfield 3103 , Bytecodes::_invokevirtual 3104 , Bytecodes::_invokespecial 3105 , Bytecodes::_invokestatic 3106 , Bytecodes::_invokedynamic 3107 , Bytecodes::_invokeinterface 3108 , Bytecodes::_new 3109 , Bytecodes::_newarray 3110 , Bytecodes::_anewarray 3111 , Bytecodes::_arraylength 3112 , Bytecodes::_athrow 3113 , Bytecodes::_checkcast 3114 , Bytecodes::_instanceof 3115 , Bytecodes::_monitorenter 3116 , Bytecodes::_multianewarray 3117 }; 3118 3119 // inititialize trap tables 3120 for (int i = 0; i < Bytecodes::number_of_java_codes; i++) { 3121 _can_trap[i] = false; 3122 } 3123 // set standard trap info 3124 for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) { 3125 _can_trap[can_trap_list[j]] = true; 3126 } 3127 } 3128 3129 3130 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) { 3131 assert(entry->is_set(f), "entry/flag mismatch"); 3132 // create header block 3133 BlockBegin* h = new BlockBegin(entry->bci()); 3134 h->set_depth_first_number(0); 3135 3136 Value l = h; 3137 BlockEnd* g = new Goto(entry, false); 3138 l->set_next(g, entry->bci()); 3139 h->set_end(g); 3140 h->set(f); 3141 // setup header block end state 3142 ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis) 3143 assert(s->stack_is_empty(), "must have empty stack at entry point"); 3144 g->set_state(s); 3145 return h; 3146 } 3147 3148 3149 3150 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) { 3151 BlockBegin* start = new BlockBegin(0); 3152 3153 // This code eliminates the empty start block at the beginning of 3154 // each method. Previously, each method started with the 3155 // start-block created below, and this block was followed by the 3156 // header block that was always empty. This header block is only 3157 // necessary if std_entry is also a backward branch target because 3158 // then phi functions may be necessary in the header block. It's 3159 // also necessary when profiling so that there's a single block that 3160 // can increment the counters. 3161 // In addition, with range check elimination, we may need a valid block 3162 // that dominates all the rest to insert range predicates. 3163 BlockBegin* new_header_block; 3164 if (std_entry->number_of_preds() > 0 || is_profiling() || RangeCheckElimination) { 3165 new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state); 3166 } else { 3167 new_header_block = std_entry; 3168 } 3169 3170 // setup start block (root for the IR graph) 3171 Base* base = 3172 new Base( 3173 new_header_block, 3174 osr_entry 3175 ); 3176 start->set_next(base, 0); 3177 start->set_end(base); 3178 // create & setup state for start block 3179 start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci())); 3180 base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci())); 3181 3182 if (base->std_entry()->state() == nullptr) { 3183 // setup states for header blocks 3184 base->std_entry()->merge(state, compilation()->has_irreducible_loops()); 3185 } 3186 3187 assert(base->std_entry()->state() != nullptr, ""); 3188 return start; 3189 } 3190 3191 3192 void GraphBuilder::setup_osr_entry_block() { 3193 assert(compilation()->is_osr_compile(), "only for osrs"); 3194 3195 int osr_bci = compilation()->osr_bci(); 3196 ciBytecodeStream s(method()); 3197 s.reset_to_bci(osr_bci); 3198 s.next(); 3199 scope_data()->set_stream(&s); 3200 3201 // create a new block to be the osr setup code 3202 _osr_entry = new BlockBegin(osr_bci); 3203 _osr_entry->set(BlockBegin::osr_entry_flag); 3204 _osr_entry->set_depth_first_number(0); 3205 BlockBegin* target = bci2block()->at(osr_bci); 3206 assert(target != nullptr && target->is_set(BlockBegin::osr_entry_flag), "must be there"); 3207 // the osr entry has no values for locals 3208 ValueStack* state = target->state()->copy(); 3209 _osr_entry->set_state(state); 3210 3211 kill_all(); 3212 _block = _osr_entry; 3213 _state = _osr_entry->state()->copy(); 3214 assert(_state->bci() == osr_bci, "mismatch"); 3215 _last = _osr_entry; 3216 Value e = append(new OsrEntry()); 3217 e->set_needs_null_check(false); 3218 3219 // OSR buffer is 3220 // 3221 // locals[nlocals-1..0] 3222 // monitors[number_of_locks-1..0] 3223 // 3224 // locals is a direct copy of the interpreter frame so in the osr buffer 3225 // so first slot in the local array is the last local from the interpreter 3226 // and last slot is local[0] (receiver) from the interpreter 3227 // 3228 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 3229 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock 3230 // in the interpreter frame (the method lock if a sync method) 3231 3232 // Initialize monitors in the compiled activation. 3233 3234 int index; 3235 Value local; 3236 3237 // find all the locals that the interpreter thinks contain live oops 3238 const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci); 3239 3240 // compute the offset into the locals so that we can treat the buffer 3241 // as if the locals were still in the interpreter frame 3242 int locals_offset = BytesPerWord * (method()->max_locals() - 1); 3243 for_each_local_value(state, index, local) { 3244 int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord; 3245 Value get; 3246 if (local->type()->is_object_kind() && !live_oops.at(index)) { 3247 // The interpreter thinks this local is dead but the compiler 3248 // doesn't so pretend that the interpreter passed in null. 3249 get = append(new Constant(objectNull)); 3250 } else { 3251 Value off_val = append(new Constant(new IntConstant(offset))); 3252 get = append(new UnsafeGet(as_BasicType(local->type()), e, 3253 off_val, 3254 false/*is_volatile*/, 3255 true/*is_raw*/)); 3256 } 3257 _state->store_local(index, get); 3258 } 3259 3260 // the storage for the OSR buffer is freed manually in the LIRGenerator. 3261 3262 assert(state->caller_state() == nullptr, "should be top scope"); 3263 state->clear_locals(); 3264 Goto* g = new Goto(target, false); 3265 append(g); 3266 _osr_entry->set_end(g); 3267 target->merge(_osr_entry->end()->state(), compilation()->has_irreducible_loops()); 3268 3269 scope_data()->set_stream(nullptr); 3270 } 3271 3272 3273 ValueStack* GraphBuilder::state_at_entry() { 3274 ValueStack* state = new ValueStack(scope(), nullptr); 3275 3276 // Set up locals for receiver 3277 int idx = 0; 3278 if (!method()->is_static()) { 3279 // we should always see the receiver 3280 state->store_local(idx, new Local(method()->holder(), objectType, idx, true)); 3281 idx = 1; 3282 } 3283 3284 // Set up locals for incoming arguments 3285 ciSignature* sig = method()->signature(); 3286 for (int i = 0; i < sig->count(); i++) { 3287 ciType* type = sig->type_at(i); 3288 BasicType basic_type = type->basic_type(); 3289 // don't allow T_ARRAY to propagate into locals types 3290 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 3291 ValueType* vt = as_ValueType(basic_type); 3292 state->store_local(idx, new Local(type, vt, idx, false)); 3293 idx += type->size(); 3294 } 3295 3296 // lock synchronized method 3297 if (method()->is_synchronized()) { 3298 state->lock(nullptr); 3299 } 3300 3301 return state; 3302 } 3303 3304 3305 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope) 3306 : _scope_data(nullptr) 3307 , _compilation(compilation) 3308 , _memory(new MemoryBuffer()) 3309 , _inline_bailout_msg(nullptr) 3310 , _instruction_count(0) 3311 , _osr_entry(nullptr) 3312 { 3313 int osr_bci = compilation->osr_bci(); 3314 3315 // determine entry points and bci2block mapping 3316 BlockListBuilder blm(compilation, scope, osr_bci); 3317 CHECK_BAILOUT(); 3318 3319 BlockList* bci2block = blm.bci2block(); 3320 BlockBegin* start_block = bci2block->at(0); 3321 3322 push_root_scope(scope, bci2block, start_block); 3323 3324 // setup state for std entry 3325 _initial_state = state_at_entry(); 3326 start_block->merge(_initial_state, compilation->has_irreducible_loops()); 3327 3328 // End nulls still exist here 3329 3330 // complete graph 3331 _vmap = new ValueMap(); 3332 switch (scope->method()->intrinsic_id()) { 3333 case vmIntrinsics::_dabs : // fall through 3334 case vmIntrinsics::_dsqrt : // fall through 3335 case vmIntrinsics::_dsqrt_strict : // fall through 3336 case vmIntrinsics::_dsin : // fall through 3337 case vmIntrinsics::_dcos : // fall through 3338 case vmIntrinsics::_dtan : // fall through 3339 case vmIntrinsics::_dtanh : // fall through 3340 case vmIntrinsics::_dlog : // fall through 3341 case vmIntrinsics::_dlog10 : // fall through 3342 case vmIntrinsics::_dexp : // fall through 3343 case vmIntrinsics::_dpow : // fall through 3344 { 3345 // Compiles where the root method is an intrinsic need a special 3346 // compilation environment because the bytecodes for the method 3347 // shouldn't be parsed during the compilation, only the special 3348 // Intrinsic node should be emitted. If this isn't done the 3349 // code for the inlined version will be different than the root 3350 // compiled version which could lead to monotonicity problems on 3351 // intel. 3352 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) { 3353 BAILOUT("failed to inline intrinsic, method not annotated"); 3354 } 3355 3356 // Set up a stream so that appending instructions works properly. 3357 ciBytecodeStream s(scope->method()); 3358 s.reset_to_bci(0); 3359 scope_data()->set_stream(&s); 3360 s.next(); 3361 3362 // setup the initial block state 3363 _block = start_block; 3364 _state = start_block->state()->copy_for_parsing(); 3365 _last = start_block; 3366 load_local(doubleType, 0); 3367 if (scope->method()->intrinsic_id() == vmIntrinsics::_dpow) { 3368 load_local(doubleType, 2); 3369 } 3370 3371 // Emit the intrinsic node. 3372 bool result = try_inline_intrinsics(scope->method()); 3373 if (!result) BAILOUT("failed to inline intrinsic"); 3374 method_return(dpop()); 3375 3376 // connect the begin and end blocks and we're all done. 3377 BlockEnd* end = last()->as_BlockEnd(); 3378 block()->set_end(end); 3379 break; 3380 } 3381 3382 case vmIntrinsics::_Reference_get: 3383 { 3384 { 3385 // With java.lang.ref.reference.get() we must go through the 3386 // intrinsic - when G1 is enabled - even when get() is the root 3387 // method of the compile so that, if necessary, the value in 3388 // the referent field of the reference object gets recorded by 3389 // the pre-barrier code. 3390 // Specifically, if G1 is enabled, the value in the referent 3391 // field is recorded by the G1 SATB pre barrier. This will 3392 // result in the referent being marked live and the reference 3393 // object removed from the list of discovered references during 3394 // reference processing. 3395 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) { 3396 BAILOUT("failed to inline intrinsic, method not annotated"); 3397 } 3398 3399 // Also we need intrinsic to prevent commoning reads from this field 3400 // across safepoint since GC can change its value. 3401 3402 // Set up a stream so that appending instructions works properly. 3403 ciBytecodeStream s(scope->method()); 3404 s.reset_to_bci(0); 3405 scope_data()->set_stream(&s); 3406 s.next(); 3407 3408 // setup the initial block state 3409 _block = start_block; 3410 _state = start_block->state()->copy_for_parsing(); 3411 _last = start_block; 3412 load_local(objectType, 0); 3413 3414 // Emit the intrinsic node. 3415 bool result = try_inline_intrinsics(scope->method()); 3416 if (!result) BAILOUT("failed to inline intrinsic"); 3417 method_return(apop()); 3418 3419 // connect the begin and end blocks and we're all done. 3420 BlockEnd* end = last()->as_BlockEnd(); 3421 block()->set_end(end); 3422 break; 3423 } 3424 // Otherwise, fall thru 3425 } 3426 3427 default: 3428 scope_data()->add_to_work_list(start_block); 3429 iterate_all_blocks(); 3430 break; 3431 } 3432 CHECK_BAILOUT(); 3433 3434 # ifdef ASSERT 3435 // For all blocks reachable from start_block: _end must be non-null 3436 { 3437 BlockList processed; 3438 BlockList to_go; 3439 to_go.append(start_block); 3440 while(to_go.length() > 0) { 3441 BlockBegin* current = to_go.pop(); 3442 assert(current != nullptr, "Should not happen."); 3443 assert(current->end() != nullptr, "All blocks reachable from start_block should have end() != nullptr."); 3444 processed.append(current); 3445 for(int i = 0; i < current->number_of_sux(); i++) { 3446 BlockBegin* s = current->sux_at(i); 3447 if (!processed.contains(s)) { 3448 to_go.append(s); 3449 } 3450 } 3451 } 3452 } 3453 #endif // ASSERT 3454 3455 _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state); 3456 3457 eliminate_redundant_phis(_start); 3458 3459 NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats()); 3460 // for osr compile, bailout if some requirements are not fulfilled 3461 if (osr_bci != -1) { 3462 BlockBegin* osr_block = blm.bci2block()->at(osr_bci); 3463 if (!osr_block->is_set(BlockBegin::was_visited_flag)) { 3464 BAILOUT("osr entry must have been visited for osr compile"); 3465 } 3466 3467 // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points 3468 if (!osr_block->state()->stack_is_empty()) { 3469 BAILOUT("stack not empty at OSR entry point"); 3470 } 3471 } 3472 #ifndef PRODUCT 3473 if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count); 3474 #endif 3475 } 3476 3477 3478 ValueStack* GraphBuilder::copy_state_before() { 3479 return copy_state_before_with_bci(bci()); 3480 } 3481 3482 ValueStack* GraphBuilder::copy_state_exhandling() { 3483 return copy_state_exhandling_with_bci(bci()); 3484 } 3485 3486 ValueStack* GraphBuilder::copy_state_for_exception() { 3487 return copy_state_for_exception_with_bci(bci()); 3488 } 3489 3490 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) { 3491 return state()->copy(ValueStack::StateBefore, bci); 3492 } 3493 3494 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) { 3495 if (!has_handler()) return nullptr; 3496 return state()->copy(ValueStack::StateBefore, bci); 3497 } 3498 3499 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) { 3500 ValueStack* s = copy_state_exhandling_with_bci(bci); 3501 if (s == nullptr) { 3502 // no handler, no need to retain locals 3503 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(); 3504 s = state()->copy(exc_kind, bci); 3505 } 3506 return s; 3507 } 3508 3509 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const { 3510 int recur_level = 0; 3511 for (IRScope* s = scope(); s != nullptr; s = s->caller()) { 3512 if (s->method() == cur_callee) { 3513 ++recur_level; 3514 } 3515 } 3516 return recur_level; 3517 } 3518 3519 static void set_flags_for_inlined_callee(Compilation* compilation, ciMethod* callee) { 3520 if (callee->has_reserved_stack_access()) { 3521 compilation->set_has_reserved_stack_access(true); 3522 } 3523 if (callee->is_synchronized() || callee->has_monitor_bytecodes()) { 3524 compilation->set_has_monitors(true); 3525 } 3526 if (callee->is_scoped()) { 3527 compilation->set_has_scoped_access(true); 3528 } 3529 } 3530 3531 bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 3532 const char* msg = nullptr; 3533 3534 // clear out any existing inline bailout condition 3535 clear_inline_bailout(); 3536 3537 // exclude methods we don't want to inline 3538 msg = should_not_inline(callee); 3539 if (msg != nullptr) { 3540 print_inlining(callee, msg, /*success*/ false); 3541 return false; 3542 } 3543 3544 // method handle invokes 3545 if (callee->is_method_handle_intrinsic()) { 3546 if (try_method_handle_inline(callee, ignore_return)) { 3547 set_flags_for_inlined_callee(compilation(), callee); 3548 return true; 3549 } 3550 return false; 3551 } 3552 3553 // handle intrinsics 3554 if (callee->intrinsic_id() != vmIntrinsics::_none && 3555 callee->check_intrinsic_candidate()) { 3556 if (try_inline_intrinsics(callee, ignore_return)) { 3557 print_inlining(callee, "intrinsic"); 3558 set_flags_for_inlined_callee(compilation(), callee); 3559 return true; 3560 } 3561 // try normal inlining 3562 } 3563 3564 // certain methods cannot be parsed at all 3565 msg = check_can_parse(callee); 3566 if (msg != nullptr) { 3567 print_inlining(callee, msg, /*success*/ false); 3568 return false; 3569 } 3570 3571 // If bytecode not set use the current one. 3572 if (bc == Bytecodes::_illegal) { 3573 bc = code(); 3574 } 3575 if (try_inline_full(callee, holder_known, ignore_return, bc, receiver)) { 3576 set_flags_for_inlined_callee(compilation(), callee); 3577 return true; 3578 } 3579 3580 // Entire compilation could fail during try_inline_full call. 3581 // In that case printing inlining decision info is useless. 3582 if (!bailed_out()) 3583 print_inlining(callee, _inline_bailout_msg, /*success*/ false); 3584 3585 return false; 3586 } 3587 3588 3589 const char* GraphBuilder::check_can_parse(ciMethod* callee) const { 3590 // Certain methods cannot be parsed at all: 3591 if ( callee->is_native()) return "native method"; 3592 if ( callee->is_abstract()) return "abstract method"; 3593 if (!callee->can_be_parsed()) return "cannot be parsed"; 3594 return nullptr; 3595 } 3596 3597 // negative filter: should callee NOT be inlined? returns null, ok to inline, or rejection msg 3598 const char* GraphBuilder::should_not_inline(ciMethod* callee) const { 3599 if ( compilation()->directive()->should_not_inline(callee)) return "disallowed by CompileCommand"; 3600 if ( callee->dont_inline()) return "don't inline by annotation"; 3601 return nullptr; 3602 } 3603 3604 void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_return) { 3605 vmIntrinsics::ID id = callee->intrinsic_id(); 3606 assert(id != vmIntrinsics::_none, "must be a VM intrinsic"); 3607 3608 // Some intrinsics need special IR nodes. 3609 switch(id) { 3610 case vmIntrinsics::_getReference : append_unsafe_get(callee, T_OBJECT, false); return; 3611 case vmIntrinsics::_getBoolean : append_unsafe_get(callee, T_BOOLEAN, false); return; 3612 case vmIntrinsics::_getByte : append_unsafe_get(callee, T_BYTE, false); return; 3613 case vmIntrinsics::_getShort : append_unsafe_get(callee, T_SHORT, false); return; 3614 case vmIntrinsics::_getChar : append_unsafe_get(callee, T_CHAR, false); return; 3615 case vmIntrinsics::_getInt : append_unsafe_get(callee, T_INT, false); return; 3616 case vmIntrinsics::_getLong : append_unsafe_get(callee, T_LONG, false); return; 3617 case vmIntrinsics::_getFloat : append_unsafe_get(callee, T_FLOAT, false); return; 3618 case vmIntrinsics::_getDouble : append_unsafe_get(callee, T_DOUBLE, false); return; 3619 case vmIntrinsics::_putReference : append_unsafe_put(callee, T_OBJECT, false); return; 3620 case vmIntrinsics::_putBoolean : append_unsafe_put(callee, T_BOOLEAN, false); return; 3621 case vmIntrinsics::_putByte : append_unsafe_put(callee, T_BYTE, false); return; 3622 case vmIntrinsics::_putShort : append_unsafe_put(callee, T_SHORT, false); return; 3623 case vmIntrinsics::_putChar : append_unsafe_put(callee, T_CHAR, false); return; 3624 case vmIntrinsics::_putInt : append_unsafe_put(callee, T_INT, false); return; 3625 case vmIntrinsics::_putLong : append_unsafe_put(callee, T_LONG, false); return; 3626 case vmIntrinsics::_putFloat : append_unsafe_put(callee, T_FLOAT, false); return; 3627 case vmIntrinsics::_putDouble : append_unsafe_put(callee, T_DOUBLE, false); return; 3628 case vmIntrinsics::_getShortUnaligned : append_unsafe_get(callee, T_SHORT, false); return; 3629 case vmIntrinsics::_getCharUnaligned : append_unsafe_get(callee, T_CHAR, false); return; 3630 case vmIntrinsics::_getIntUnaligned : append_unsafe_get(callee, T_INT, false); return; 3631 case vmIntrinsics::_getLongUnaligned : append_unsafe_get(callee, T_LONG, false); return; 3632 case vmIntrinsics::_putShortUnaligned : append_unsafe_put(callee, T_SHORT, false); return; 3633 case vmIntrinsics::_putCharUnaligned : append_unsafe_put(callee, T_CHAR, false); return; 3634 case vmIntrinsics::_putIntUnaligned : append_unsafe_put(callee, T_INT, false); return; 3635 case vmIntrinsics::_putLongUnaligned : append_unsafe_put(callee, T_LONG, false); return; 3636 case vmIntrinsics::_getReferenceVolatile : append_unsafe_get(callee, T_OBJECT, true); return; 3637 case vmIntrinsics::_getBooleanVolatile : append_unsafe_get(callee, T_BOOLEAN, true); return; 3638 case vmIntrinsics::_getByteVolatile : append_unsafe_get(callee, T_BYTE, true); return; 3639 case vmIntrinsics::_getShortVolatile : append_unsafe_get(callee, T_SHORT, true); return; 3640 case vmIntrinsics::_getCharVolatile : append_unsafe_get(callee, T_CHAR, true); return; 3641 case vmIntrinsics::_getIntVolatile : append_unsafe_get(callee, T_INT, true); return; 3642 case vmIntrinsics::_getLongVolatile : append_unsafe_get(callee, T_LONG, true); return; 3643 case vmIntrinsics::_getFloatVolatile : append_unsafe_get(callee, T_FLOAT, true); return; 3644 case vmIntrinsics::_getDoubleVolatile : append_unsafe_get(callee, T_DOUBLE, true); return; 3645 case vmIntrinsics::_putReferenceVolatile : append_unsafe_put(callee, T_OBJECT, true); return; 3646 case vmIntrinsics::_putBooleanVolatile : append_unsafe_put(callee, T_BOOLEAN, true); return; 3647 case vmIntrinsics::_putByteVolatile : append_unsafe_put(callee, T_BYTE, true); return; 3648 case vmIntrinsics::_putShortVolatile : append_unsafe_put(callee, T_SHORT, true); return; 3649 case vmIntrinsics::_putCharVolatile : append_unsafe_put(callee, T_CHAR, true); return; 3650 case vmIntrinsics::_putIntVolatile : append_unsafe_put(callee, T_INT, true); return; 3651 case vmIntrinsics::_putLongVolatile : append_unsafe_put(callee, T_LONG, true); return; 3652 case vmIntrinsics::_putFloatVolatile : append_unsafe_put(callee, T_FLOAT, true); return; 3653 case vmIntrinsics::_putDoubleVolatile : append_unsafe_put(callee, T_DOUBLE, true); return; 3654 case vmIntrinsics::_compareAndSetLong: 3655 case vmIntrinsics::_compareAndSetInt: 3656 case vmIntrinsics::_compareAndSetReference : append_unsafe_CAS(callee); return; 3657 case vmIntrinsics::_getAndAddInt: 3658 case vmIntrinsics::_getAndAddLong : append_unsafe_get_and_set(callee, true); return; 3659 case vmIntrinsics::_getAndSetInt : 3660 case vmIntrinsics::_getAndSetLong : 3661 case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set(callee, false); return; 3662 case vmIntrinsics::_getCharStringU : append_char_access(callee, false); return; 3663 case vmIntrinsics::_putCharStringU : append_char_access(callee, true); return; 3664 case vmIntrinsics::_clone : append_alloc_array_copy(callee); return; 3665 default: 3666 break; 3667 } 3668 if (_inline_bailout_msg != nullptr) { 3669 return; 3670 } 3671 3672 // create intrinsic node 3673 const bool has_receiver = !callee->is_static(); 3674 ValueType* result_type = as_ValueType(callee->return_type()); 3675 ValueStack* state_before = copy_state_for_exception(); 3676 3677 Values* args = state()->pop_arguments(callee->arg_size()); 3678 3679 if (is_profiling()) { 3680 // Don't profile in the special case where the root method 3681 // is the intrinsic 3682 if (callee != method()) { 3683 // Note that we'd collect profile data in this method if we wanted it. 3684 compilation()->set_would_profile(true); 3685 if (profile_calls()) { 3686 Value recv = nullptr; 3687 if (has_receiver) { 3688 recv = args->at(0); 3689 null_check(recv); 3690 } 3691 profile_call(callee, recv, nullptr, collect_args_for_profiling(args, callee, true), true); 3692 } 3693 } 3694 } 3695 3696 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), 3697 args, has_receiver, state_before, 3698 vmIntrinsics::preserves_state(id), 3699 vmIntrinsics::can_trap(id)); 3700 // append instruction & push result 3701 Value value = append_split(result); 3702 if (result_type != voidType && !ignore_return) { 3703 push(result_type, value); 3704 } 3705 3706 if (callee != method() && profile_return() && result_type->is_object_kind()) { 3707 profile_return_type(result, callee); 3708 } 3709 } 3710 3711 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) { 3712 // For calling is_intrinsic_available we need to transition to 3713 // the '_thread_in_vm' state because is_intrinsic_available() 3714 // accesses critical VM-internal data. 3715 bool is_available = false; 3716 { 3717 VM_ENTRY_MARK; 3718 methodHandle mh(THREAD, callee->get_Method()); 3719 is_available = _compilation->compiler()->is_intrinsic_available(mh, _compilation->directive()); 3720 } 3721 3722 if (!is_available) { 3723 if (!InlineNatives) { 3724 // Return false and also set message that the inlining of 3725 // intrinsics has been disabled in general. 3726 INLINE_BAILOUT("intrinsic method inlining disabled"); 3727 } else { 3728 return false; 3729 } 3730 } 3731 build_graph_for_intrinsic(callee, ignore_return); 3732 if (_inline_bailout_msg != nullptr) { 3733 return false; 3734 } 3735 return true; 3736 } 3737 3738 3739 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) { 3740 // Introduce a new callee continuation point - all Ret instructions 3741 // will be replaced with Gotos to this point. 3742 if (next_bci() >= method()->code_size()) { 3743 return false; 3744 } 3745 BlockBegin* cont = block_at(next_bci()); 3746 assert(cont != nullptr, "continuation must exist (BlockListBuilder starts a new block after a jsr"); 3747 3748 // Note: can not assign state to continuation yet, as we have to 3749 // pick up the state from the Ret instructions. 3750 3751 // Push callee scope 3752 push_scope_for_jsr(cont, jsr_dest_bci); 3753 3754 // Temporarily set up bytecode stream so we can append instructions 3755 // (only using the bci of this stream) 3756 scope_data()->set_stream(scope_data()->parent()->stream()); 3757 3758 BlockBegin* jsr_start_block = block_at(jsr_dest_bci); 3759 assert(jsr_start_block != nullptr, "jsr start block must exist"); 3760 assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet"); 3761 Goto* goto_sub = new Goto(jsr_start_block, false); 3762 // Must copy state to avoid wrong sharing when parsing bytecodes 3763 assert(jsr_start_block->state() == nullptr, "should have fresh jsr starting block"); 3764 jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci)); 3765 append(goto_sub); 3766 _block->set_end(goto_sub); 3767 _last = _block = jsr_start_block; 3768 3769 // Clear out bytecode stream 3770 scope_data()->set_stream(nullptr); 3771 3772 scope_data()->add_to_work_list(jsr_start_block); 3773 3774 // Ready to resume parsing in subroutine 3775 iterate_all_blocks(); 3776 3777 // If we bailed out during parsing, return immediately (this is bad news) 3778 CHECK_BAILOUT_(false); 3779 3780 // Detect whether the continuation can actually be reached. If not, 3781 // it has not had state set by the join() operations in 3782 // iterate_bytecodes_for_block()/ret() and we should not touch the 3783 // iteration state. The calling activation of 3784 // iterate_bytecodes_for_block will then complete normally. 3785 if (cont->state() != nullptr) { 3786 if (!cont->is_set(BlockBegin::was_visited_flag)) { 3787 // add continuation to work list instead of parsing it immediately 3788 scope_data()->parent()->add_to_work_list(cont); 3789 } 3790 } 3791 3792 assert(jsr_continuation() == cont, "continuation must not have changed"); 3793 assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) || 3794 jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag), 3795 "continuation can only be visited in case of backward branches"); 3796 assert(_last && _last->as_BlockEnd(), "block must have end"); 3797 3798 // continuation is in work list, so end iteration of current block 3799 _skip_block = true; 3800 pop_scope_for_jsr(); 3801 3802 return true; 3803 } 3804 3805 3806 // Inline the entry of a synchronized method as a monitor enter and 3807 // register the exception handler which releases the monitor if an 3808 // exception is thrown within the callee. Note that the monitor enter 3809 // cannot throw an exception itself, because the receiver is 3810 // guaranteed to be non-null by the explicit null check at the 3811 // beginning of inlining. 3812 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) { 3813 assert(lock != nullptr && sync_handler != nullptr, "lock or handler missing"); 3814 3815 monitorenter(lock, SynchronizationEntryBCI); 3816 assert(_last->as_MonitorEnter() != nullptr, "monitor enter expected"); 3817 _last->set_needs_null_check(false); 3818 3819 sync_handler->set(BlockBegin::exception_entry_flag); 3820 sync_handler->set(BlockBegin::is_on_work_list_flag); 3821 3822 ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0); 3823 XHandler* h = new XHandler(desc); 3824 h->set_entry_block(sync_handler); 3825 scope_data()->xhandlers()->append(h); 3826 scope_data()->set_has_handler(); 3827 } 3828 3829 3830 // If an exception is thrown and not handled within an inlined 3831 // synchronized method, the monitor must be released before the 3832 // exception is rethrown in the outer scope. Generate the appropriate 3833 // instructions here. 3834 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) { 3835 BlockBegin* orig_block = _block; 3836 ValueStack* orig_state = _state; 3837 Instruction* orig_last = _last; 3838 _last = _block = sync_handler; 3839 _state = sync_handler->state()->copy(); 3840 3841 assert(sync_handler != nullptr, "handler missing"); 3842 assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here"); 3843 3844 assert(lock != nullptr || default_handler, "lock or handler missing"); 3845 3846 XHandler* h = scope_data()->xhandlers()->remove_last(); 3847 assert(h->entry_block() == sync_handler, "corrupt list of handlers"); 3848 3849 block()->set(BlockBegin::was_visited_flag); 3850 Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI); 3851 assert(exception->is_pinned(), "must be"); 3852 3853 int bci = SynchronizationEntryBCI; 3854 if (compilation()->env()->dtrace_method_probes()) { 3855 // Report exit from inline methods. We don't have a stream here 3856 // so pass an explicit bci of SynchronizationEntryBCI. 3857 Values* args = new Values(1); 3858 args->push(append_with_bci(new Constant(new MethodConstant(method())), bci)); 3859 append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci); 3860 } 3861 3862 if (lock) { 3863 assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing"); 3864 if (!lock->is_linked()) { 3865 lock = append_with_bci(lock, bci); 3866 } 3867 3868 // exit the monitor in the context of the synchronized method 3869 monitorexit(lock, bci); 3870 3871 // exit the context of the synchronized method 3872 if (!default_handler) { 3873 pop_scope(); 3874 bci = _state->caller_state()->bci(); 3875 _state = _state->caller_state()->copy_for_parsing(); 3876 } 3877 } 3878 3879 // perform the throw as if at the call site 3880 apush(exception); 3881 throw_op(bci); 3882 3883 BlockEnd* end = last()->as_BlockEnd(); 3884 block()->set_end(end); 3885 3886 _block = orig_block; 3887 _state = orig_state; 3888 _last = orig_last; 3889 } 3890 3891 3892 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 3893 assert(!callee->is_native(), "callee must not be native"); 3894 if (CompilationPolicy::should_not_inline(compilation()->env(), callee)) { 3895 INLINE_BAILOUT("inlining prohibited by policy"); 3896 } 3897 // first perform tests of things it's not possible to inline 3898 if (callee->has_exception_handlers() && 3899 !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers"); 3900 if (callee->is_synchronized() && 3901 !InlineSynchronizedMethods ) INLINE_BAILOUT("callee is synchronized"); 3902 if (!callee->holder()->is_linked()) INLINE_BAILOUT("callee's klass not linked yet"); 3903 if (bc == Bytecodes::_invokestatic && 3904 !callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet"); 3905 if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match"); 3906 3907 // Proper inlining of methods with jsrs requires a little more work. 3908 if (callee->has_jsrs() ) INLINE_BAILOUT("jsrs not handled properly by inliner yet"); 3909 3910 if (is_profiling() && !callee->ensure_method_data()) { 3911 INLINE_BAILOUT("mdo allocation failed"); 3912 } 3913 3914 const bool is_invokedynamic = (bc == Bytecodes::_invokedynamic); 3915 const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic); 3916 3917 const int args_base = state()->stack_size() - callee->arg_size(); 3918 assert(args_base >= 0, "stack underflow during inlining"); 3919 3920 Value recv = nullptr; 3921 if (has_receiver) { 3922 assert(!callee->is_static(), "callee must not be static"); 3923 assert(callee->arg_size() > 0, "must have at least a receiver"); 3924 3925 recv = state()->stack_at(args_base); 3926 if (recv->is_null_obj()) { 3927 INLINE_BAILOUT("receiver is always null"); 3928 } 3929 } 3930 3931 // now perform tests that are based on flag settings 3932 bool inlinee_by_directive = compilation()->directive()->should_inline(callee); 3933 if (callee->force_inline() || inlinee_by_directive) { 3934 if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel"); 3935 if (recursive_inline_level(callee) > C1MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep"); 3936 3937 const char* msg = ""; 3938 if (callee->force_inline()) msg = "force inline by annotation"; 3939 if (inlinee_by_directive) msg = "force inline by CompileCommand"; 3940 print_inlining(callee, msg); 3941 } else { 3942 // use heuristic controls on inlining 3943 if (inline_level() > C1MaxInlineLevel ) INLINE_BAILOUT("inlining too deep"); 3944 int callee_recursive_level = recursive_inline_level(callee); 3945 if (callee_recursive_level > C1MaxRecursiveInlineLevel ) INLINE_BAILOUT("recursive inlining too deep"); 3946 if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); 3947 // Additional condition to limit stack usage for non-recursive calls. 3948 if ((callee_recursive_level == 0) && 3949 (callee->max_stack() + callee->max_locals() - callee->size_of_parameters() > C1InlineStackLimit)) { 3950 INLINE_BAILOUT("callee uses too much stack"); 3951 } 3952 3953 // don't inline throwable methods unless the inlining tree is rooted in a throwable class 3954 if (callee->name() == ciSymbols::object_initializer_name() && 3955 callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 3956 // Throwable constructor call 3957 IRScope* top = scope(); 3958 while (top->caller() != nullptr) { 3959 top = top->caller(); 3960 } 3961 if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 3962 INLINE_BAILOUT("don't inline Throwable constructors"); 3963 } 3964 } 3965 3966 if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) { 3967 INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); 3968 } 3969 // printing 3970 print_inlining(callee, "inline", /*success*/ true); 3971 } 3972 3973 assert(bc != Bytecodes::_invokestatic || callee->holder()->is_initialized(), "required"); 3974 3975 // NOTE: Bailouts from this point on, which occur at the 3976 // GraphBuilder level, do not cause bailout just of the inlining but 3977 // in fact of the entire compilation. 3978 3979 BlockBegin* orig_block = block(); 3980 3981 // Insert null check if necessary 3982 if (has_receiver) { 3983 // note: null check must happen even if first instruction of callee does 3984 // an implicit null check since the callee is in a different scope 3985 // and we must make sure exception handling does the right thing 3986 null_check(recv); 3987 } 3988 3989 if (is_profiling()) { 3990 // Note that we'd collect profile data in this method if we wanted it. 3991 // this may be redundant here... 3992 compilation()->set_would_profile(true); 3993 3994 if (profile_calls()) { 3995 int start = 0; 3996 Values* obj_args = args_list_for_profiling(callee, start, has_receiver); 3997 if (obj_args != nullptr) { 3998 int s = obj_args->capacity(); 3999 // if called through method handle invoke, some arguments may have been popped 4000 for (int i = args_base+start, j = 0; j < obj_args->capacity() && i < state()->stack_size(); ) { 4001 Value v = state()->stack_at_inc(i); 4002 if (v->type()->is_object_kind()) { 4003 obj_args->push(v); 4004 j++; 4005 } 4006 } 4007 check_args_for_profiling(obj_args, s); 4008 } 4009 profile_call(callee, recv, holder_known ? callee->holder() : nullptr, obj_args, true); 4010 } 4011 } 4012 4013 // Introduce a new callee continuation point - if the callee has 4014 // more than one return instruction or the return does not allow 4015 // fall-through of control flow, all return instructions of the 4016 // callee will need to be replaced by Goto's pointing to this 4017 // continuation point. 4018 BlockBegin* cont = block_at(next_bci()); 4019 bool continuation_existed = true; 4020 if (cont == nullptr) { 4021 cont = new BlockBegin(next_bci()); 4022 // low number so that continuation gets parsed as early as possible 4023 cont->set_depth_first_number(0); 4024 if (PrintInitialBlockList) { 4025 tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d", 4026 cont->block_id(), cont->bci(), bci()); 4027 } 4028 continuation_existed = false; 4029 } 4030 // Record number of predecessors of continuation block before 4031 // inlining, to detect if inlined method has edges to its 4032 // continuation after inlining. 4033 int continuation_preds = cont->number_of_preds(); 4034 4035 // Push callee scope 4036 push_scope(callee, cont); 4037 4038 // the BlockListBuilder for the callee could have bailed out 4039 if (bailed_out()) 4040 return false; 4041 4042 // Temporarily set up bytecode stream so we can append instructions 4043 // (only using the bci of this stream) 4044 scope_data()->set_stream(scope_data()->parent()->stream()); 4045 4046 // Pass parameters into callee state: add assignments 4047 // note: this will also ensure that all arguments are computed before being passed 4048 ValueStack* callee_state = state(); 4049 ValueStack* caller_state = state()->caller_state(); 4050 for (int i = args_base; i < caller_state->stack_size(); ) { 4051 const int arg_no = i - args_base; 4052 Value arg = caller_state->stack_at_inc(i); 4053 store_local(callee_state, arg, arg_no); 4054 } 4055 4056 // Remove args from stack. 4057 // Note that we preserve locals state in case we can use it later 4058 // (see use of pop_scope() below) 4059 caller_state->truncate_stack(args_base); 4060 assert(callee_state->stack_size() == 0, "callee stack must be empty"); 4061 4062 Value lock = nullptr; 4063 BlockBegin* sync_handler = nullptr; 4064 4065 // Inline the locking of the receiver if the callee is synchronized 4066 if (callee->is_synchronized()) { 4067 lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror()))) 4068 : state()->local_at(0); 4069 sync_handler = new BlockBegin(SynchronizationEntryBCI); 4070 inline_sync_entry(lock, sync_handler); 4071 } 4072 4073 if (compilation()->env()->dtrace_method_probes()) { 4074 Values* args = new Values(1); 4075 args->push(append(new Constant(new MethodConstant(method())))); 4076 append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args)); 4077 } 4078 4079 MethodDetails method_details(callee); 4080 RuntimeUpcallInfo* upcall = RuntimeUpcalls::get_first_upcall(RuntimeUpcallType::onMethodEntry, method_details); 4081 while (upcall != nullptr) { 4082 Values* args = new Values(0); 4083 append(new RuntimeCall(voidType, upcall->upcall_name(), upcall->upcall_address(), args)); 4084 upcall = RuntimeUpcalls::get_next_upcall(RuntimeUpcallType::onMethodEntry, method_details, upcall); 4085 } 4086 4087 if (profile_inlined_calls()) { 4088 profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI)); 4089 } 4090 4091 BlockBegin* callee_start_block = block_at(0); 4092 if (callee_start_block != nullptr) { 4093 assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header"); 4094 Goto* goto_callee = new Goto(callee_start_block, false); 4095 // The state for this goto is in the scope of the callee, so use 4096 // the entry bci for the callee instead of the call site bci. 4097 append_with_bci(goto_callee, 0); 4098 _block->set_end(goto_callee); 4099 callee_start_block->merge(callee_state, compilation()->has_irreducible_loops()); 4100 4101 _last = _block = callee_start_block; 4102 4103 scope_data()->add_to_work_list(callee_start_block); 4104 } 4105 4106 // Clear out bytecode stream 4107 scope_data()->set_stream(nullptr); 4108 scope_data()->set_ignore_return(ignore_return); 4109 4110 CompileLog* log = compilation()->log(); 4111 if (log != nullptr) log->head("parse method='%d'", log->identify(callee)); 4112 4113 // Ready to resume parsing in callee (either in the same block we 4114 // were in before or in the callee's start block) 4115 iterate_all_blocks(callee_start_block == nullptr); 4116 4117 if (log != nullptr) log->done("parse"); 4118 4119 // If we bailed out during parsing, return immediately (this is bad news) 4120 if (bailed_out()) 4121 return false; 4122 4123 // iterate_all_blocks theoretically traverses in random order; in 4124 // practice, we have only traversed the continuation if we are 4125 // inlining into a subroutine 4126 assert(continuation_existed || 4127 !continuation()->is_set(BlockBegin::was_visited_flag), 4128 "continuation should not have been parsed yet if we created it"); 4129 4130 // At this point we are almost ready to return and resume parsing of 4131 // the caller back in the GraphBuilder. The only thing we want to do 4132 // first is an optimization: during parsing of the callee we 4133 // generated at least one Goto to the continuation block. If we 4134 // generated exactly one, and if the inlined method spanned exactly 4135 // one block (and we didn't have to Goto its entry), then we snip 4136 // off the Goto to the continuation, allowing control to fall 4137 // through back into the caller block and effectively performing 4138 // block merging. This allows load elimination and CSE to take place 4139 // across multiple callee scopes if they are relatively simple, and 4140 // is currently essential to making inlining profitable. 4141 if (num_returns() == 1 4142 && block() == orig_block 4143 && block() == inline_cleanup_block()) { 4144 _last = inline_cleanup_return_prev(); 4145 _state = inline_cleanup_state(); 4146 } else if (continuation_preds == cont->number_of_preds()) { 4147 // Inlining caused that the instructions after the invoke in the 4148 // caller are not reachable any more. So skip filling this block 4149 // with instructions! 4150 assert(cont == continuation(), ""); 4151 assert(_last && _last->as_BlockEnd(), ""); 4152 _skip_block = true; 4153 } else { 4154 // Resume parsing in continuation block unless it was already parsed. 4155 // Note that if we don't change _last here, iteration in 4156 // iterate_bytecodes_for_block will stop when we return. 4157 if (!continuation()->is_set(BlockBegin::was_visited_flag)) { 4158 // add continuation to work list instead of parsing it immediately 4159 assert(_last && _last->as_BlockEnd(), ""); 4160 scope_data()->parent()->add_to_work_list(continuation()); 4161 _skip_block = true; 4162 } 4163 } 4164 4165 // Fill the exception handler for synchronized methods with instructions 4166 if (callee->is_synchronized() && sync_handler->state() != nullptr) { 4167 fill_sync_handler(lock, sync_handler); 4168 } else { 4169 pop_scope(); 4170 } 4171 4172 compilation()->notice_inlined_method(callee); 4173 4174 return true; 4175 } 4176 4177 4178 bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return) { 4179 ValueStack* state_before = copy_state_before(); 4180 vmIntrinsics::ID iid = callee->intrinsic_id(); 4181 switch (iid) { 4182 case vmIntrinsics::_invokeBasic: 4183 { 4184 // get MethodHandle receiver 4185 const int args_base = state()->stack_size() - callee->arg_size(); 4186 ValueType* type = state()->stack_at(args_base)->type(); 4187 if (type->is_constant()) { 4188 ciObject* mh = type->as_ObjectType()->constant_value(); 4189 if (mh->is_method_handle()) { 4190 ciMethod* target = mh->as_method_handle()->get_vmtarget(); 4191 4192 // We don't do CHA here so only inline static and statically bindable methods. 4193 if (target->is_static() || target->can_be_statically_bound()) { 4194 if (ciMethod::is_consistent_info(callee, target)) { 4195 Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual; 4196 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); 4197 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) { 4198 return true; 4199 } 4200 } else { 4201 print_inlining(target, "signatures mismatch", /*success*/ false); 4202 } 4203 } else { 4204 assert(false, "no inlining through MH::invokeBasic"); // missing optimization opportunity due to suboptimal LF shape 4205 print_inlining(target, "not static or statically bindable", /*success*/ false); 4206 } 4207 } else { 4208 assert(mh->is_null_object(), "not a null"); 4209 print_inlining(callee, "receiver is always null", /*success*/ false); 4210 } 4211 } else { 4212 print_inlining(callee, "receiver not constant", /*success*/ false); 4213 } 4214 } 4215 break; 4216 4217 case vmIntrinsics::_linkToVirtual: 4218 case vmIntrinsics::_linkToStatic: 4219 case vmIntrinsics::_linkToSpecial: 4220 case vmIntrinsics::_linkToInterface: 4221 { 4222 // pop MemberName argument 4223 const int args_base = state()->stack_size() - callee->arg_size(); 4224 ValueType* type = apop()->type(); 4225 if (type->is_constant()) { 4226 ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget(); 4227 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); 4228 // If the target is another method handle invoke, try to recursively get 4229 // a better target. 4230 if (target->is_method_handle_intrinsic()) { 4231 if (try_method_handle_inline(target, ignore_return)) { 4232 return true; 4233 } 4234 } else if (!ciMethod::is_consistent_info(callee, target)) { 4235 print_inlining(target, "signatures mismatch", /*success*/ false); 4236 } else { 4237 ciSignature* signature = target->signature(); 4238 const int receiver_skip = target->is_static() ? 0 : 1; 4239 // Cast receiver to its type. 4240 if (!target->is_static()) { 4241 ciKlass* tk = signature->accessing_klass(); 4242 Value obj = state()->stack_at(args_base); 4243 if (obj->exact_type() == nullptr && 4244 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { 4245 TypeCast* c = new TypeCast(tk, obj, state_before); 4246 append(c); 4247 state()->stack_at_put(args_base, c); 4248 } 4249 } 4250 // Cast reference arguments to its type. 4251 for (int i = 0, j = 0; i < signature->count(); i++) { 4252 ciType* t = signature->type_at(i); 4253 if (t->is_klass()) { 4254 ciKlass* tk = t->as_klass(); 4255 Value obj = state()->stack_at(args_base + receiver_skip + j); 4256 if (obj->exact_type() == nullptr && 4257 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { 4258 TypeCast* c = new TypeCast(t, obj, state_before); 4259 append(c); 4260 state()->stack_at_put(args_base + receiver_skip + j, c); 4261 } 4262 } 4263 j += t->size(); // long and double take two slots 4264 } 4265 // We don't do CHA here so only inline static and statically bindable methods. 4266 if (target->is_static() || target->can_be_statically_bound()) { 4267 Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual; 4268 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) { 4269 return true; 4270 } 4271 } else { 4272 print_inlining(target, "not static or statically bindable", /*success*/ false); 4273 } 4274 } 4275 } else { 4276 print_inlining(callee, "MemberName not constant", /*success*/ false); 4277 } 4278 } 4279 break; 4280 4281 case vmIntrinsics::_linkToNative: 4282 print_inlining(callee, "native call", /*success*/ false); 4283 break; 4284 4285 default: 4286 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid)); 4287 break; 4288 } 4289 set_state(state_before->copy_for_parsing()); 4290 return false; 4291 } 4292 4293 4294 void GraphBuilder::inline_bailout(const char* msg) { 4295 assert(msg != nullptr, "inline bailout msg must exist"); 4296 _inline_bailout_msg = msg; 4297 } 4298 4299 4300 void GraphBuilder::clear_inline_bailout() { 4301 _inline_bailout_msg = nullptr; 4302 } 4303 4304 4305 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) { 4306 ScopeData* data = new ScopeData(nullptr); 4307 data->set_scope(scope); 4308 data->set_bci2block(bci2block); 4309 _scope_data = data; 4310 _block = start; 4311 } 4312 4313 4314 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) { 4315 IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false); 4316 scope()->add_callee(callee_scope); 4317 4318 BlockListBuilder blb(compilation(), callee_scope, -1); 4319 CHECK_BAILOUT(); 4320 4321 if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) { 4322 // this scope can be inlined directly into the caller so remove 4323 // the block at bci 0. 4324 blb.bci2block()->at_put(0, nullptr); 4325 } 4326 4327 set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci()))); 4328 4329 ScopeData* data = new ScopeData(scope_data()); 4330 data->set_scope(callee_scope); 4331 data->set_bci2block(blb.bci2block()); 4332 data->set_continuation(continuation); 4333 _scope_data = data; 4334 } 4335 4336 4337 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) { 4338 ScopeData* data = new ScopeData(scope_data()); 4339 data->set_parsing_jsr(); 4340 data->set_jsr_entry_bci(jsr_dest_bci); 4341 data->set_jsr_return_address_local(-1); 4342 // Must clone bci2block list as we will be mutating it in order to 4343 // properly clone all blocks in jsr region as well as exception 4344 // handlers containing rets 4345 BlockList* new_bci2block = new BlockList(bci2block()->length()); 4346 new_bci2block->appendAll(bci2block()); 4347 data->set_bci2block(new_bci2block); 4348 data->set_scope(scope()); 4349 data->setup_jsr_xhandlers(); 4350 data->set_continuation(continuation()); 4351 data->set_jsr_continuation(jsr_continuation); 4352 _scope_data = data; 4353 } 4354 4355 4356 void GraphBuilder::pop_scope() { 4357 int number_of_locks = scope()->number_of_locks(); 4358 _scope_data = scope_data()->parent(); 4359 // accumulate minimum number of monitor slots to be reserved 4360 scope()->set_min_number_of_locks(number_of_locks); 4361 } 4362 4363 4364 void GraphBuilder::pop_scope_for_jsr() { 4365 _scope_data = scope_data()->parent(); 4366 } 4367 4368 void GraphBuilder::append_unsafe_get(ciMethod* callee, BasicType t, bool is_volatile) { 4369 Values* args = state()->pop_arguments(callee->arg_size()); 4370 null_check(args->at(0)); 4371 Instruction* offset = args->at(2); 4372 #ifndef _LP64 4373 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4374 #endif 4375 Instruction* op = append(new UnsafeGet(t, args->at(1), offset, is_volatile)); 4376 push(op->type(), op); 4377 compilation()->set_has_unsafe_access(true); 4378 } 4379 4380 4381 void GraphBuilder::append_unsafe_put(ciMethod* callee, BasicType t, bool is_volatile) { 4382 Values* args = state()->pop_arguments(callee->arg_size()); 4383 null_check(args->at(0)); 4384 Instruction* offset = args->at(2); 4385 #ifndef _LP64 4386 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4387 #endif 4388 Value val = args->at(3); 4389 if (t == T_BOOLEAN) { 4390 Value mask = append(new Constant(new IntConstant(1))); 4391 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 4392 } 4393 Instruction* op = append(new UnsafePut(t, args->at(1), offset, val, is_volatile)); 4394 compilation()->set_has_unsafe_access(true); 4395 kill_all(); 4396 } 4397 4398 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) { 4399 ValueStack* state_before = copy_state_for_exception(); 4400 ValueType* result_type = as_ValueType(callee->return_type()); 4401 assert(result_type->is_int(), "int result"); 4402 Values* args = state()->pop_arguments(callee->arg_size()); 4403 4404 // Pop off some args to specially handle, then push back 4405 Value newval = args->pop(); 4406 Value cmpval = args->pop(); 4407 Value offset = args->pop(); 4408 Value src = args->pop(); 4409 Value unsafe_obj = args->pop(); 4410 4411 // Separately handle the unsafe arg. It is not needed for code 4412 // generation, but must be null checked 4413 null_check(unsafe_obj); 4414 4415 #ifndef _LP64 4416 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4417 #endif 4418 4419 args->push(src); 4420 args->push(offset); 4421 args->push(cmpval); 4422 args->push(newval); 4423 4424 // An unsafe CAS can alias with other field accesses, but we don't 4425 // know which ones so mark the state as no preserved. This will 4426 // cause CSE to invalidate memory across it. 4427 bool preserves_state = false; 4428 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state); 4429 append_split(result); 4430 push(result_type, result); 4431 compilation()->set_has_unsafe_access(true); 4432 } 4433 4434 void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) { 4435 // This intrinsic accesses byte[] array as char[] array. Computing the offsets 4436 // correctly requires matched array shapes. 4437 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE), 4438 "sanity: byte[] and char[] bases agree"); 4439 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2, 4440 "sanity: byte[] and char[] scales agree"); 4441 4442 ValueStack* state_before = copy_state_indexed_access(); 4443 compilation()->set_has_access_indexed(true); 4444 Values* args = state()->pop_arguments(callee->arg_size()); 4445 Value array = args->at(0); 4446 Value index = args->at(1); 4447 if (is_store) { 4448 Value value = args->at(2); 4449 Instruction* store = append(new StoreIndexed(array, index, nullptr, T_CHAR, value, state_before, false, true)); 4450 store->set_flag(Instruction::NeedsRangeCheckFlag, false); 4451 _memory->store_value(value); 4452 } else { 4453 Instruction* load = append(new LoadIndexed(array, index, nullptr, T_CHAR, state_before, true)); 4454 load->set_flag(Instruction::NeedsRangeCheckFlag, false); 4455 push(load->type(), load); 4456 } 4457 } 4458 4459 void GraphBuilder::append_alloc_array_copy(ciMethod* callee) { 4460 const int args_base = state()->stack_size() - callee->arg_size(); 4461 ciType* receiver_type = state()->stack_at(args_base)->exact_type(); 4462 if (receiver_type == nullptr) { 4463 inline_bailout("must have a receiver"); 4464 return; 4465 } 4466 if (!receiver_type->is_type_array_klass()) { 4467 inline_bailout("clone array not primitive"); 4468 return; 4469 } 4470 4471 ValueStack* state_before = copy_state_before(); 4472 state_before->set_force_reexecute(); 4473 Value src = apop(); 4474 BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type(); 4475 Value length = append(new ArrayLength(src, state_before)); 4476 Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false)); 4477 4478 ValueType* result_type = as_ValueType(callee->return_type()); 4479 vmIntrinsics::ID id = vmIntrinsics::_arraycopy; 4480 Values* args = new Values(5); 4481 args->push(src); 4482 args->push(append(new Constant(new IntConstant(0)))); 4483 args->push(new_array); 4484 args->push(append(new Constant(new IntConstant(0)))); 4485 args->push(length); 4486 const bool has_receiver = true; 4487 Intrinsic* array_copy = new Intrinsic(result_type, id, 4488 args, has_receiver, state_before, 4489 vmIntrinsics::preserves_state(id), 4490 vmIntrinsics::can_trap(id)); 4491 array_copy->set_flag(Instruction::OmitChecksFlag, true); 4492 append_split(array_copy); 4493 apush(new_array); 4494 append(new MemBar(lir_membar_storestore)); 4495 } 4496 4497 void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { 4498 CompileLog* log = compilation()->log(); 4499 if (log != nullptr) { 4500 assert(msg != nullptr, "inlining msg should not be null!"); 4501 if (success) { 4502 log->inline_success(msg); 4503 } else { 4504 log->inline_fail(msg); 4505 } 4506 } 4507 EventCompilerInlining event; 4508 if (event.should_commit()) { 4509 CompilerEvent::InlineEvent::post(event, compilation()->env()->task()->compile_id(), method()->get_Method(), callee, success, msg, bci()); 4510 } 4511 4512 CompileTask::print_inlining_ul(callee, scope()->level(), bci(), inlining_result_of(success), msg); 4513 4514 if (!compilation()->directive()->PrintInliningOption) { 4515 return; 4516 } 4517 CompileTask::print_inlining_tty(callee, scope()->level(), bci(), inlining_result_of(success), msg); 4518 if (success && CIPrintMethodCodes) { 4519 callee->print_codes(); 4520 } 4521 } 4522 4523 void GraphBuilder::append_unsafe_get_and_set(ciMethod* callee, bool is_add) { 4524 Values* args = state()->pop_arguments(callee->arg_size()); 4525 BasicType t = callee->return_type()->basic_type(); 4526 null_check(args->at(0)); 4527 Instruction* offset = args->at(2); 4528 #ifndef _LP64 4529 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); 4530 #endif 4531 Instruction* op = append(new UnsafeGetAndSet(t, args->at(1), offset, args->at(3), is_add)); 4532 compilation()->set_has_unsafe_access(true); 4533 kill_all(); 4534 push(op->type(), op); 4535 } 4536 4537 #ifndef PRODUCT 4538 void GraphBuilder::print_stats() { 4539 if (UseLocalValueNumbering) { 4540 vmap()->print(); 4541 } 4542 } 4543 #endif // PRODUCT 4544 4545 void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) { 4546 assert(known_holder == nullptr || (known_holder->is_instance_klass() && 4547 (!known_holder->is_interface() || 4548 ((ciInstanceKlass*)known_holder)->has_nonstatic_concrete_methods())), "should be non-static concrete method"); 4549 if (known_holder != nullptr) { 4550 if (known_holder->exact_klass() == nullptr) { 4551 known_holder = compilation()->cha_exact_type(known_holder); 4552 } 4553 } 4554 4555 append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined)); 4556 } 4557 4558 void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) { 4559 assert((m == nullptr) == (invoke_bci < 0), "invalid method and invalid bci together"); 4560 if (m == nullptr) { 4561 m = method(); 4562 } 4563 if (invoke_bci < 0) { 4564 invoke_bci = bci(); 4565 } 4566 ciMethodData* md = m->method_data_or_null(); 4567 ciProfileData* data = md->bci_to_data(invoke_bci); 4568 if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 4569 bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return(); 4570 if (has_return) { 4571 append(new ProfileReturnType(m , invoke_bci, callee, ret)); 4572 } 4573 } 4574 } 4575 4576 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) { 4577 append(new ProfileInvoke(callee, state)); 4578 }