/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciSymbols.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/runtimeUpcalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif

class BlockListBuilder {
 private:
  Compilation* _compilation;
  IRScope*     _scope;

  BlockList    _blocks;                // internal list of all blocks
  BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
  GrowableArray<BlockList> _bci2block_successors; // mapping from bcis to their blocks' successors while we don't yet have a BlockEnd

  // fields used by mark_loops
  ResourceBitMap _active;              // for iteration of control flow graph
  ResourceBitMap _visited;             // for iteration of control flow graph
  GrowableArray<ResourceBitMap> _loop_map; // caches the information whether a block is contained in a loop
  int            _next_loop_index;     // next free loop number
  int            _next_block_number;   // for reverse postorder numbering of blocks
  int            _block_id_start;

  int bit_number(int block_id) const { return block_id - _block_id_start; }

  // accessors
  Compilation*  compilation() const  { return _compilation; }
  IRScope*      scope() const        { return _scope; }
  ciMethod*     method() const       { return scope()->method(); }
  XHandlers*    xhandlers() const    { return scope()->xhandlers(); }

  // unified bailout support
  void bailout(const char* msg) const { compilation()->bailout(msg); }
  bool bailed_out() const             { return compilation()->bailed_out(); }

  // helper functions
  BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
  void handle_exceptions(BlockBegin* current, int cur_bci);
  void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
  void store_one(BlockBegin* current,
int local); 86 void store_two(BlockBegin* current, int local); 87 void set_entries(int osr_bci); 88 void set_leaders(); 89 90 void make_loop_header(BlockBegin* block); 91 void mark_loops(); 92 BitMap& mark_loops(BlockBegin* b, bool in_subroutine); 93 94 // debugging 95 #ifndef PRODUCT 96 void print(); 97 #endif 98 99 int number_of_successors(BlockBegin* block); 100 BlockBegin* successor_at(BlockBegin* block, int i); 101 void add_successor(BlockBegin* block, BlockBegin* sux); 102 bool is_successor(BlockBegin* block, BlockBegin* sux); 103 104 public: 105 // creation 106 BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci); 107 108 // accessors for GraphBuilder 109 BlockList* bci2block() const { return _bci2block; } 110 }; 111 112 113 // Implementation of BlockListBuilder 114 115 BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci) 116 : _compilation(compilation) 117 , _scope(scope) 118 , _blocks(16) 119 , _bci2block(new BlockList(scope->method()->code_size(), nullptr)) 120 , _bci2block_successors(scope->method()->code_size()) 121 , _active() // size not known yet 122 , _visited() // size not known yet 123 , _loop_map() // size not known yet 124 , _next_loop_index(0) 125 , _next_block_number(0) 126 , _block_id_start(0) 127 { 128 set_entries(osr_bci); 129 set_leaders(); 130 CHECK_BAILOUT(); 131 132 mark_loops(); 133 NOT_PRODUCT(if (PrintInitialBlockList) print()); 134 135 // _bci2block still contains blocks with _end == null and > 0 sux in _bci2block_successors. 136 137 #ifndef PRODUCT 138 if (PrintCFGToFile) { 139 stringStream title; 140 title.print("BlockListBuilder "); 141 scope->method()->print_name(&title); 142 CFGPrinter::print_cfg(_bci2block, title.freeze(), false, false); 143 } 144 #endif 145 } 146 147 148 void BlockListBuilder::set_entries(int osr_bci) { 149 // generate start blocks 150 BlockBegin* std_entry = make_block_at(0, nullptr); 151 if (scope()->caller() == nullptr) { 152 std_entry->set(BlockBegin::std_entry_flag); 153 } 154 if (osr_bci != -1) { 155 BlockBegin* osr_entry = make_block_at(osr_bci, nullptr); 156 osr_entry->set(BlockBegin::osr_entry_flag); 157 } 158 159 // generate exception entry blocks 160 XHandlers* list = xhandlers(); 161 const int n = list->length(); 162 for (int i = 0; i < n; i++) { 163 XHandler* h = list->handler_at(i); 164 BlockBegin* entry = make_block_at(h->handler_bci(), nullptr); 165 entry->set(BlockBegin::exception_entry_flag); 166 h->set_entry_block(entry); 167 } 168 } 169 170 171 BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) { 172 assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer"); 173 174 BlockBegin* block = _bci2block->at(cur_bci); 175 if (block == nullptr) { 176 block = new BlockBegin(cur_bci); 177 block->init_stores_to_locals(method()->max_locals()); 178 _bci2block->at_put(cur_bci, block); 179 _bci2block_successors.at_put_grow(cur_bci, BlockList()); 180 _blocks.append(block); 181 182 assert(predecessor == nullptr || predecessor->bci() < cur_bci, "targets for backward branches must already exist"); 183 } 184 185 if (predecessor != nullptr) { 186 if (block->is_set(BlockBegin::exception_entry_flag)) { 187 BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block); 188 } 189 190 add_successor(predecessor, block); 191 block->increment_total_preds(); 192 } 193 194 return block; 195 } 196 197 198 inline void BlockListBuilder::store_one(BlockBegin* current, int local) { 199 
  current->stores_to_locals().set_bit(local);
}
inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
  store_one(current, local);
  store_one(current, local + 1);
}


void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
  // Draws edges from a block to its exception handlers
  XHandlers* list = xhandlers();
  const int n = list->length();

  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);

    if (h->covers(cur_bci)) {
      BlockBegin* entry = h->entry_block();
      assert(entry != nullptr && entry == _bci2block->at(h->handler_bci()), "entry must be set");
      assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");

      // add each exception handler only once
      if (!is_successor(current, entry)) {
        add_successor(current, entry);
        entry->increment_total_preds();
      }

      // stop when reaching catchall
      if (h->catch_type() == 0) break;
    }
  }
}

void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
  if (next_bci < method()->code_size()) {
    // start a new block after the jsr bytecode and link this block into the cfg
    make_block_at(next_bci, current);
  }

  // start a new block at the subroutine entry and mark it with the special flag
  BlockBegin* sr_block = make_block_at(sr_bci, current);
  if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
    sr_block->set(BlockBegin::subroutine_entry_flag);
  }
}


void BlockListBuilder::set_leaders() {
  bool has_xhandlers = xhandlers()->has_handlers();
  BlockBegin* current = nullptr;

  // The information about which bcis start a new block simplifies the analysis.
  // Without it, backward branches could jump to a bci where no block was created
  // during bytecode iteration. This would require the creation of a new block at the
  // branch target and a modification of the successor lists.
254 const BitMap& bci_block_start = method()->bci_block_start(); 255 256 int end_bci = method()->code_size(); 257 258 ciBytecodeStream s(method()); 259 while (s.next() != ciBytecodeStream::EOBC()) { 260 int cur_bci = s.cur_bci(); 261 262 if (bci_block_start.at(cur_bci)) { 263 current = make_block_at(cur_bci, current); 264 } 265 assert(current != nullptr, "must have current block"); 266 267 if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) { 268 handle_exceptions(current, cur_bci); 269 } 270 271 switch (s.cur_bc()) { 272 // track stores to local variables for selective creation of phi functions 273 case Bytecodes::_iinc: store_one(current, s.get_index()); break; 274 case Bytecodes::_istore: store_one(current, s.get_index()); break; 275 case Bytecodes::_lstore: store_two(current, s.get_index()); break; 276 case Bytecodes::_fstore: store_one(current, s.get_index()); break; 277 case Bytecodes::_dstore: store_two(current, s.get_index()); break; 278 case Bytecodes::_astore: store_one(current, s.get_index()); break; 279 case Bytecodes::_istore_0: store_one(current, 0); break; 280 case Bytecodes::_istore_1: store_one(current, 1); break; 281 case Bytecodes::_istore_2: store_one(current, 2); break; 282 case Bytecodes::_istore_3: store_one(current, 3); break; 283 case Bytecodes::_lstore_0: store_two(current, 0); break; 284 case Bytecodes::_lstore_1: store_two(current, 1); break; 285 case Bytecodes::_lstore_2: store_two(current, 2); break; 286 case Bytecodes::_lstore_3: store_two(current, 3); break; 287 case Bytecodes::_fstore_0: store_one(current, 0); break; 288 case Bytecodes::_fstore_1: store_one(current, 1); break; 289 case Bytecodes::_fstore_2: store_one(current, 2); break; 290 case Bytecodes::_fstore_3: store_one(current, 3); break; 291 case Bytecodes::_dstore_0: store_two(current, 0); break; 292 case Bytecodes::_dstore_1: store_two(current, 1); break; 293 case Bytecodes::_dstore_2: store_two(current, 2); break; 294 case Bytecodes::_dstore_3: store_two(current, 3); break; 295 case Bytecodes::_astore_0: store_one(current, 0); break; 296 case Bytecodes::_astore_1: store_one(current, 1); break; 297 case Bytecodes::_astore_2: store_one(current, 2); break; 298 case Bytecodes::_astore_3: store_one(current, 3); break; 299 300 // track bytecodes that affect the control flow 301 case Bytecodes::_athrow: // fall through 302 case Bytecodes::_ret: // fall through 303 case Bytecodes::_ireturn: // fall through 304 case Bytecodes::_lreturn: // fall through 305 case Bytecodes::_freturn: // fall through 306 case Bytecodes::_dreturn: // fall through 307 case Bytecodes::_areturn: // fall through 308 case Bytecodes::_return: 309 current = nullptr; 310 break; 311 312 case Bytecodes::_ifeq: // fall through 313 case Bytecodes::_ifne: // fall through 314 case Bytecodes::_iflt: // fall through 315 case Bytecodes::_ifge: // fall through 316 case Bytecodes::_ifgt: // fall through 317 case Bytecodes::_ifle: // fall through 318 case Bytecodes::_if_icmpeq: // fall through 319 case Bytecodes::_if_icmpne: // fall through 320 case Bytecodes::_if_icmplt: // fall through 321 case Bytecodes::_if_icmpge: // fall through 322 case Bytecodes::_if_icmpgt: // fall through 323 case Bytecodes::_if_icmple: // fall through 324 case Bytecodes::_if_acmpeq: // fall through 325 case Bytecodes::_if_acmpne: // fall through 326 case Bytecodes::_ifnull: // fall through 327 case Bytecodes::_ifnonnull: 328 if (s.next_bci() < end_bci) { 329 make_block_at(s.next_bci(), current); 330 } 331 make_block_at(s.get_dest(), current); 332 
current = nullptr; 333 break; 334 335 case Bytecodes::_goto: 336 make_block_at(s.get_dest(), current); 337 current = nullptr; 338 break; 339 340 case Bytecodes::_goto_w: 341 make_block_at(s.get_far_dest(), current); 342 current = nullptr; 343 break; 344 345 case Bytecodes::_jsr: 346 handle_jsr(current, s.get_dest(), s.next_bci()); 347 current = nullptr; 348 break; 349 350 case Bytecodes::_jsr_w: 351 handle_jsr(current, s.get_far_dest(), s.next_bci()); 352 current = nullptr; 353 break; 354 355 case Bytecodes::_tableswitch: { 356 // set block for each case 357 Bytecode_tableswitch sw(&s); 358 int l = sw.length(); 359 for (int i = 0; i < l; i++) { 360 make_block_at(cur_bci + sw.dest_offset_at(i), current); 361 } 362 make_block_at(cur_bci + sw.default_offset(), current); 363 current = nullptr; 364 break; 365 } 366 367 case Bytecodes::_lookupswitch: { 368 // set block for each case 369 Bytecode_lookupswitch sw(&s); 370 int l = sw.number_of_pairs(); 371 for (int i = 0; i < l; i++) { 372 make_block_at(cur_bci + sw.pair_at(i).offset(), current); 373 } 374 make_block_at(cur_bci + sw.default_offset(), current); 375 current = nullptr; 376 break; 377 } 378 379 default: 380 break; 381 } 382 } 383 } 384 385 386 void BlockListBuilder::mark_loops() { 387 ResourceMark rm; 388 389 const int number_of_blocks = _blocks.length(); 390 _active.initialize(number_of_blocks); 391 _visited.initialize(number_of_blocks); 392 _loop_map = GrowableArray<ResourceBitMap>(number_of_blocks, number_of_blocks, ResourceBitMap()); 393 for (int i = 0; i < number_of_blocks; i++) { 394 _loop_map.at(i).initialize(number_of_blocks); 395 } 396 _next_loop_index = 0; 397 _next_block_number = _blocks.length(); 398 399 // The loop detection algorithm works as follows: 400 // - We maintain the _loop_map, where for each block we have a bitmap indicating which loops contain this block. 401 // - The CFG is recursively traversed (depth-first) and if we detect a loop, we assign the loop a unique number that is stored 402 // in the bitmap associated with the loop header block. Until we return back through that loop header the bitmap contains 403 // only a single bit corresponding to the loop number. 404 // - The bit is then propagated for all the blocks in the loop after we exit them (post-order). There could be multiple bits 405 // of course in case of nested loops. 406 // - When we exit the loop header we remove that single bit and assign the real loop state for it. 407 // - Now, the tricky part here is how we detect irreducible loops. In the algorithm above the loop state bits 408 // are propagated to the predecessors. If we encounter an irreducible loop (a loop with multiple heads) we would see 409 // a node with some loop bit set that would then propagate back and be never cleared because we would 410 // never go back through the original loop header. Therefore if there are any irreducible loops the bits in the states 411 // for these loops are going to propagate back to the root. 412 BlockBegin* start = _bci2block->at(0); 413 _block_id_start = start->block_id(); 414 BitMap& loop_state = mark_loops(start, false); 415 if (!loop_state.is_empty()) { 416 compilation()->set_has_irreducible_loops(true); 417 } 418 assert(_next_block_number >= 0, "invalid block numbers"); 419 420 // Remove dangling Resource pointers before the ResourceMark goes out-of-scope. 
421 _active.resize(0); 422 _visited.resize(0); 423 _loop_map.clear(); 424 } 425 426 void BlockListBuilder::make_loop_header(BlockBegin* block) { 427 int block_id = block->block_id(); 428 int block_bit = bit_number(block_id); 429 if (block->is_set(BlockBegin::exception_entry_flag)) { 430 // exception edges may look like loops but don't mark them as such 431 // since it screws up block ordering. 432 return; 433 } 434 if (!block->is_set(BlockBegin::parser_loop_header_flag)) { 435 block->set(BlockBegin::parser_loop_header_flag); 436 437 assert(_loop_map.at(block_bit).is_empty(), "must not be set yet"); 438 assert(0 <= _next_loop_index && _next_loop_index < _loop_map.length(), "_next_loop_index is too large"); 439 _loop_map.at(block_bit).set_bit(_next_loop_index++); 440 } else { 441 // block already marked as loop header 442 assert(_loop_map.at(block_bit).count_one_bits() == 1, "exactly one bit must be set"); 443 } 444 } 445 446 BitMap& BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) { 447 int block_id = block->block_id(); 448 int block_bit = bit_number(block_id); 449 if (_visited.at(block_bit)) { 450 if (_active.at(block_bit)) { 451 // reached block via backward branch 452 make_loop_header(block); 453 } 454 // return cached loop information for this block 455 return _loop_map.at(block_bit); 456 } 457 458 if (block->is_set(BlockBegin::subroutine_entry_flag)) { 459 in_subroutine = true; 460 } 461 462 // set active and visited bits before successors are processed 463 _visited.set_bit(block_bit); 464 _active.set_bit(block_bit); 465 466 ResourceMark rm; 467 ResourceBitMap loop_state(_loop_map.length()); 468 for (int i = number_of_successors(block) - 1; i >= 0; i--) { 469 BlockBegin* sux = successor_at(block, i); 470 // recursively process all successors 471 loop_state.set_union(mark_loops(sux, in_subroutine)); 472 } 473 474 // clear active-bit after all successors are processed 475 _active.clear_bit(block_bit); 476 477 // reverse-post-order numbering of all blocks 478 block->set_depth_first_number(_next_block_number); 479 _next_block_number--; 480 481 if (!loop_state.is_empty() || in_subroutine ) { 482 // block is contained at least in one loop, so phi functions are necessary 483 // phi functions are also necessary for all locals stored in a subroutine 484 scope()->requires_phi_function().set_union(block->stores_to_locals()); 485 } 486 487 if (block->is_set(BlockBegin::parser_loop_header_flag)) { 488 BitMap& header_loop_state = _loop_map.at(block_bit); 489 assert(header_loop_state.count_one_bits() == 1, "exactly one bit must be set"); 490 // remove the bit with the loop number for the state (header is outside of the loop) 491 loop_state.set_difference(header_loop_state); 492 } 493 494 // cache and return loop information for this block 495 _loop_map.at(block_bit).set_from(loop_state); 496 return _loop_map.at(block_bit); 497 } 498 499 inline int BlockListBuilder::number_of_successors(BlockBegin* block) 500 { 501 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 502 return _bci2block_successors.at(block->bci()).length(); 503 } 504 505 inline BlockBegin* BlockListBuilder::successor_at(BlockBegin* block, int i) 506 { 507 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 508 return _bci2block_successors.at(block->bci()).at(i); 509 } 510 511 inline void BlockListBuilder::add_successor(BlockBegin* block, BlockBegin* sux) 512 { 513 assert(_bci2block_successors.length() > block->bci(), "sux must exist"); 514 
  _bci2block_successors.at(block->bci()).append(sux);
}

inline bool BlockListBuilder::is_successor(BlockBegin* block, BlockBegin* sux) {
  assert(_bci2block_successors.length() > block->bci(), "sux must exist");
  return _bci2block_successors.at(block->bci()).contains(sux);
}

#ifndef PRODUCT

static int compare_depth_first(BlockBegin** a, BlockBegin** b) {
  return (*a)->depth_first_number() - (*b)->depth_first_number();
}

void BlockListBuilder::print() {
  tty->print("----- initial block list of BlockListBuilder for method ");
  method()->print_short_name();
  tty->cr();

  // better readability if blocks are sorted in processing order
  _blocks.sort(compare_depth_first);

  for (int i = 0; i < _blocks.length(); i++) {
    BlockBegin* cur = _blocks.at(i);
    tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());

    tty->print(cur->is_set(BlockBegin::std_entry_flag)          ? " std" : " ");
    tty->print(cur->is_set(BlockBegin::osr_entry_flag)          ? " osr" : " ");
    tty->print(cur->is_set(BlockBegin::exception_entry_flag)    ? " ex"  : " ");
    tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)   ? " sr"  : " ");
    tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh"  : " ");

    if (number_of_successors(cur) > 0) {
      tty->print(" sux: ");
      for (int j = 0; j < number_of_successors(cur); j++) {
        BlockBegin* sux = successor_at(cur, j);
        tty->print("B%d ", sux->block_id());
      }
    }
    tty->cr();
  }
}

#endif


// A simple growable array of Values indexed by ciFields
class FieldBuffer: public CompilationResourceObj {
 private:
  GrowableArray<Value> _values;

 public:
  FieldBuffer() {}

  void kill() {
    _values.trunc_to(0);
  }

  Value at(ciField* field) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset_in_bytes();
    if (offset < _values.length()) {
      return _values.at(offset);
    } else {
      return nullptr;
    }
  }

  void at_put(ciField* field, Value value) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset_in_bytes();
    _values.at_put_grow(offset, value, nullptr);
  }

};


// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces. The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V. This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V. Loads of O.F can
// simply use V. Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields. Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant. Once newly allocated objects are stored into
// another object or they are passed out of the current compile they
// are treated like generic memory.
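//
// Illustrative sketch (editor's note; the Java shapes below are hypothetical and
// not part of the original comment): how the model behaves for a fresh allocation.
//
//   Point p = new Point();   // new_instance(): p is tracked in _newobjects with its own FieldBuffer
//   p.x = 0;                 // store of a default value to a never-written field -> store eliminated
//   p.y = 42;                // recorded in p's FieldBuffer
//   int a = p.y;             // load is satisfied from the recorded value 42
//   q.next = p;              // p escapes into another object: store_value() drops it from _newobjects,
//                            // so later accesses go through the generic <O, F, V> tuples instead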
605 606 class MemoryBuffer: public CompilationResourceObj { 607 private: 608 FieldBuffer _values; 609 GrowableArray<Value> _objects; 610 GrowableArray<Value> _newobjects; 611 GrowableArray<FieldBuffer*> _fields; 612 613 public: 614 MemoryBuffer() {} 615 616 StoreField* store(StoreField* st) { 617 if (!EliminateFieldAccess) { 618 return st; 619 } 620 621 Value object = st->obj(); 622 Value value = st->value(); 623 ciField* field = st->field(); 624 if (field->holder()->is_loaded()) { 625 int offset = field->offset_in_bytes(); 626 int index = _newobjects.find(object); 627 if (index != -1) { 628 // newly allocated object with no other stores performed on this field 629 FieldBuffer* buf = _fields.at(index); 630 if (buf->at(field) == nullptr && is_default_value(value)) { 631 #ifndef PRODUCT 632 if (PrintIRDuringConstruction && Verbose) { 633 tty->print_cr("Eliminated store for object %d:", index); 634 st->print_line(); 635 } 636 #endif 637 return nullptr; 638 } else { 639 buf->at_put(field, value); 640 } 641 } else { 642 _objects.at_put_grow(offset, object, nullptr); 643 _values.at_put(field, value); 644 } 645 646 store_value(value); 647 } else { 648 // if we held onto field names we could alias based on names but 649 // we don't know what's being stored to so kill it all. 650 kill(); 651 } 652 return st; 653 } 654 655 656 // return true if this value correspond to the default value of a field. 657 bool is_default_value(Value value) { 658 Constant* con = value->as_Constant(); 659 if (con) { 660 switch (con->type()->tag()) { 661 case intTag: return con->type()->as_IntConstant()->value() == 0; 662 case longTag: return con->type()->as_LongConstant()->value() == 0; 663 case floatTag: return jint_cast(con->type()->as_FloatConstant()->value()) == 0; 664 case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0); 665 case objectTag: return con->type() == objectNull; 666 default: ShouldNotReachHere(); 667 } 668 } 669 return false; 670 } 671 672 673 // return either the actual value of a load or the load itself 674 Value load(LoadField* load) { 675 if (!EliminateFieldAccess) { 676 return load; 677 } 678 679 if (strict_fp_requires_explicit_rounding && load->type()->is_float_kind()) { 680 #ifdef IA32 681 if (UseSSE < 2) { 682 // can't skip load since value might get rounded as a side effect 683 return load; 684 } 685 #else 686 Unimplemented(); 687 #endif // IA32 688 } 689 690 ciField* field = load->field(); 691 Value object = load->obj(); 692 if (field->holder()->is_loaded() && !field->is_volatile()) { 693 int offset = field->offset_in_bytes(); 694 Value result = nullptr; 695 int index = _newobjects.find(object); 696 if (index != -1) { 697 result = _fields.at(index)->at(field); 698 } else if (_objects.at_grow(offset, nullptr) == object) { 699 result = _values.at(field); 700 } 701 if (result != nullptr) { 702 #ifndef PRODUCT 703 if (PrintIRDuringConstruction && Verbose) { 704 tty->print_cr("Eliminated load: "); 705 load->print_line(); 706 } 707 #endif 708 assert(result->type()->tag() == load->type()->tag(), "wrong types"); 709 return result; 710 } 711 } 712 return load; 713 } 714 715 // Record this newly allocated object 716 void new_instance(NewInstance* object) { 717 int index = _newobjects.length(); 718 _newobjects.append(object); 719 if (_fields.at_grow(index, nullptr) == nullptr) { 720 _fields.at_put(index, new FieldBuffer()); 721 } else { 722 _fields.at(index)->kill(); 723 } 724 } 725 726 void store_value(Value value) { 727 int index = _newobjects.find(value); 728 if 
(index != -1) {
      // stored a newly allocated object into another object.
      // Assume we've lost track of it as a separate slice of memory.
      // We could do better by keeping track of whether individual
      // fields could alias each other.
      _newobjects.remove_at(index);
      // pull out the field info and store it at the end of the list
      // of field infos so it can be reused later.
      _fields.append(_fields.at(index));
      _fields.remove_at(index);
    }
  }

  void kill() {
    _newobjects.trunc_to(0);
    _objects.trunc_to(0);
    _values.kill();
  }
};


// Implementation of GraphBuilder's ScopeData

GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
  : _parent(parent)
  , _bci2block(nullptr)
  , _scope(nullptr)
  , _has_handler(false)
  , _stream(nullptr)
  , _work_list(nullptr)
  , _caller_stack_size(-1)
  , _continuation(nullptr)
  , _parsing_jsr(false)
  , _jsr_xhandlers(nullptr)
  , _num_returns(0)
  , _cleanup_block(nullptr)
  , _cleanup_return_prev(nullptr)
  , _cleanup_state(nullptr)
  , _ignore_return(false)
{
  if (parent != nullptr) {
    _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
  } else {
    _max_inline_size = C1MaxInlineSize;
  }
  if (_max_inline_size < C1MaxTrivialSize) {
    _max_inline_size = C1MaxTrivialSize;
  }
}


void GraphBuilder::kill_all() {
  if (UseLocalValueNumbering) {
    vmap()->kill_all();
  }
  _memory->kill();
}


BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
  if (parsing_jsr()) {
    // It is necessary to clone all blocks associated with a
    // subroutine, including those for exception handlers in the scope
    // of the method containing the jsr (because those exception
    // handlers may contain ret instructions in some cases).
    BlockBegin* block = bci2block()->at(bci);
    if (block != nullptr && block == parent()->bci2block()->at(bci)) {
      BlockBegin* new_block = new BlockBegin(block->bci());
      if (PrintInitialBlockList) {
        tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
                      block->block_id(), block->bci(), new_block->block_id());
      }
      // copy data from the cloned block
      new_block->set_depth_first_number(block->depth_first_number());
      if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
      // Preserve certain flags for assertion checking
      if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
      if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);

      // copy was_visited_flag to allow early detection of bailouts
      // if a block that is used in a jsr has already been visited before,
      // it is shared between the normal control flow and a subroutine
      // BlockBegin::try_merge returns false when the flag is set, this leads
      // to a compilation bailout
      if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);

      bci2block()->at_put(bci, new_block);
      block = new_block;
    }
    return block;
  } else {
    return bci2block()->at(bci);
  }
}


XHandlers* GraphBuilder::ScopeData::xhandlers() const {
  if (_jsr_xhandlers == nullptr) {
    assert(!parsing_jsr(), "");
    return scope()->xhandlers();
  }
  assert(parsing_jsr(), "");
  return _jsr_xhandlers;
}


void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
  _scope = scope;
  bool parent_has_handler = false;
  if (parent() != nullptr) {
    parent_has_handler = parent()->has_handler();
  }
  _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
}


void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
                                                      Instruction* return_prev,
                                                      ValueStack* return_state) {
  _cleanup_block       = block;
  _cleanup_return_prev = return_prev;
  _cleanup_state       = return_state;
}


void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
  if (_work_list == nullptr) {
    _work_list = new BlockList();
  }

  if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
    // Do not start parsing the continuation block while in a
    // sub-scope
    if (parsing_jsr()) {
      if (block == jsr_continuation()) {
        return;
      }
    } else {
      if (block == continuation()) {
        return;
      }
    }
    block->set(BlockBegin::is_on_work_list_flag);
    _work_list->push(block);

    sort_top_into_worklist(_work_list, block);
  }
}


void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
  assert(worklist->top() == top, "");
  // sort block descending into work list
  const int dfn = top->depth_first_number();
  assert(dfn != -1, "unknown depth first number");
  int i = worklist->length()-2;
  while (i >= 0) {
    BlockBegin* b = worklist->at(i);
    if (b->depth_first_number() < dfn) {
      worklist->at_put(i+1, b);
    } else {
      break;
    }
    i --;
  }
  if (i >= -1) worklist->at_put(i + 1, top);
}


BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
  if (is_work_list_empty()) {
    return nullptr;
  }
  return _work_list->pop();
}


bool
GraphBuilder::ScopeData::is_work_list_empty() const { 906 return (_work_list == nullptr || _work_list->length() == 0); 907 } 908 909 910 void GraphBuilder::ScopeData::setup_jsr_xhandlers() { 911 assert(parsing_jsr(), ""); 912 // clone all the exception handlers from the scope 913 XHandlers* handlers = new XHandlers(scope()->xhandlers()); 914 const int n = handlers->length(); 915 for (int i = 0; i < n; i++) { 916 // The XHandlers need to be adjusted to dispatch to the cloned 917 // handler block instead of the default one but the synthetic 918 // unlocker needs to be handled specially. The synthetic unlocker 919 // should be left alone since there can be only one and all code 920 // should dispatch to the same one. 921 XHandler* h = handlers->handler_at(i); 922 assert(h->handler_bci() != SynchronizationEntryBCI, "must be real"); 923 h->set_entry_block(block_at(h->handler_bci())); 924 } 925 _jsr_xhandlers = handlers; 926 } 927 928 929 int GraphBuilder::ScopeData::num_returns() { 930 if (parsing_jsr()) { 931 return parent()->num_returns(); 932 } 933 return _num_returns; 934 } 935 936 937 void GraphBuilder::ScopeData::incr_num_returns() { 938 if (parsing_jsr()) { 939 parent()->incr_num_returns(); 940 } else { 941 ++_num_returns; 942 } 943 } 944 945 946 // Implementation of GraphBuilder 947 948 #define INLINE_BAILOUT(msg) { inline_bailout(msg); return false; } 949 950 951 void GraphBuilder::load_constant() { 952 ciConstant con = stream()->get_constant(); 953 if (con.is_valid()) { 954 ValueType* t = illegalType; 955 ValueStack* patch_state = nullptr; 956 switch (con.basic_type()) { 957 case T_BOOLEAN: t = new IntConstant (con.as_boolean()); break; 958 case T_BYTE : t = new IntConstant (con.as_byte ()); break; 959 case T_CHAR : t = new IntConstant (con.as_char ()); break; 960 case T_SHORT : t = new IntConstant (con.as_short ()); break; 961 case T_INT : t = new IntConstant (con.as_int ()); break; 962 case T_LONG : t = new LongConstant (con.as_long ()); break; 963 case T_FLOAT : t = new FloatConstant (con.as_float ()); break; 964 case T_DOUBLE : t = new DoubleConstant(con.as_double ()); break; 965 case T_ARRAY : // fall-through 966 case T_OBJECT : { 967 ciObject* obj = con.as_object(); 968 if (!obj->is_loaded() || (PatchALot && !stream()->is_string_constant())) { 969 // A Class, MethodType, MethodHandle, Dynamic, or String. 970 patch_state = copy_state_before(); 971 t = new ObjectConstant(obj); 972 } else { 973 // Might be a Class, MethodType, MethodHandle, or Dynamic constant 974 // result, which might turn out to be an array. 975 if (obj->is_null_object()) { 976 t = objectNull; 977 } else if (obj->is_array()) { 978 t = new ArrayConstant(obj->as_array()); 979 } else { 980 t = new InstanceConstant(obj->as_instance()); 981 } 982 } 983 break; 984 } 985 default: ShouldNotReachHere(); 986 } 987 Value x; 988 if (patch_state != nullptr) { 989 // Arbitrary memory effects from running BSM or class loading (using custom loader) during linkage. 990 bool kills_memory = stream()->is_dynamic_constant() || 991 (!stream()->is_string_constant() && !method()->holder()->has_trusted_loader()); 992 x = new Constant(t, patch_state, kills_memory); 993 } else { 994 x = new Constant(t); 995 } 996 997 // Unbox the value at runtime, if needed. 998 // ConstantDynamic entry can be of a primitive type, but it is cached in boxed form. 
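    // (Editor's note, illustrative: a CONSTANT_Dynamic of a primitive type such as int is
    //  resolved to its box, e.g. a java.lang.Integer; the LoadField appended below reads the
    //  box's 'value' field to recover the primitive.)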
999 if (patch_state != nullptr) { 1000 int cp_index = stream()->get_constant_pool_index(); 1001 BasicType type = stream()->get_basic_type_for_constant_at(cp_index); 1002 if (is_java_primitive(type)) { 1003 ciInstanceKlass* box_klass = ciEnv::current()->get_box_klass_for_primitive_type(type); 1004 assert(box_klass->is_loaded(), "sanity"); 1005 int offset = java_lang_boxing_object::value_offset(type); 1006 ciField* value_field = box_klass->get_field_by_offset(offset, false /*is_static*/); 1007 x = new LoadField(append(x), offset, value_field, false /*is_static*/, patch_state, false /*needs_patching*/); 1008 t = as_ValueType(type); 1009 } else { 1010 assert(is_reference_type(type), "not a reference: %s", type2name(type)); 1011 } 1012 } 1013 1014 push(t, append(x)); 1015 } else { 1016 BAILOUT("could not resolve a constant"); 1017 } 1018 } 1019 1020 1021 void GraphBuilder::load_local(ValueType* type, int index) { 1022 Value x = state()->local_at(index); 1023 assert(x != nullptr && !x->type()->is_illegal(), "access of illegal local variable"); 1024 push(type, x); 1025 } 1026 1027 1028 void GraphBuilder::store_local(ValueType* type, int index) { 1029 Value x = pop(type); 1030 store_local(state(), x, index); 1031 } 1032 1033 1034 void GraphBuilder::store_local(ValueStack* state, Value x, int index) { 1035 if (parsing_jsr()) { 1036 // We need to do additional tracking of the location of the return 1037 // address for jsrs since we don't handle arbitrary jsr/ret 1038 // constructs. Here we are figuring out in which circumstances we 1039 // need to bail out. 1040 if (x->type()->is_address()) { 1041 scope_data()->set_jsr_return_address_local(index); 1042 1043 // Also check parent jsrs (if any) at this time to see whether 1044 // they are using this local. We don't handle skipping over a 1045 // ret. 
1046 for (ScopeData* cur_scope_data = scope_data()->parent(); 1047 cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope(); 1048 cur_scope_data = cur_scope_data->parent()) { 1049 if (cur_scope_data->jsr_return_address_local() == index) { 1050 BAILOUT("subroutine overwrites return address from previous subroutine"); 1051 } 1052 } 1053 } else if (index == scope_data()->jsr_return_address_local()) { 1054 scope_data()->set_jsr_return_address_local(-1); 1055 } 1056 } 1057 1058 state->store_local(index, round_fp(x)); 1059 } 1060 1061 1062 void GraphBuilder::load_indexed(BasicType type) { 1063 // In case of in block code motion in range check elimination 1064 ValueStack* state_before = copy_state_indexed_access(); 1065 compilation()->set_has_access_indexed(true); 1066 Value index = ipop(); 1067 Value array = apop(); 1068 Value length = nullptr; 1069 if (CSEArrayLength || 1070 (array->as_Constant() != nullptr) || 1071 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || 1072 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) || 1073 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) { 1074 length = append(new ArrayLength(array, state_before)); 1075 } 1076 push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before))); 1077 } 1078 1079 1080 void GraphBuilder::store_indexed(BasicType type) { 1081 // In case of in block code motion in range check elimination 1082 ValueStack* state_before = copy_state_indexed_access(); 1083 compilation()->set_has_access_indexed(true); 1084 Value value = pop(as_ValueType(type)); 1085 Value index = ipop(); 1086 Value array = apop(); 1087 Value length = nullptr; 1088 if (CSEArrayLength || 1089 (array->as_Constant() != nullptr) || 1090 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || 1091 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) || 1092 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) { 1093 length = append(new ArrayLength(array, state_before)); 1094 } 1095 ciType* array_type = array->declared_type(); 1096 bool check_boolean = false; 1097 if (array_type != nullptr) { 1098 if (array_type->is_loaded() && 1099 array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) { 1100 assert(type == T_BYTE, "boolean store uses bastore"); 1101 Value mask = append(new Constant(new IntConstant(1))); 1102 value = append(new LogicOp(Bytecodes::_iand, value, mask)); 1103 } 1104 } else if (type == T_BYTE) { 1105 check_boolean = true; 1106 } 1107 StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean); 1108 append(result); 1109 _memory->store_value(value); 1110 1111 if (type == T_OBJECT && is_profiling()) { 1112 // Note that we'd collect profile data in this method if we wanted it. 
1113 compilation()->set_would_profile(true); 1114 1115 if (profile_checkcasts()) { 1116 result->set_profiled_method(method()); 1117 result->set_profiled_bci(bci()); 1118 result->set_should_profile(true); 1119 } 1120 } 1121 } 1122 1123 1124 void GraphBuilder::stack_op(Bytecodes::Code code) { 1125 switch (code) { 1126 case Bytecodes::_pop: 1127 { state()->raw_pop(); 1128 } 1129 break; 1130 case Bytecodes::_pop2: 1131 { state()->raw_pop(); 1132 state()->raw_pop(); 1133 } 1134 break; 1135 case Bytecodes::_dup: 1136 { Value w = state()->raw_pop(); 1137 state()->raw_push(w); 1138 state()->raw_push(w); 1139 } 1140 break; 1141 case Bytecodes::_dup_x1: 1142 { Value w1 = state()->raw_pop(); 1143 Value w2 = state()->raw_pop(); 1144 state()->raw_push(w1); 1145 state()->raw_push(w2); 1146 state()->raw_push(w1); 1147 } 1148 break; 1149 case Bytecodes::_dup_x2: 1150 { Value w1 = state()->raw_pop(); 1151 Value w2 = state()->raw_pop(); 1152 Value w3 = state()->raw_pop(); 1153 state()->raw_push(w1); 1154 state()->raw_push(w3); 1155 state()->raw_push(w2); 1156 state()->raw_push(w1); 1157 } 1158 break; 1159 case Bytecodes::_dup2: 1160 { Value w1 = state()->raw_pop(); 1161 Value w2 = state()->raw_pop(); 1162 state()->raw_push(w2); 1163 state()->raw_push(w1); 1164 state()->raw_push(w2); 1165 state()->raw_push(w1); 1166 } 1167 break; 1168 case Bytecodes::_dup2_x1: 1169 { Value w1 = state()->raw_pop(); 1170 Value w2 = state()->raw_pop(); 1171 Value w3 = state()->raw_pop(); 1172 state()->raw_push(w2); 1173 state()->raw_push(w1); 1174 state()->raw_push(w3); 1175 state()->raw_push(w2); 1176 state()->raw_push(w1); 1177 } 1178 break; 1179 case Bytecodes::_dup2_x2: 1180 { Value w1 = state()->raw_pop(); 1181 Value w2 = state()->raw_pop(); 1182 Value w3 = state()->raw_pop(); 1183 Value w4 = state()->raw_pop(); 1184 state()->raw_push(w2); 1185 state()->raw_push(w1); 1186 state()->raw_push(w4); 1187 state()->raw_push(w3); 1188 state()->raw_push(w2); 1189 state()->raw_push(w1); 1190 } 1191 break; 1192 case Bytecodes::_swap: 1193 { Value w1 = state()->raw_pop(); 1194 Value w2 = state()->raw_pop(); 1195 state()->raw_push(w1); 1196 state()->raw_push(w2); 1197 } 1198 break; 1199 default: 1200 ShouldNotReachHere(); 1201 break; 1202 } 1203 } 1204 1205 1206 void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) { 1207 Value y = pop(type); 1208 Value x = pop(type); 1209 Value res = new ArithmeticOp(code, x, y, state_before); 1210 // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level 1211 res = append(res); 1212 res = round_fp(res); 1213 push(type, res); 1214 } 1215 1216 1217 void GraphBuilder::negate_op(ValueType* type) { 1218 push(type, append(new NegateOp(pop(type)))); 1219 } 1220 1221 1222 void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) { 1223 Value s = ipop(); 1224 Value x = pop(type); 1225 // try to simplify 1226 // Note: This code should go into the canonicalizer as soon as it can 1227 // can handle canonicalized forms that contain more than one node. 
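  // Illustrative sketch (editor's note): for a constant shift amount the pattern handled
  // below rewrites, e.g., (a << 3) >>> 3 into a & 0x1FFFFFFF (m = right_n_bits(32 - 3)),
  // and (a << 0) >>> 0 into just 'a'.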
1228 if (CanonicalizeNodes && code == Bytecodes::_iushr) { 1229 // pattern: x >>> s 1230 IntConstant* s1 = s->type()->as_IntConstant(); 1231 if (s1 != nullptr) { 1232 // pattern: x >>> s1, with s1 constant 1233 ShiftOp* l = x->as_ShiftOp(); 1234 if (l != nullptr && l->op() == Bytecodes::_ishl) { 1235 // pattern: (a << b) >>> s1 1236 IntConstant* s0 = l->y()->type()->as_IntConstant(); 1237 if (s0 != nullptr) { 1238 // pattern: (a << s0) >>> s1 1239 const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts 1240 const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts 1241 if (s0c == s1c) { 1242 if (s0c == 0) { 1243 // pattern: (a << 0) >>> 0 => simplify to: a 1244 ipush(l->x()); 1245 } else { 1246 // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant 1247 assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases"); 1248 const int m = checked_cast<int>(right_n_bits(BitsPerInt - s0c)); 1249 Value s = append(new Constant(new IntConstant(m))); 1250 ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s))); 1251 } 1252 return; 1253 } 1254 } 1255 } 1256 } 1257 } 1258 // could not simplify 1259 push(type, append(new ShiftOp(code, x, s))); 1260 } 1261 1262 1263 void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) { 1264 Value y = pop(type); 1265 Value x = pop(type); 1266 push(type, append(new LogicOp(code, x, y))); 1267 } 1268 1269 1270 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) { 1271 ValueStack* state_before = copy_state_before(); 1272 Value y = pop(type); 1273 Value x = pop(type); 1274 ipush(append(new CompareOp(code, x, y, state_before))); 1275 } 1276 1277 1278 void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) { 1279 push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to)))); 1280 } 1281 1282 1283 void GraphBuilder::increment() { 1284 int index = stream()->get_index(); 1285 int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]); 1286 load_local(intType, index); 1287 ipush(append(new Constant(new IntConstant(delta)))); 1288 arithmetic_op(intType, Bytecodes::_iadd); 1289 store_local(intType, index); 1290 } 1291 1292 1293 void GraphBuilder::_goto(int from_bci, int to_bci) { 1294 Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci); 1295 if (is_profiling()) { 1296 compilation()->set_would_profile(true); 1297 x->set_profiled_bci(bci()); 1298 if (profile_branches()) { 1299 x->set_profiled_method(method()); 1300 x->set_should_profile(true); 1301 } 1302 } 1303 append(x); 1304 } 1305 1306 1307 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) { 1308 BlockBegin* tsux = block_at(stream()->get_dest()); 1309 BlockBegin* fsux = block_at(stream()->next_bci()); 1310 bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci(); 1311 // In case of loop invariant code motion or predicate insertion 1312 // before the body of a loop the state is needed 1313 Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? 
state_before : nullptr, is_bb)); 1314 1315 assert(i->as_Goto() == nullptr || 1316 (i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) || 1317 (i->as_Goto()->sux_at(0) == fsux && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())), 1318 "safepoint state of Goto returned by canonicalizer incorrect"); 1319 1320 if (is_profiling()) { 1321 If* if_node = i->as_If(); 1322 if (if_node != nullptr) { 1323 // Note that we'd collect profile data in this method if we wanted it. 1324 compilation()->set_would_profile(true); 1325 // At level 2 we need the proper bci to count backedges 1326 if_node->set_profiled_bci(bci()); 1327 if (profile_branches()) { 1328 // Successors can be rotated by the canonicalizer, check for this case. 1329 if_node->set_profiled_method(method()); 1330 if_node->set_should_profile(true); 1331 if (if_node->tsux() == fsux) { 1332 if_node->set_swapped(true); 1333 } 1334 } 1335 return; 1336 } 1337 1338 // Check if this If was reduced to Goto. 1339 Goto *goto_node = i->as_Goto(); 1340 if (goto_node != nullptr) { 1341 compilation()->set_would_profile(true); 1342 goto_node->set_profiled_bci(bci()); 1343 if (profile_branches()) { 1344 goto_node->set_profiled_method(method()); 1345 goto_node->set_should_profile(true); 1346 // Find out which successor is used. 1347 if (goto_node->default_sux() == tsux) { 1348 goto_node->set_direction(Goto::taken); 1349 } else if (goto_node->default_sux() == fsux) { 1350 goto_node->set_direction(Goto::not_taken); 1351 } else { 1352 ShouldNotReachHere(); 1353 } 1354 } 1355 return; 1356 } 1357 } 1358 } 1359 1360 1361 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) { 1362 Value y = append(new Constant(intZero)); 1363 ValueStack* state_before = copy_state_before(); 1364 Value x = ipop(); 1365 if_node(x, cond, y, state_before); 1366 } 1367 1368 1369 void GraphBuilder::if_null(ValueType* type, If::Condition cond) { 1370 Value y = append(new Constant(objectNull)); 1371 ValueStack* state_before = copy_state_before(); 1372 Value x = apop(); 1373 if_node(x, cond, y, state_before); 1374 } 1375 1376 1377 void GraphBuilder::if_same(ValueType* type, If::Condition cond) { 1378 ValueStack* state_before = copy_state_before(); 1379 Value y = pop(type); 1380 Value x = pop(type); 1381 if_node(x, cond, y, state_before); 1382 } 1383 1384 1385 void GraphBuilder::jsr(int dest) { 1386 // We only handle well-formed jsrs (those which are "block-structured"). 1387 // If the bytecodes are strange (jumping out of a jsr block) then we 1388 // might end up trying to re-parse a block containing a jsr which 1389 // has already been activated. Watch for this case and bail out. 1390 if (next_bci() >= method()->code_size()) { 1391 // This can happen if the subroutine does not terminate with a ret, 1392 // effectively turning the jsr into a goto. 
    BAILOUT("too-complicated jsr/ret structure");
  }
  for (ScopeData* cur_scope_data = scope_data();
       cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
       cur_scope_data = cur_scope_data->parent()) {
    if (cur_scope_data->jsr_entry_bci() == dest) {
      BAILOUT("too-complicated jsr/ret structure");
    }
  }

  push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  if (!try_inline_jsr(dest)) {
    return; // bailed out while parsing and inlining subroutine
  }
}


void GraphBuilder::ret(int local_index) {
  if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");

  if (local_index != scope_data()->jsr_return_address_local()) {
    BAILOUT("can not handle complicated jsr/ret constructs");
  }

  // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  append(new Goto(scope_data()->jsr_continuation(), false));
}


void GraphBuilder::table_switch() {
  Bytecode_tableswitch sw(stream());
  const int l = sw.length();
  if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it can
    //       handle canonicalized forms that contain more than one node.
    Value key = append(new Constant(new IntConstant(sw.low_key())));
    BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
    BlockBegin* fsux = block_at(bci() + sw.default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(is_bb);
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors
    BlockList* sux = new BlockList(l + 1, nullptr);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
      if (sw.dest_offset_at(i) < 0) has_bb = true;
    }
    // add default successor
    if (sw.default_offset() < 0) has_bb = true;
    sux->at_put(i, block_at(bci() + sw.default_offset()));
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(has_bb);
    Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT
    if (res->as_Goto()) {
      for (i = 0; i < l; i++) {
        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
          assert(res->as_Goto()->is_safepoint() == (sw.dest_offset_at(i) < 0), "safepoint state of Goto returned by canonicalizer incorrect");
        }
      }
    }
#endif
  }
}


void GraphBuilder::lookup_switch() {
  Bytecode_lookupswitch sw(stream());
  const int l = sw.number_of_pairs();
  if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it can
    //       handle canonicalized forms that contain more than one node.
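    // Illustrative sketch (editor's note, hypothetical Java source): a lookupswitch with a
    // single pair such as
    //   switch (x) { case 7: ...; default: ... }
    // has only two successors and is emitted below as 'if (x == 7)' rather than a LookupSwitch.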
1473 // simplify to If 1474 LookupswitchPair pair = sw.pair_at(0); 1475 Value key = append(new Constant(new IntConstant(pair.match()))); 1476 BlockBegin* tsux = block_at(bci() + pair.offset()); 1477 BlockBegin* fsux = block_at(bci() + sw.default_offset()); 1478 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); 1479 // In case of loop invariant code motion or predicate insertion 1480 // before the body of a loop the state is needed 1481 ValueStack* state_before = copy_state_if_bb(is_bb);; 1482 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); 1483 } else { 1484 // collect successors & keys 1485 BlockList* sux = new BlockList(l + 1, nullptr); 1486 intArray* keys = new intArray(l, l, 0); 1487 int i; 1488 bool has_bb = false; 1489 for (i = 0; i < l; i++) { 1490 LookupswitchPair pair = sw.pair_at(i); 1491 if (pair.offset() < 0) has_bb = true; 1492 sux->at_put(i, block_at(bci() + pair.offset())); 1493 keys->at_put(i, pair.match()); 1494 } 1495 // add default successor 1496 if (sw.default_offset() < 0) has_bb = true; 1497 sux->at_put(i, block_at(bci() + sw.default_offset())); 1498 // In case of loop invariant code motion or predicate insertion 1499 // before the body of a loop the state is needed 1500 ValueStack* state_before = copy_state_if_bb(has_bb); 1501 Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb)); 1502 #ifdef ASSERT 1503 if (res->as_Goto()) { 1504 for (i = 0; i < l; i++) { 1505 if (sux->at(i) == res->as_Goto()->sux_at(0)) { 1506 assert(res->as_Goto()->is_safepoint() == (sw.pair_at(i).offset() < 0), "safepoint state of Goto returned by canonicalizer incorrect"); 1507 } 1508 } 1509 } 1510 #endif 1511 } 1512 } 1513 1514 void GraphBuilder::call_register_finalizer() { 1515 // If the receiver requires finalization then emit code to perform 1516 // the registration on return. 1517 1518 // Gather some type information about the receiver 1519 Value receiver = state()->local_at(0); 1520 assert(receiver != nullptr, "must have a receiver"); 1521 ciType* declared_type = receiver->declared_type(); 1522 ciType* exact_type = receiver->exact_type(); 1523 if (exact_type == nullptr && 1524 receiver->as_Local() && 1525 receiver->as_Local()->java_index() == 0) { 1526 ciInstanceKlass* ik = compilation()->method()->holder(); 1527 if (ik->is_final()) { 1528 exact_type = ik; 1529 } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) { 1530 // test class is leaf class 1531 compilation()->dependency_recorder()->assert_leaf_type(ik); 1532 exact_type = ik; 1533 } else { 1534 declared_type = ik; 1535 } 1536 } 1537 1538 // see if we know statically that registration isn't required 1539 bool needs_check = true; 1540 if (exact_type != nullptr) { 1541 needs_check = exact_type->as_instance_klass()->has_finalizer(); 1542 } else if (declared_type != nullptr) { 1543 ciInstanceKlass* ik = declared_type->as_instance_klass(); 1544 if (!Dependencies::has_finalizable_subclass(ik)) { 1545 compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik); 1546 needs_check = false; 1547 } 1548 } 1549 1550 if (needs_check) { 1551 // Perform the registration of finalizable objects. 
1552 ValueStack* state_before = copy_state_for_exception(); 1553 load_local(objectType, 0); 1554 append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init, 1555 state()->pop_arguments(1), 1556 true, state_before, true)); 1557 } 1558 } 1559 1560 1561 void GraphBuilder::method_return(Value x, bool ignore_return) { 1562 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) { 1563 call_register_finalizer(); 1564 } 1565 1566 // The conditions for a memory barrier are described in Parse::do_exits(). 1567 bool need_mem_bar = false; 1568 if (method()->name() == ciSymbols::object_initializer_name() && 1569 (scope()->wrote_final() || scope()->wrote_stable() || 1570 (AlwaysSafeConstructors && scope()->wrote_fields()) || 1571 (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) { 1572 need_mem_bar = true; 1573 } 1574 1575 BasicType bt = method()->return_type()->basic_type(); 1576 switch (bt) { 1577 case T_BYTE: 1578 { 1579 Value shift = append(new Constant(new IntConstant(24))); 1580 x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); 1581 x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); 1582 break; 1583 } 1584 case T_SHORT: 1585 { 1586 Value shift = append(new Constant(new IntConstant(16))); 1587 x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); 1588 x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); 1589 break; 1590 } 1591 case T_CHAR: 1592 { 1593 Value mask = append(new Constant(new IntConstant(0xFFFF))); 1594 x = append(new LogicOp(Bytecodes::_iand, x, mask)); 1595 break; 1596 } 1597 case T_BOOLEAN: 1598 { 1599 Value mask = append(new Constant(new IntConstant(1))); 1600 x = append(new LogicOp(Bytecodes::_iand, x, mask)); 1601 break; 1602 } 1603 default: 1604 break; 1605 } 1606 1607 // Check to see whether we are inlining. If so, Return 1608 // instructions become Gotos to the continuation point. 1609 if (continuation() != nullptr) { 1610 1611 int invoke_bci = state()->caller_state()->bci(); 1612 1613 if (x != nullptr && !ignore_return) { 1614 ciMethod* caller = state()->scope()->caller()->method(); 1615 Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci); 1616 if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) { 1617 ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type(); 1618 if (declared_ret_type->is_klass() && x->exact_type() == nullptr && 1619 x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) { 1620 x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before())); 1621 } 1622 } 1623 } 1624 1625 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet"); 1626 1627 if (compilation()->env()->dtrace_method_probes()) { 1628 // Report exit from inline methods 1629 Values* args = new Values(1); 1630 args->push(append(new Constant(new MethodConstant(method())))); 1631 append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args)); 1632 } 1633 1634 // If the inlined method is synchronized, the monitor must be 1635 // released before we jump to the continuation block. 
1636 if (method()->is_synchronized()) { 1637 assert(state()->locks_size() == 1, "receiver must be locked here"); 1638 monitorexit(state()->lock_at(0), SynchronizationEntryBCI); 1639 } 1640 1641 if (need_mem_bar) { 1642 append(new MemBar(lir_membar_storestore)); 1643 } 1644 1645 // State at end of inlined method is the state of the caller 1646 // without the method parameters on stack, including the 1647 // return value, if any, of the inlined method on operand stack. 1648 set_state(state()->caller_state()->copy_for_parsing()); 1649 if (x != nullptr) { 1650 if (!ignore_return) { 1651 state()->push(x->type(), x); 1652 } 1653 if (profile_return() && x->type()->is_object_kind()) { 1654 ciMethod* caller = state()->scope()->method(); 1655 profile_return_type(x, method(), caller, invoke_bci); 1656 } 1657 } 1658 Goto* goto_callee = new Goto(continuation(), false); 1659 1660 // See whether this is the first return; if so, store off some 1661 // of the state for later examination 1662 if (num_returns() == 0) { 1663 set_inline_cleanup_info(); 1664 } 1665 1666 // The current bci() is in the wrong scope, so use the bci() of 1667 // the continuation point. 1668 append_with_bci(goto_callee, scope_data()->continuation()->bci()); 1669 incr_num_returns(); 1670 return; 1671 } 1672 1673 state()->truncate_stack(0); 1674 if (method()->is_synchronized()) { 1675 // perform the unlocking before exiting the method 1676 Value receiver; 1677 if (!method()->is_static()) { 1678 receiver = _initial_state->local_at(0); 1679 } else { 1680 receiver = append(new Constant(new ClassConstant(method()->holder()))); 1681 } 1682 append_split(new MonitorExit(receiver, state()->unlock())); 1683 } 1684 1685 if (need_mem_bar) { 1686 append(new MemBar(lir_membar_storestore)); 1687 } 1688 1689 assert(!ignore_return, "Ignoring return value works only for inlining"); 1690 append(new Return(x)); 1691 } 1692 1693 Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) { 1694 if (!field_value.is_valid()) return nullptr; 1695 1696 BasicType field_type = field_value.basic_type(); 1697 ValueType* value = as_ValueType(field_value); 1698 1699 // Attach dimension info to stable arrays. 1700 if (FoldStableValues && 1701 field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) { 1702 ciArray* array = field_value.as_object()->as_array(); 1703 jint dimension = field->type()->as_array_klass()->dimension(); 1704 value = new StableArrayConstant(array, dimension); 1705 } 1706 1707 switch (field_type) { 1708 case T_ARRAY: 1709 case T_OBJECT: 1710 if (field_value.as_object()->should_be_constant()) { 1711 return new Constant(value); 1712 } 1713 return nullptr; // Not a constant. 1714 default: 1715 return new Constant(value); 1716 } 1717 } 1718 1719 void GraphBuilder::access_field(Bytecodes::Code code) { 1720 bool will_link; 1721 ciField* field = stream()->get_field(will_link); 1722 ciInstanceKlass* holder = field->holder(); 1723 BasicType field_type = field->type()->basic_type(); 1724 ValueType* type = as_ValueType(field_type); 1725 // call will_link again to determine if the field is valid. 
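 // Patching is needed whenever the field reference cannot be fully resolved at compile time
 // (unloaded holder or failed linkage), or always when PatchALot is set to exercise that path.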
1726 const bool needs_patching = !holder->is_loaded() || 1727 !field->will_link(method(), code) || 1728 PatchALot; 1729 1730 ValueStack* state_before = nullptr; 1731 if (!holder->is_initialized() || needs_patching) { 1732 // save state before instruction for debug info when 1733 // deoptimization happens during patching 1734 state_before = copy_state_before(); 1735 } 1736 1737 Value obj = nullptr; 1738 if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) { 1739 if (state_before != nullptr) { 1740 // build a patching constant 1741 obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before); 1742 } else { 1743 obj = new Constant(new InstanceConstant(holder->java_mirror())); 1744 } 1745 } 1746 1747 if (code == Bytecodes::_putfield) { 1748 scope()->set_wrote_fields(); 1749 if (field->is_volatile()) { 1750 scope()->set_wrote_volatile(); 1751 } 1752 if (field->is_final()) { 1753 scope()->set_wrote_final(); 1754 } 1755 if (field->is_stable()) { 1756 scope()->set_wrote_stable(); 1757 } 1758 } 1759 1760 const int offset = !needs_patching ? field->offset_in_bytes() : -1; 1761 switch (code) { 1762 case Bytecodes::_getstatic: { 1763 // check for compile-time constants, i.e., initialized static final fields 1764 Value constant = nullptr; 1765 if (field->is_static_constant() && !PatchALot) { 1766 ciConstant field_value = field->constant_value(); 1767 assert(!field->is_stable() || !field_value.is_null_or_zero(), 1768 "stable static w/ default value shouldn't be a constant"); 1769 constant = make_constant(field_value, field); 1770 } 1771 if (constant != nullptr) { 1772 push(type, append(constant)); 1773 } else { 1774 if (state_before == nullptr) { 1775 state_before = copy_state_for_exception(); 1776 } 1777 push(type, append(new LoadField(append(obj), offset, field, true, 1778 state_before, needs_patching))); 1779 } 1780 break; 1781 } 1782 case Bytecodes::_putstatic: { 1783 Value val = pop(type); 1784 if (state_before == nullptr) { 1785 state_before = copy_state_for_exception(); 1786 } 1787 if (field->type()->basic_type() == T_BOOLEAN) { 1788 Value mask = append(new Constant(new IntConstant(1))); 1789 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 1790 } 1791 append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching)); 1792 break; 1793 } 1794 case Bytecodes::_getfield: { 1795 // Check for compile-time constants, i.e., trusted final non-static fields. 1796 Value constant = nullptr; 1797 obj = apop(); 1798 ObjectType* obj_type = obj->type()->as_ObjectType(); 1799 if (field->is_constant() && obj_type->is_constant() && !PatchALot) { 1800 ciObject* const_oop = obj_type->constant_value(); 1801 if (!const_oop->is_null_object() && const_oop->is_loaded()) { 1802 ciConstant field_value = field->constant_value_of(const_oop); 1803 if (field_value.is_valid()) { 1804 constant = make_constant(field_value, field); 1805 // For CallSite objects add a dependency for invalidation of the optimization. 
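 // The dependency recorded below invalidates this nmethod if the CallSite's target is ever changed.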
1806 if (field->is_call_site_target()) { 1807 ciCallSite* call_site = const_oop->as_call_site(); 1808 if (!call_site->is_fully_initialized_constant_call_site()) { 1809 ciMethodHandle* target = field_value.as_object()->as_method_handle(); 1810 dependency_recorder()->assert_call_site_target_value(call_site, target); 1811 } 1812 } 1813 } 1814 } 1815 } 1816 if (constant != nullptr) { 1817 push(type, append(constant)); 1818 } else { 1819 if (state_before == nullptr) { 1820 state_before = copy_state_for_exception(); 1821 } 1822 LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); 1823 Value replacement = !needs_patching ? _memory->load(load) : load; 1824 if (replacement != load) { 1825 assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); 1826 // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing 1827 // conversion. Emit an explicit conversion here to get the correct field value after the write. 1828 BasicType bt = field->type()->basic_type(); 1829 switch (bt) { 1830 case T_BOOLEAN: 1831 case T_BYTE: 1832 replacement = append(new Convert(Bytecodes::_i2b, replacement, as_ValueType(bt))); 1833 break; 1834 case T_CHAR: 1835 replacement = append(new Convert(Bytecodes::_i2c, replacement, as_ValueType(bt))); 1836 break; 1837 case T_SHORT: 1838 replacement = append(new Convert(Bytecodes::_i2s, replacement, as_ValueType(bt))); 1839 break; 1840 default: 1841 break; 1842 } 1843 push(type, replacement); 1844 } else { 1845 push(type, append(load)); 1846 } 1847 } 1848 break; 1849 } 1850 case Bytecodes::_putfield: { 1851 Value val = pop(type); 1852 obj = apop(); 1853 if (state_before == nullptr) { 1854 state_before = copy_state_for_exception(); 1855 } 1856 if (field->type()->basic_type() == T_BOOLEAN) { 1857 Value mask = append(new Constant(new IntConstant(1))); 1858 val = append(new LogicOp(Bytecodes::_iand, val, mask)); 1859 } 1860 StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); 1861 if (!needs_patching) store = _memory->store(store); 1862 if (store != nullptr) { 1863 append(store); 1864 } 1865 break; 1866 } 1867 default: 1868 ShouldNotReachHere(); 1869 break; 1870 } 1871 } 1872 1873 1874 Dependencies* GraphBuilder::dependency_recorder() const { 1875 assert(DeoptC1, "need debug information"); 1876 return compilation()->dependency_recorder(); 1877 } 1878 1879 // How many arguments do we want to profile? 1880 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) { 1881 int n = 0; 1882 bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci())); 1883 start = has_receiver ? 1 : 0; 1884 if (profile_arguments()) { 1885 ciProfileData* data = method()->method_data()->bci_to_data(bci()); 1886 if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 1887 n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments(); 1888 } 1889 } 1890 // If we are inlining then we need to collect arguments to profile parameters for the target 1891 if (profile_parameters() && target != nullptr) { 1892 if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) { 1893 // The receiver is profiled on method entry so it's included in 1894 // the number of parameters but here we're only interested in 1895 // actual arguments. 
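 // 'start' skips the receiver slot, so the count below covers only the explicit arguments.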
1896 n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start); 1897 } 1898 } 1899 if (n > 0) { 1900 return new Values(n); 1901 } 1902 return nullptr; 1903 } 1904 1905 void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) { 1906 #ifdef ASSERT 1907 bool ignored_will_link; 1908 ciSignature* declared_signature = nullptr; 1909 ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); 1910 assert(expected == obj_args->capacity() || real_target->is_method_handle_intrinsic(), "missed on arg?"); 1911 #endif 1912 } 1913 1914 // Collect arguments that we want to profile in a list 1915 Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) { 1916 int start = 0; 1917 Values* obj_args = args_list_for_profiling(target, start, may_have_receiver); 1918 if (obj_args == nullptr) { 1919 return nullptr; 1920 } 1921 int s = obj_args->capacity(); 1922 // if called through method handle invoke, some arguments may have been popped 1923 for (int i = start, j = 0; j < s && i < args->length(); i++) { 1924 if (args->at(i)->type()->is_object_kind()) { 1925 obj_args->push(args->at(i)); 1926 j++; 1927 } 1928 } 1929 check_args_for_profiling(obj_args, s); 1930 return obj_args; 1931 } 1932 1933 void GraphBuilder::invoke(Bytecodes::Code code) { 1934 bool will_link; 1935 ciSignature* declared_signature = nullptr; 1936 ciMethod* target = stream()->get_method(will_link, &declared_signature); 1937 ciKlass* holder = stream()->get_declared_method_holder(); 1938 const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); 1939 assert(declared_signature != nullptr, "cannot be null"); 1940 assert(will_link == target->is_loaded(), ""); 1941 JFR_ONLY(Jfr::on_resolution(this, holder, target); CHECK_BAILOUT();) 1942 1943 ciInstanceKlass* klass = target->holder(); 1944 assert(!target->is_loaded() || klass->is_loaded(), "loaded target must imply loaded klass"); 1945 1946 // check if CHA possible: if so, change the code to invoke_special 1947 ciInstanceKlass* calling_klass = method()->holder(); 1948 ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); 1949 ciInstanceKlass* actual_recv = callee_holder; 1950 1951 CompileLog* log = compilation()->log(); 1952 if (log != nullptr) 1953 log->elem("call method='%d' instr='%s'", 1954 log->identify(target), 1955 Bytecodes::name(code)); 1956 1957 // Some methods are obviously bindable without any type checks so 1958 // convert them directly to an invokespecial or invokestatic. 1959 if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { 1960 switch (bc_raw) { 1961 case Bytecodes::_invokeinterface: 1962 // convert to invokespecial if the target is the private interface method. 1963 if (target->is_private()) { 1964 assert(holder->is_interface(), "How did we get a non-interface method here!"); 1965 code = Bytecodes::_invokespecial; 1966 } 1967 break; 1968 case Bytecodes::_invokevirtual: 1969 code = Bytecodes::_invokespecial; 1970 break; 1971 case Bytecodes::_invokehandle: 1972 code = target->is_static() ? 
Bytecodes::_invokestatic : Bytecodes::_invokespecial; 1973 break; 1974 default: 1975 break; 1976 } 1977 } else { 1978 if (bc_raw == Bytecodes::_invokehandle) { 1979 assert(!will_link, "should come here only for unlinked call"); 1980 code = Bytecodes::_invokespecial; 1981 } 1982 } 1983 1984 if (code == Bytecodes::_invokespecial) { 1985 // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface. 1986 ciKlass* receiver_constraint = nullptr; 1987 1988 if (bc_raw == Bytecodes::_invokeinterface) { 1989 receiver_constraint = holder; 1990 } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer() && calling_klass->is_interface()) { 1991 receiver_constraint = calling_klass; 1992 } 1993 1994 if (receiver_constraint != nullptr) { 1995 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); 1996 Value receiver = state()->stack_at(index); 1997 CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before()); 1998 // go to uncommon_trap when checkcast fails 1999 c->set_invokespecial_receiver_check(); 2000 state()->stack_at_put(index, append_split(c)); 2001 } 2002 } 2003 2004 // Push appendix argument (MethodType, CallSite, etc.), if one. 2005 bool patch_for_appendix = false; 2006 int patching_appendix_arg = 0; 2007 if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) { 2008 Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before())); 2009 apush(arg); 2010 patch_for_appendix = true; 2011 patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1; 2012 } else if (stream()->has_appendix()) { 2013 ciObject* appendix = stream()->get_appendix(); 2014 Value arg = append(new Constant(new ObjectConstant(appendix))); 2015 apush(arg); 2016 } 2017 2018 ciMethod* cha_monomorphic_target = nullptr; 2019 ciMethod* exact_target = nullptr; 2020 Value better_receiver = nullptr; 2021 if (UseCHA && DeoptC1 && target->is_loaded() && 2022 !(// %%% FIXME: Are both of these relevant? 2023 target->is_method_handle_intrinsic() || 2024 target->is_compiled_lambda_form()) && 2025 !patch_for_appendix) { 2026 Value receiver = nullptr; 2027 ciInstanceKlass* receiver_klass = nullptr; 2028 bool type_is_exact = false; 2029 // try to find a precise receiver type 2030 if (will_link && !target->is_static()) { 2031 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); 2032 receiver = state()->stack_at(index); 2033 ciType* type = receiver->exact_type(); 2034 if (type != nullptr && type->is_loaded()) { 2035 assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface"); 2036 // Detects non-interface instances, primitive arrays, and some object arrays. 2037 // Array receivers can only call Object methods, so we should be able to allow 2038 // all object arrays here too, even those with unloaded types. 2039 receiver_klass = (ciInstanceKlass*) type; 2040 type_is_exact = true; 2041 } 2042 if (type == nullptr) { 2043 type = receiver->declared_type(); 2044 if (type != nullptr && type->is_loaded() && 2045 type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { 2046 receiver_klass = (ciInstanceKlass*) type; 2047 if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) { 2048 // Insert a dependency on this type since 2049 // find_monomorphic_target may assume it's already done. 
2050 dependency_recorder()->assert_leaf_type(receiver_klass); 2051 type_is_exact = true; 2052 } 2053 } 2054 } 2055 } 2056 if (receiver_klass != nullptr && type_is_exact && 2057 receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) { 2058 // If we have the exact receiver type we can bind directly to 2059 // the method to call. 2060 exact_target = target->resolve_invoke(calling_klass, receiver_klass); 2061 if (exact_target != nullptr) { 2062 target = exact_target; 2063 code = Bytecodes::_invokespecial; 2064 } 2065 } 2066 if (receiver_klass != nullptr && 2067 receiver_klass->is_subtype_of(actual_recv) && 2068 actual_recv->is_initialized()) { 2069 actual_recv = receiver_klass; 2070 } 2071 2072 if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) || 2073 (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) { 2074 // Use CHA on the receiver to select a more precise method. 2075 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv); 2076 } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != nullptr) { 2077 assert(callee_holder->is_interface(), "invokeinterface to non interface?"); 2078 // If there is only one implementor of this interface then we 2079 // may be able to bind this invoke directly to the implementing 2080 // klass but we need both a dependence on the single interface 2081 // and on the method we bind to. Additionally, since all we know 2082 // about the receiver type is that it's supposed to implement the 2083 // interface we have to insert a check that it's the class we 2084 // expect. Interface types are not checked by the verifier so 2085 // they are roughly equivalent to Object. 2086 // The number of implementors for declared_interface is less than or 2087 // equal to the number of implementors for target->holder(), so 2088 // if the number of implementors of target->holder() == 1 then the 2089 // number of implementors for decl_interface is 0 or 1. If 2090 // it's 0 then no class implements decl_interface and there's 2091 // no point in inlining. 2092 ciInstanceKlass* declared_interface = callee_holder; 2093 ciInstanceKlass* singleton = declared_interface->unique_implementor(); 2094 if (singleton != nullptr) { 2095 assert(singleton != declared_interface, "not a unique implementor"); 2096 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, declared_interface, singleton); 2097 if (cha_monomorphic_target != nullptr) { 2098 ciInstanceKlass* holder = cha_monomorphic_target->holder(); 2099 ciInstanceKlass* constraint = (holder->is_subtype_of(singleton) ? holder : singleton); // avoid upcasts 2100 if (holder != compilation()->env()->Object_klass() && 2101 (!type_is_exact || receiver_klass->is_subtype_of(constraint))) { 2102 actual_recv = declared_interface; 2103 2104 // insert a check that it's really the expected class.
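 // The CheckCast below is flagged as an incompatible-class-change check, so a receiver that is
 // not a subtype of 'constraint' raises IncompatibleClassChangeError rather than ClassCastException.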
2105 CheckCast* c = new CheckCast(constraint, receiver, copy_state_for_exception()); 2106 c->set_incompatible_class_change_check(); 2107 c->set_direct_compare(constraint->is_final()); 2108 // pass the result of the checkcast so that the compiler has 2109 // more accurate type info in the inlinee 2110 better_receiver = append_split(c); 2111 2112 dependency_recorder()->assert_unique_implementor(declared_interface, singleton); 2113 } else { 2114 cha_monomorphic_target = nullptr; 2115 } 2116 } 2117 } 2118 } 2119 } 2120 2121 if (cha_monomorphic_target != nullptr) { 2122 assert(!target->can_be_statically_bound() || target == cha_monomorphic_target, ""); 2123 assert(!cha_monomorphic_target->is_abstract(), ""); 2124 if (!cha_monomorphic_target->can_be_statically_bound(actual_recv)) { 2125 // If we inlined because CHA revealed only a single target method, 2126 // then we are dependent on that target method not getting overridden 2127 // by dynamic class loading. Be sure to test the "static" receiver 2128 // dest_method here, as opposed to the actual receiver, which may 2129 // falsely lead us to believe that the receiver is final or private. 2130 dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target, callee_holder, target); 2131 } 2132 code = Bytecodes::_invokespecial; 2133 } 2134 2135 // check if we could do inlining 2136 if (!PatchALot && Inline && target->is_loaded() && !patch_for_appendix && 2137 callee_holder->is_loaded()) { // the effect of symbolic reference resolution 2138 2139 // callee is known => check if we have static binding 2140 if ((code == Bytecodes::_invokestatic && klass->is_initialized()) || // invokestatic involves an initialization barrier on declaring class 2141 code == Bytecodes::_invokespecial || 2142 (code == Bytecodes::_invokevirtual && target->is_final_method()) || 2143 code == Bytecodes::_invokedynamic) { 2144 // static binding => check if callee is ok 2145 ciMethod* inline_target = (cha_monomorphic_target != nullptr) ? cha_monomorphic_target : target; 2146 bool holder_known = (cha_monomorphic_target != nullptr) || (exact_target != nullptr); 2147 bool success = try_inline(inline_target, holder_known, false /* ignore_return */, code, better_receiver); 2148 2149 CHECK_BAILOUT(); 2150 clear_inline_bailout(); 2151 2152 if (success) { 2153 // Register dependence if JVMTI has either breakpoint 2154 // setting or hotswapping of methods capabilities since they may 2155 // cause deoptimization. 2156 if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) { 2157 dependency_recorder()->assert_evol_method(inline_target); 2158 } 2159 return; 2160 } 2161 } else { 2162 print_inlining(target, "no static binding", /*success*/ false); 2163 } 2164 } else { 2165 print_inlining(target, "not inlineable", /*success*/ false); 2166 } 2167 2168 // If we attempted an inline which did not succeed because of a 2169 // bailout during construction of the callee graph, the entire 2170 // compilation has to be aborted. This is fairly rare and currently 2171 // seems to only occur for jasm-generated classes which contain 2172 // jsr/ret pairs which are not associated with finally clauses and 2173 // do not have exception handlers in the containing method, and are 2174 // therefore not caught early enough to abort the inlining without 2175 // corrupting the graph. (We currently bail out with a non-empty 2176 // stack at a ret in these situations.) 
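 // CHECK_BAILOUT() returns from invoke() right away if the failed inline attempt bailed out the compilation.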
2177 CHECK_BAILOUT(); 2178 2179 // inlining not successful => standard invoke 2180 ValueType* result_type = as_ValueType(declared_signature->return_type()); 2181 ValueStack* state_before = copy_state_exhandling(); 2182 2183 // The bytecode (code) might change in this method so we are checking this very late. 2184 const bool has_receiver = 2185 code == Bytecodes::_invokespecial || 2186 code == Bytecodes::_invokevirtual || 2187 code == Bytecodes::_invokeinterface; 2188 Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg); 2189 Value recv = has_receiver ? apop() : nullptr; 2190 2191 // A null check is required here (when there is a receiver) for any of the following cases: 2192 // - invokespecial always needs a null check. 2193 // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized 2194 // and require null checking. If the target is loaded a null check is emitted here. 2195 // If the target isn't loaded the null check must happen after the call resolution. We achieve that 2196 // by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry). 2197 // (The JVM specification requires that LinkageError must be thrown before a NPE. An unloaded target may 2198 // potentially fail, and can't have the null check before the resolution.) 2199 // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the same 2200 // reason as above, so calls with a receiver to unloaded targets can't be profiled.) 2201 // 2202 // Normal invokevirtual will perform the null check during lookup 2203 2204 bool need_null_check = (code == Bytecodes::_invokespecial) || 2205 (target->is_loaded() && (target->is_final_method() || (is_profiling() && profile_calls()))); 2206 2207 if (need_null_check) { 2208 if (recv != nullptr) { 2209 null_check(recv); 2210 } 2211 2212 if (is_profiling()) { 2213 // Note that we'd collect profile data in this method if we wanted it.
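 // If CHA or exact-type analysis above pinned down a single target, its holder is passed to
 // profile_call() below as the known receiver klass.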
2214 compilation()->set_would_profile(true); 2215 2216 if (profile_calls()) { 2217 assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both can not be set"); 2218 ciKlass* target_klass = nullptr; 2219 if (cha_monomorphic_target != nullptr) { 2220 target_klass = cha_monomorphic_target->holder(); 2221 } else if (exact_target != nullptr) { 2222 target_klass = exact_target->holder(); 2223 } 2224 profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false); 2225 } 2226 } 2227 } 2228 2229 Invoke* result = new Invoke(code, result_type, recv, args, target, state_before); 2230 // push result 2231 append_split(result); 2232 2233 if (result_type != voidType) { 2234 push(result_type, round_fp(result)); 2235 } 2236 if (profile_return() && result_type->is_object_kind()) { 2237 profile_return_type(result, target); 2238 } 2239 } 2240 2241 2242 void GraphBuilder::new_instance(int klass_index) { 2243 ValueStack* state_before = copy_state_exhandling(); 2244 ciKlass* klass = stream()->get_klass(); 2245 assert(klass->is_instance_klass(), "must be an instance klass"); 2246 NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass()); 2247 _memory->new_instance(new_instance); 2248 apush(append_split(new_instance)); 2249 } 2250 2251 2252 void GraphBuilder::new_type_array() { 2253 ValueStack* state_before = copy_state_exhandling(); 2254 apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true))); 2255 } 2256 2257 2258 void GraphBuilder::new_object_array() { 2259 ciKlass* klass = stream()->get_klass(); 2260 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2261 NewArray* n = new NewObjectArray(klass, ipop(), state_before); 2262 apush(append_split(n)); 2263 } 2264 2265 2266 bool GraphBuilder::direct_compare(ciKlass* k) { 2267 if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) { 2268 ciInstanceKlass* ik = k->as_instance_klass(); 2269 if (ik->is_final()) { 2270 return true; 2271 } else { 2272 if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) { 2273 // test class is leaf class 2274 dependency_recorder()->assert_leaf_type(ik); 2275 return true; 2276 } 2277 } 2278 } 2279 return false; 2280 } 2281 2282 2283 void GraphBuilder::check_cast(int klass_index) { 2284 ciKlass* klass = stream()->get_klass(); 2285 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception(); 2286 CheckCast* c = new CheckCast(klass, apop(), state_before); 2287 apush(append_split(c)); 2288 c->set_direct_compare(direct_compare(klass)); 2289 2290 if (is_profiling()) { 2291 // Note that we'd collect profile data in this method if we wanted it. 2292 compilation()->set_would_profile(true); 2293 2294 if (profile_checkcasts()) { 2295 c->set_profiled_method(method()); 2296 c->set_profiled_bci(bci()); 2297 c->set_should_profile(true); 2298 } 2299 } 2300 } 2301 2302 2303 void GraphBuilder::instance_of(int klass_index) { 2304 ciKlass* klass = stream()->get_klass(); 2305 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2306 InstanceOf* i = new InstanceOf(klass, apop(), state_before); 2307 ipush(append_split(i)); 2308 i->set_direct_compare(direct_compare(klass)); 2309 2310 if (is_profiling()) { 2311 // Note that we'd collect profile data in this method if we wanted it. 
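 // As with checkcast above, the InstanceOf records its profiled method/bci so the operand types
 // observed here can be recorded in the method's profile.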
2312 compilation()->set_would_profile(true); 2313 2314 if (profile_checkcasts()) { 2315 i->set_profiled_method(method()); 2316 i->set_profiled_bci(bci()); 2317 i->set_should_profile(true); 2318 } 2319 } 2320 } 2321 2322 2323 void GraphBuilder::monitorenter(Value x, int bci) { 2324 // save state before locking in case of deoptimization after a NullPointerException 2325 ValueStack* state_before = copy_state_for_exception_with_bci(bci); 2326 append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci); 2327 kill_all(); 2328 } 2329 2330 2331 void GraphBuilder::monitorexit(Value x, int bci) { 2332 append_with_bci(new MonitorExit(x, state()->unlock()), bci); 2333 kill_all(); 2334 } 2335 2336 2337 void GraphBuilder::new_multi_array(int dimensions) { 2338 ciKlass* klass = stream()->get_klass(); 2339 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); 2340 2341 Values* dims = new Values(dimensions, dimensions, nullptr); 2342 // fill in all dimensions 2343 int i = dimensions; 2344 while (i-- > 0) dims->at_put(i, ipop()); 2345 // create array 2346 NewArray* n = new NewMultiArray(klass, dims, state_before); 2347 apush(append_split(n)); 2348 } 2349 2350 2351 void GraphBuilder::throw_op(int bci) { 2352 // We require that the debug info for a Throw be the "state before" 2353 // the Throw (i.e., exception oop is still on TOS) 2354 ValueStack* state_before = copy_state_before_with_bci(bci); 2355 Throw* t = new Throw(apop(), state_before); 2356 // operand stack not needed after a throw 2357 state()->truncate_stack(0); 2358 append_with_bci(t, bci); 2359 } 2360 2361 2362 Value GraphBuilder::round_fp(Value fp_value) { 2363 if (strict_fp_requires_explicit_rounding) { 2364 #ifdef IA32 2365 // no rounding needed if SSE2 is used 2366 if (UseSSE < 2) { 2367 // Must currently insert rounding node for doubleword values that 2368 // are results of expressions (i.e., not loads from memory or 2369 // constants) 2370 if (fp_value->type()->tag() == doubleTag && 2371 fp_value->as_Constant() == nullptr && 2372 fp_value->as_Local() == nullptr && // method parameters need no rounding 2373 fp_value->as_RoundFP() == nullptr) { 2374 return append(new RoundFP(fp_value)); 2375 } 2376 } 2377 #else 2378 Unimplemented(); 2379 #endif // IA32 2380 } 2381 return fp_value; 2382 } 2383 2384 2385 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) { 2386 Canonicalizer canon(compilation(), instr, bci); 2387 Instruction* i1 = canon.canonical(); 2388 if (i1->is_linked() || !i1->can_be_linked()) { 2389 // Canonicalizer returned an instruction which was already 2390 // appended so simply return it. 2391 return i1; 2392 } 2393 2394 if (UseLocalValueNumbering) { 2395 // Lookup the instruction in the ValueMap and add it to the map if 2396 // it's not found. 2397 Instruction* i2 = vmap()->find_insert(i1); 2398 if (i2 != i1) { 2399 // found an entry in the value map, so just return it. 2400 assert(i2->is_linked(), "should already be linked"); 2401 return i2; 2402 } 2403 ValueNumberingEffects vne(vmap()); 2404 i1->visit(&vne); 2405 } 2406 2407 // i1 was not eliminated => append it 2408 assert(i1->next() == nullptr, "shouldn't already be linked"); 2409 _last = _last->set_next(i1, canon.bci()); 2410 2411 if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) { 2412 // set the bailout state but complete normal processing. 
We 2413 // might do a little more work before noticing the bailout so we 2414 // want processing to continue normally until it's noticed. 2415 bailout("Method and/or inlining is too large"); 2416 } 2417 2418 #ifndef PRODUCT 2419 if (PrintIRDuringConstruction) { 2420 InstructionPrinter ip; 2421 ip.print_line(i1); 2422 if (Verbose) { 2423 state()->print(); 2424 } 2425 } 2426 #endif 2427 2428 // save state after modification of operand stack for StateSplit instructions 2429 StateSplit* s = i1->as_StateSplit(); 2430 if (s != nullptr) { 2431 if (EliminateFieldAccess) { 2432 Intrinsic* intrinsic = s->as_Intrinsic(); 2433 if (s->as_Invoke() != nullptr || (intrinsic && !intrinsic->preserves_state())) { 2434 _memory->kill(); 2435 } 2436 } 2437 s->set_state(state()->copy(ValueStack::StateAfter, canon.bci())); 2438 } 2439 2440 // set up exception handlers for this instruction if necessary 2441 if (i1->can_trap()) { 2442 i1->set_exception_handlers(handle_exception(i1)); 2443 assert(i1->exception_state() != nullptr || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state"); 2444 } 2445 return i1; 2446 } 2447 2448 2449 Instruction* GraphBuilder::append(Instruction* instr) { 2450 assert(instr->as_StateSplit() == nullptr || instr->as_BlockEnd() != nullptr, "wrong append used"); 2451 return append_with_bci(instr, bci()); 2452 } 2453 2454 2455 Instruction* GraphBuilder::append_split(StateSplit* instr) { 2456 return append_with_bci(instr, bci()); 2457 } 2458 2459 2460 void GraphBuilder::null_check(Value value) { 2461 if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) { 2462 return; 2463 } else { 2464 Constant* con = value->as_Constant(); 2465 if (con) { 2466 ObjectType* c = con->type()->as_ObjectType(); 2467 if (c && c->is_loaded()) { 2468 ObjectConstant* oc = c->as_ObjectConstant(); 2469 if (!oc || !oc->value()->is_null_object()) { 2470 return; 2471 } 2472 } 2473 } 2474 } 2475 append(new NullCheck(value, copy_state_for_exception())); 2476 } 2477 2478 2479 2480 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) { 2481 if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) { 2482 assert(instruction->exception_state() == nullptr 2483 || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState 2484 || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()), 2485 "exception_state should be of exception kind"); 2486 return new XHandlers(); 2487 } 2488 2489 XHandlers* exception_handlers = new XHandlers(); 2490 ScopeData* cur_scope_data = scope_data(); 2491 ValueStack* cur_state = instruction->state_before(); 2492 ValueStack* prev_state = nullptr; 2493 int scope_count = 0; 2494 2495 assert(cur_state != nullptr, "state_before must be set"); 2496 do { 2497 int cur_bci = cur_state->bci(); 2498 assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match"); 2499 assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci"); 2500 2501 // join with all potential exception handlers 2502 XHandlers* list = cur_scope_data->xhandlers(); 2503 const int n = list->length(); 2504 for (int i = 0; i < n; i++) { 2505 XHandler* h = list->handler_at(i); 2506 if (h->covers(cur_bci)) { 2507 // h is a potential exception handler => join it 2508 compilation()->set_has_exception_handlers(true); 2509 2510 BlockBegin* entry = h->entry_block(); 2511 if (entry == block()) 
{ 2512 // It's acceptable for an exception handler to cover itself 2513 // but we don't handle that in the parser currently. It's 2514 // very rare so we bailout instead of trying to handle it. 2515 BAILOUT_("exception handler covers itself", exception_handlers); 2516 } 2517 assert(entry->bci() == h->handler_bci(), "must match"); 2518 assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond"); 2519 2520 // previously this was a BAILOUT, but this is not necessary 2521 // now because asynchronous exceptions are not handled this way. 2522 assert(entry->state() == nullptr || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match"); 2523 2524 // xhandler start with an empty expression stack 2525 if (cur_state->stack_size() != 0) { 2526 // locals are preserved 2527 // stack will be truncated 2528 cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci()); 2529 } 2530 if (instruction->exception_state() == nullptr) { 2531 instruction->set_exception_state(cur_state); 2532 } 2533 2534 // Note: Usually this join must work. However, very 2535 // complicated jsr-ret structures where we don't ret from 2536 // the subroutine can cause the objects on the monitor 2537 // stacks to not match because blocks can be parsed twice. 2538 // The only test case we've seen so far which exhibits this 2539 // problem is caught by the infinite recursion test in 2540 // GraphBuilder::jsr() if the join doesn't work. 2541 if (!entry->try_merge(cur_state, compilation()->has_irreducible_loops())) { 2542 BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers); 2543 } 2544 2545 // add current state for correct handling of phi functions at begin of xhandler 2546 int phi_operand = entry->add_exception_state(cur_state); 2547 2548 // add entry to the list of xhandlers of this block 2549 _block->add_exception_handler(entry); 2550 2551 // add back-edge from xhandler entry to this block 2552 if (!entry->is_predecessor(_block)) { 2553 entry->add_predecessor(_block); 2554 } 2555 2556 // clone XHandler because phi_operand and scope_count can not be shared 2557 XHandler* new_xhandler = new XHandler(h); 2558 new_xhandler->set_phi_operand(phi_operand); 2559 new_xhandler->set_scope_count(scope_count); 2560 exception_handlers->append(new_xhandler); 2561 2562 // fill in exception handler subgraph lazily 2563 assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet"); 2564 cur_scope_data->add_to_work_list(entry); 2565 2566 // stop when reaching catchall 2567 if (h->catch_type() == 0) { 2568 return exception_handlers; 2569 } 2570 } 2571 } 2572 2573 if (exception_handlers->length() == 0) { 2574 // This scope and all callees do not handle exceptions, so the local 2575 // variables of this scope are not needed. However, the scope itself is 2576 // required for a correct exception stack trace -> clear out the locals. 2577 // Stack and locals are invalidated but not truncated in caller state. 
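 // For caller scopes (prev_state != nullptr) only the caller link is rewired to the cleared copy;
 // for the innermost scope the cleared copy also becomes the instruction's exception state.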
2578 if (prev_state != nullptr) { 2579 assert(instruction->exception_state() != nullptr, "missed set?"); 2580 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(true /* caller */); 2581 cur_state = cur_state->copy(exc_kind, cur_state->bci()); 2582 // reset caller exception state 2583 prev_state->set_caller_state(cur_state); 2584 } else { 2585 assert(instruction->exception_state() == nullptr, "already set"); 2586 // set instruction exception state 2587 // truncate stack 2588 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(); 2589 cur_state = cur_state->copy(exc_kind, cur_state->bci()); 2590 instruction->set_exception_state(cur_state); 2591 } 2592 } 2593 2594 // Set up iteration for next time. 2595 // If parsing a jsr, do not grab exception handlers from the 2596 // parent scopes for this method (already got them, and they 2597 // needed to be cloned) 2598 2599 while (cur_scope_data->parsing_jsr()) { 2600 cur_scope_data = cur_scope_data->parent(); 2601 } 2602 2603 assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match"); 2604 assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler"); 2605 2606 prev_state = cur_state; 2607 cur_state = cur_state->caller_state(); 2608 cur_scope_data = cur_scope_data->parent(); 2609 scope_count++; 2610 } while (cur_scope_data != nullptr); 2611 2612 return exception_handlers; 2613 } 2614 2615 2616 // Helper class for simplifying Phis. 2617 class PhiSimplifier : public BlockClosure { 2618 private: 2619 bool _has_substitutions; 2620 Value simplify(Value v); 2621 2622 public: 2623 PhiSimplifier(BlockBegin* start) : _has_substitutions(false) { 2624 start->iterate_preorder(this); 2625 if (_has_substitutions) { 2626 SubstitutionResolver sr(start); 2627 } 2628 } 2629 void block_do(BlockBegin* b); 2630 bool has_substitutions() const { return _has_substitutions; } 2631 }; 2632 2633 2634 Value PhiSimplifier::simplify(Value v) { 2635 Phi* phi = v->as_Phi(); 2636 2637 if (phi == nullptr) { 2638 // no phi function 2639 return v; 2640 } else if (v->has_subst()) { 2641 // already substituted; subst can be phi itself -> simplify 2642 return simplify(v->subst()); 2643 } else if (phi->is_set(Phi::cannot_simplify)) { 2644 // already tried to simplify phi before 2645 return phi; 2646 } else if (phi->is_set(Phi::visited)) { 2647 // break cycles in phi functions 2648 return phi; 2649 } else if (phi->type()->is_illegal()) { 2650 // illegal phi functions are ignored anyway 2651 return phi; 2652 2653 } else { 2654 // mark phi function as processed to break cycles in phi functions 2655 phi->set(Phi::visited); 2656 2657 // simplify x = [y, x] and x = [y, y] to y 2658 Value subst = nullptr; 2659 int opd_count = phi->operand_count(); 2660 for (int i = 0; i < opd_count; i++) { 2661 Value opd = phi->operand_at(i); 2662 assert(opd != nullptr, "Operand must exist!"); 2663 2664 if (opd->type()->is_illegal()) { 2665 // if one operand is illegal, the entire phi function is illegal 2666 phi->make_illegal(); 2667 phi->clear(Phi::visited); 2668 return phi; 2669 } 2670 2671 Value new_opd = simplify(opd); 2672 assert(new_opd != nullptr, "Simplified operand must exist!"); 2673 2674 if (new_opd != phi && new_opd != subst) { 2675 if (subst == nullptr) { 2676 subst = new_opd; 2677 } else { 2678 // no simplification possible 2679 phi->set(Phi::cannot_simplify); 2680 phi->clear(Phi::visited); 2681 return phi; 2682 } 2683 } 2684 } 2685 2686 // successfully simplified phi function 2687 assert(subst != 
nullptr, "illegal phi function"); 2688 _has_substitutions = true; 2689 phi->clear(Phi::visited); 2690 phi->set_subst(subst); 2691 2692 #ifndef PRODUCT 2693 if (PrintPhiFunctions) { 2694 tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id()); 2695 } 2696 #endif 2697 2698 return subst; 2699 } 2700 } 2701 2702 2703 void PhiSimplifier::block_do(BlockBegin* b) { 2704 for_each_phi_fun(b, phi, 2705 simplify(phi); 2706 ); 2707 2708 #ifdef ASSERT 2709 for_each_phi_fun(b, phi, 2710 assert(phi->operand_count() != 1 || phi->subst() != phi || phi->is_illegal(), "missed trivial simplification"); 2711 ); 2712 2713 ValueStack* state = b->state()->caller_state(); 2714 for_each_state_value(state, value, 2715 Phi* phi = value->as_Phi(); 2716 assert(phi == nullptr || phi->block() != b, "must not have phi function to simplify in caller state"); 2717 ); 2718 #endif 2719 } 2720 2721 // This method is called after all blocks are filled with HIR instructions 2722 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x] 2723 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) { 2724 PhiSimplifier simplifier(start); 2725 } 2726 2727 2728 void GraphBuilder::connect_to_end(BlockBegin* beg) { 2729 // setup iteration 2730 kill_all(); 2731 _block = beg; 2732 _state = beg->state()->copy_for_parsing(); 2733 _last = beg; 2734 iterate_bytecodes_for_block(beg->bci()); 2735 } 2736 2737 2738 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) { 2739 #ifndef PRODUCT 2740 if (PrintIRDuringConstruction) { 2741 tty->cr(); 2742 InstructionPrinter ip; 2743 ip.print_instr(_block); tty->cr(); 2744 ip.print_stack(_block->state()); tty->cr(); 2745 ip.print_inline_level(_block); 2746 ip.print_head(); 2747 tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size()); 2748 } 2749 #endif 2750 _skip_block = false; 2751 assert(state() != nullptr, "ValueStack missing!"); 2752 CompileLog* log = compilation()->log(); 2753 ciBytecodeStream s(method()); 2754 s.reset_to_bci(bci); 2755 int prev_bci = bci; 2756 scope_data()->set_stream(&s); 2757 // iterate 2758 Bytecodes::Code code = Bytecodes::_illegal; 2759 bool push_exception = false; 2760 2761 if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == nullptr) { 2762 // first thing in the exception entry block should be the exception object. 
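 // The ExceptionObject is not appended here; push_exception defers it until just before the first
 // bytecode of the handler is parsed (see the check inside the loop below).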
2763 push_exception = true; 2764 } 2765 2766 bool ignore_return = scope_data()->ignore_return(); 2767 2768 while (!bailed_out() && last()->as_BlockEnd() == nullptr && 2769 (code = stream()->next()) != ciBytecodeStream::EOBC() && 2770 (block_at(s.cur_bci()) == nullptr || block_at(s.cur_bci()) == block())) { 2771 assert(state()->kind() == ValueStack::Parsing, "invalid state kind"); 2772 2773 if (log != nullptr) 2774 log->set_context("bc code='%d' bci='%d'", (int)code, s.cur_bci()); 2775 2776 // Check for active jsr during OSR compilation 2777 if (compilation()->is_osr_compile() 2778 && scope()->is_top_scope() 2779 && parsing_jsr() 2780 && s.cur_bci() == compilation()->osr_bci()) { 2781 bailout("OSR not supported while a jsr is active"); 2782 } 2783 2784 if (push_exception) { 2785 apush(append(new ExceptionObject())); 2786 push_exception = false; 2787 } 2788 2789 // handle bytecode 2790 switch (code) { 2791 case Bytecodes::_nop : /* nothing to do */ break; 2792 case Bytecodes::_aconst_null : apush(append(new Constant(objectNull ))); break; 2793 case Bytecodes::_iconst_m1 : ipush(append(new Constant(new IntConstant (-1)))); break; 2794 case Bytecodes::_iconst_0 : ipush(append(new Constant(intZero ))); break; 2795 case Bytecodes::_iconst_1 : ipush(append(new Constant(intOne ))); break; 2796 case Bytecodes::_iconst_2 : ipush(append(new Constant(new IntConstant ( 2)))); break; 2797 case Bytecodes::_iconst_3 : ipush(append(new Constant(new IntConstant ( 3)))); break; 2798 case Bytecodes::_iconst_4 : ipush(append(new Constant(new IntConstant ( 4)))); break; 2799 case Bytecodes::_iconst_5 : ipush(append(new Constant(new IntConstant ( 5)))); break; 2800 case Bytecodes::_lconst_0 : lpush(append(new Constant(new LongConstant ( 0)))); break; 2801 case Bytecodes::_lconst_1 : lpush(append(new Constant(new LongConstant ( 1)))); break; 2802 case Bytecodes::_fconst_0 : fpush(append(new Constant(new FloatConstant ( 0)))); break; 2803 case Bytecodes::_fconst_1 : fpush(append(new Constant(new FloatConstant ( 1)))); break; 2804 case Bytecodes::_fconst_2 : fpush(append(new Constant(new FloatConstant ( 2)))); break; 2805 case Bytecodes::_dconst_0 : dpush(append(new Constant(new DoubleConstant( 0)))); break; 2806 case Bytecodes::_dconst_1 : dpush(append(new Constant(new DoubleConstant( 1)))); break; 2807 case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break; 2808 case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break; 2809 case Bytecodes::_ldc : // fall through 2810 case Bytecodes::_ldc_w : // fall through 2811 case Bytecodes::_ldc2_w : load_constant(); break; 2812 case Bytecodes::_iload : load_local(intType , s.get_index()); break; 2813 case Bytecodes::_lload : load_local(longType , s.get_index()); break; 2814 case Bytecodes::_fload : load_local(floatType , s.get_index()); break; 2815 case Bytecodes::_dload : load_local(doubleType , s.get_index()); break; 2816 case Bytecodes::_aload : load_local(instanceType, s.get_index()); break; 2817 case Bytecodes::_iload_0 : load_local(intType , 0); break; 2818 case Bytecodes::_iload_1 : load_local(intType , 1); break; 2819 case Bytecodes::_iload_2 : load_local(intType , 2); break; 2820 case Bytecodes::_iload_3 : load_local(intType , 3); break; 2821 case Bytecodes::_lload_0 : load_local(longType , 0); break; 2822 case Bytecodes::_lload_1 : load_local(longType , 1); break; 2823 case Bytecodes::_lload_2 : load_local(longType , 2); break; 2824 case 
Bytecodes::_lload_3 : load_local(longType , 3); break; 2825 case Bytecodes::_fload_0 : load_local(floatType , 0); break; 2826 case Bytecodes::_fload_1 : load_local(floatType , 1); break; 2827 case Bytecodes::_fload_2 : load_local(floatType , 2); break; 2828 case Bytecodes::_fload_3 : load_local(floatType , 3); break; 2829 case Bytecodes::_dload_0 : load_local(doubleType, 0); break; 2830 case Bytecodes::_dload_1 : load_local(doubleType, 1); break; 2831 case Bytecodes::_dload_2 : load_local(doubleType, 2); break; 2832 case Bytecodes::_dload_3 : load_local(doubleType, 3); break; 2833 case Bytecodes::_aload_0 : load_local(objectType, 0); break; 2834 case Bytecodes::_aload_1 : load_local(objectType, 1); break; 2835 case Bytecodes::_aload_2 : load_local(objectType, 2); break; 2836 case Bytecodes::_aload_3 : load_local(objectType, 3); break; 2837 case Bytecodes::_iaload : load_indexed(T_INT ); break; 2838 case Bytecodes::_laload : load_indexed(T_LONG ); break; 2839 case Bytecodes::_faload : load_indexed(T_FLOAT ); break; 2840 case Bytecodes::_daload : load_indexed(T_DOUBLE); break; 2841 case Bytecodes::_aaload : load_indexed(T_OBJECT); break; 2842 case Bytecodes::_baload : load_indexed(T_BYTE ); break; 2843 case Bytecodes::_caload : load_indexed(T_CHAR ); break; 2844 case Bytecodes::_saload : load_indexed(T_SHORT ); break; 2845 case Bytecodes::_istore : store_local(intType , s.get_index()); break; 2846 case Bytecodes::_lstore : store_local(longType , s.get_index()); break; 2847 case Bytecodes::_fstore : store_local(floatType , s.get_index()); break; 2848 case Bytecodes::_dstore : store_local(doubleType, s.get_index()); break; 2849 case Bytecodes::_astore : store_local(objectType, s.get_index()); break; 2850 case Bytecodes::_istore_0 : store_local(intType , 0); break; 2851 case Bytecodes::_istore_1 : store_local(intType , 1); break; 2852 case Bytecodes::_istore_2 : store_local(intType , 2); break; 2853 case Bytecodes::_istore_3 : store_local(intType , 3); break; 2854 case Bytecodes::_lstore_0 : store_local(longType , 0); break; 2855 case Bytecodes::_lstore_1 : store_local(longType , 1); break; 2856 case Bytecodes::_lstore_2 : store_local(longType , 2); break; 2857 case Bytecodes::_lstore_3 : store_local(longType , 3); break; 2858 case Bytecodes::_fstore_0 : store_local(floatType , 0); break; 2859 case Bytecodes::_fstore_1 : store_local(floatType , 1); break; 2860 case Bytecodes::_fstore_2 : store_local(floatType , 2); break; 2861 case Bytecodes::_fstore_3 : store_local(floatType , 3); break; 2862 case Bytecodes::_dstore_0 : store_local(doubleType, 0); break; 2863 case Bytecodes::_dstore_1 : store_local(doubleType, 1); break; 2864 case Bytecodes::_dstore_2 : store_local(doubleType, 2); break; 2865 case Bytecodes::_dstore_3 : store_local(doubleType, 3); break; 2866 case Bytecodes::_astore_0 : store_local(objectType, 0); break; 2867 case Bytecodes::_astore_1 : store_local(objectType, 1); break; 2868 case Bytecodes::_astore_2 : store_local(objectType, 2); break; 2869 case Bytecodes::_astore_3 : store_local(objectType, 3); break; 2870 case Bytecodes::_iastore : store_indexed(T_INT ); break; 2871 case Bytecodes::_lastore : store_indexed(T_LONG ); break; 2872 case Bytecodes::_fastore : store_indexed(T_FLOAT ); break; 2873 case Bytecodes::_dastore : store_indexed(T_DOUBLE); break; 2874 case Bytecodes::_aastore : store_indexed(T_OBJECT); break; 2875 case Bytecodes::_bastore : store_indexed(T_BYTE ); break; 2876 case Bytecodes::_castore : store_indexed(T_CHAR ); break; 2877 case Bytecodes::_sastore : 
store_indexed(T_SHORT ); break; 2878 case Bytecodes::_pop : // fall through 2879 case Bytecodes::_pop2 : // fall through 2880 case Bytecodes::_dup : // fall through 2881 case Bytecodes::_dup_x1 : // fall through 2882 case Bytecodes::_dup_x2 : // fall through 2883 case Bytecodes::_dup2 : // fall through 2884 case Bytecodes::_dup2_x1 : // fall through 2885 case Bytecodes::_dup2_x2 : // fall through 2886 case Bytecodes::_swap : stack_op(code); break; 2887 case Bytecodes::_iadd : arithmetic_op(intType , code); break; 2888 case Bytecodes::_ladd : arithmetic_op(longType , code); break; 2889 case Bytecodes::_fadd : arithmetic_op(floatType , code); break; 2890 case Bytecodes::_dadd : arithmetic_op(doubleType, code); break; 2891 case Bytecodes::_isub : arithmetic_op(intType , code); break; 2892 case Bytecodes::_lsub : arithmetic_op(longType , code); break; 2893 case Bytecodes::_fsub : arithmetic_op(floatType , code); break; 2894 case Bytecodes::_dsub : arithmetic_op(doubleType, code); break; 2895 case Bytecodes::_imul : arithmetic_op(intType , code); break; 2896 case Bytecodes::_lmul : arithmetic_op(longType , code); break; 2897 case Bytecodes::_fmul : arithmetic_op(floatType , code); break; 2898 case Bytecodes::_dmul : arithmetic_op(doubleType, code); break; 2899 case Bytecodes::_idiv : arithmetic_op(intType , code, copy_state_for_exception()); break; 2900 case Bytecodes::_ldiv : arithmetic_op(longType , code, copy_state_for_exception()); break; 2901 case Bytecodes::_fdiv : arithmetic_op(floatType , code); break; 2902 case Bytecodes::_ddiv : arithmetic_op(doubleType, code); break; 2903 case Bytecodes::_irem : arithmetic_op(intType , code, copy_state_for_exception()); break; 2904 case Bytecodes::_lrem : arithmetic_op(longType , code, copy_state_for_exception()); break; 2905 case Bytecodes::_frem : arithmetic_op(floatType , code); break; 2906 case Bytecodes::_drem : arithmetic_op(doubleType, code); break; 2907 case Bytecodes::_ineg : negate_op(intType ); break; 2908 case Bytecodes::_lneg : negate_op(longType ); break; 2909 case Bytecodes::_fneg : negate_op(floatType ); break; 2910 case Bytecodes::_dneg : negate_op(doubleType); break; 2911 case Bytecodes::_ishl : shift_op(intType , code); break; 2912 case Bytecodes::_lshl : shift_op(longType, code); break; 2913 case Bytecodes::_ishr : shift_op(intType , code); break; 2914 case Bytecodes::_lshr : shift_op(longType, code); break; 2915 case Bytecodes::_iushr : shift_op(intType , code); break; 2916 case Bytecodes::_lushr : shift_op(longType, code); break; 2917 case Bytecodes::_iand : logic_op(intType , code); break; 2918 case Bytecodes::_land : logic_op(longType, code); break; 2919 case Bytecodes::_ior : logic_op(intType , code); break; 2920 case Bytecodes::_lor : logic_op(longType, code); break; 2921 case Bytecodes::_ixor : logic_op(intType , code); break; 2922 case Bytecodes::_lxor : logic_op(longType, code); break; 2923 case Bytecodes::_iinc : increment(); break; 2924 case Bytecodes::_i2l : convert(code, T_INT , T_LONG ); break; 2925 case Bytecodes::_i2f : convert(code, T_INT , T_FLOAT ); break; 2926 case Bytecodes::_i2d : convert(code, T_INT , T_DOUBLE); break; 2927 case Bytecodes::_l2i : convert(code, T_LONG , T_INT ); break; 2928 case Bytecodes::_l2f : convert(code, T_LONG , T_FLOAT ); break; 2929 case Bytecodes::_l2d : convert(code, T_LONG , T_DOUBLE); break; 2930 case Bytecodes::_f2i : convert(code, T_FLOAT , T_INT ); break; 2931 case Bytecodes::_f2l : convert(code, T_FLOAT , T_LONG ); break; 2932 case Bytecodes::_f2d : convert(code, T_FLOAT , 
T_DOUBLE); break; 2933 case Bytecodes::_d2i : convert(code, T_DOUBLE, T_INT ); break; 2934 case Bytecodes::_d2l : convert(code, T_DOUBLE, T_LONG ); break; 2935 case Bytecodes::_d2f : convert(code, T_DOUBLE, T_FLOAT ); break; 2936 case Bytecodes::_i2b : convert(code, T_INT , T_BYTE ); break; 2937 case Bytecodes::_i2c : convert(code, T_INT , T_CHAR ); break; 2938 case Bytecodes::_i2s : convert(code, T_INT , T_SHORT ); break; 2939 case Bytecodes::_lcmp : compare_op(longType , code); break; 2940 case Bytecodes::_fcmpl : compare_op(floatType , code); break; 2941 case Bytecodes::_fcmpg : compare_op(floatType , code); break; 2942 case Bytecodes::_dcmpl : compare_op(doubleType, code); break; 2943 case Bytecodes::_dcmpg : compare_op(doubleType, code); break; 2944 case Bytecodes::_ifeq : if_zero(intType , If::eql); break; 2945 case Bytecodes::_ifne : if_zero(intType , If::neq); break; 2946 case Bytecodes::_iflt : if_zero(intType , If::lss); break; 2947 case Bytecodes::_ifge : if_zero(intType , If::geq); break; 2948 case Bytecodes::_ifgt : if_zero(intType , If::gtr); break; 2949 case Bytecodes::_ifle : if_zero(intType , If::leq); break; 2950 case Bytecodes::_if_icmpeq : if_same(intType , If::eql); break; 2951 case Bytecodes::_if_icmpne : if_same(intType , If::neq); break; 2952 case Bytecodes::_if_icmplt : if_same(intType , If::lss); break; 2953 case Bytecodes::_if_icmpge : if_same(intType , If::geq); break; 2954 case Bytecodes::_if_icmpgt : if_same(intType , If::gtr); break; 2955 case Bytecodes::_if_icmple : if_same(intType , If::leq); break; 2956 case Bytecodes::_if_acmpeq : if_same(objectType, If::eql); break; 2957 case Bytecodes::_if_acmpne : if_same(objectType, If::neq); break; 2958 case Bytecodes::_goto : _goto(s.cur_bci(), s.get_dest()); break; 2959 case Bytecodes::_jsr : jsr(s.get_dest()); break; 2960 case Bytecodes::_ret : ret(s.get_index()); break; 2961 case Bytecodes::_tableswitch : table_switch(); break; 2962 case Bytecodes::_lookupswitch : lookup_switch(); break; 2963 case Bytecodes::_ireturn : method_return(ipop(), ignore_return); break; 2964 case Bytecodes::_lreturn : method_return(lpop(), ignore_return); break; 2965 case Bytecodes::_freturn : method_return(fpop(), ignore_return); break; 2966 case Bytecodes::_dreturn : method_return(dpop(), ignore_return); break; 2967 case Bytecodes::_areturn : method_return(apop(), ignore_return); break; 2968 case Bytecodes::_return : method_return(nullptr, ignore_return); break; 2969 case Bytecodes::_getstatic : // fall through 2970 case Bytecodes::_putstatic : // fall through 2971 case Bytecodes::_getfield : // fall through 2972 case Bytecodes::_putfield : access_field(code); break; 2973 case Bytecodes::_invokevirtual : // fall through 2974 case Bytecodes::_invokespecial : // fall through 2975 case Bytecodes::_invokestatic : // fall through 2976 case Bytecodes::_invokedynamic : // fall through 2977 case Bytecodes::_invokeinterface: invoke(code); break; 2978 case Bytecodes::_new : new_instance(s.get_index_u2()); break; 2979 case Bytecodes::_newarray : new_type_array(); break; 2980 case Bytecodes::_anewarray : new_object_array(); break; 2981 case Bytecodes::_arraylength : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; } 2982 case Bytecodes::_athrow : throw_op(s.cur_bci()); break; 2983 case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break; 2984 case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break; 2985 case Bytecodes::_monitorenter : monitorenter(apop(), 
s.cur_bci()); break; 2986 case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break; 2987 case Bytecodes::_wide : ShouldNotReachHere(); break; 2988 case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break; 2989 case Bytecodes::_ifnull : if_null(objectType, If::eql); break; 2990 case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break; 2991 case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break; 2992 case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break; 2993 case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", nullptr); 2994 default : ShouldNotReachHere(); break; 2995 } 2996 2997 if (log != nullptr) 2998 log->clear_context(); // skip marker if nothing was printed 2999 3000 // save current bci to setup Goto at the end 3001 prev_bci = s.cur_bci(); 3002 3003 } 3004 CHECK_BAILOUT_(nullptr); 3005 // stop processing of this block (see try_inline_full) 3006 if (_skip_block) { 3007 _skip_block = false; 3008 assert(_last && _last->as_BlockEnd(), ""); 3009 return _last->as_BlockEnd(); 3010 } 3011 // if there are any, check if last instruction is a BlockEnd instruction 3012 BlockEnd* end = last()->as_BlockEnd(); 3013 if (end == nullptr) { 3014 // all blocks must end with a BlockEnd instruction => add a Goto 3015 end = new Goto(block_at(s.cur_bci()), false); 3016 append(end); 3017 } 3018 assert(end == last()->as_BlockEnd(), "inconsistency"); 3019 3020 assert(end->state() != nullptr, "state must already be present"); 3021 assert(end->as_Return() == nullptr || end->as_Throw() == nullptr || end->state()->stack_size() == 0, "stack not needed for return and throw"); 3022 3023 // connect to begin & set state 3024 // NOTE that inlining may have changed the block we are parsing 3025 block()->set_end(end); 3026 // propagate state 3027 for (int i = end->number_of_sux() - 1; i >= 0; i--) { 3028 BlockBegin* sux = end->sux_at(i); 3029 assert(sux->is_predecessor(block()), "predecessor missing"); 3030 // be careful, bailout if bytecodes are strange 3031 if (!sux->try_merge(end->state(), compilation()->has_irreducible_loops())) BAILOUT_("block join failed", nullptr); 3032 scope_data()->add_to_work_list(end->sux_at(i)); 3033 } 3034 3035 scope_data()->set_stream(nullptr); 3036 3037 // done 3038 return end; 3039 } 3040 3041 3042 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) { 3043 do { 3044 if (start_in_current_block_for_inlining && !bailed_out()) { 3045 iterate_bytecodes_for_block(0); 3046 start_in_current_block_for_inlining = false; 3047 } else { 3048 BlockBegin* b; 3049 while ((b = scope_data()->remove_from_work_list()) != nullptr) { 3050 if (!b->is_set(BlockBegin::was_visited_flag)) { 3051 if (b->is_set(BlockBegin::osr_entry_flag)) { 3052 // we're about to parse the osr entry block, so make sure 3053 // we setup the OSR edge leading into this block so that 3054 // Phis get setup correctly. 3055 setup_osr_entry_block(); 3056 // this is no longer the osr entry block, so clear it. 3057 b->clear(BlockBegin::osr_entry_flag); 3058 } 3059 b->set(BlockBegin::was_visited_flag); 3060 connect_to_end(b); 3061 } 3062 } 3063 } 3064 } while (!bailed_out() && !scope_data()->is_work_list_empty()); 3065 } 3066 3067 3068 bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes]; 3069 3070 void GraphBuilder::initialize() { 3071 // the following bytecodes are assumed to potentially 3072 // throw exceptions in compiled code - note that e.g. 
3073 // monitorexit & the return bytecodes do not throw
3074 // exceptions once monitor pairing has succeeded, since the
3075 // pairing proof guarantees that they complete normally
3076 Bytecodes::Code can_trap_list[] =
3077 { Bytecodes::_ldc
3078 , Bytecodes::_ldc_w
3079 , Bytecodes::_ldc2_w
3080 , Bytecodes::_iaload
3081 , Bytecodes::_laload
3082 , Bytecodes::_faload
3083 , Bytecodes::_daload
3084 , Bytecodes::_aaload
3085 , Bytecodes::_baload
3086 , Bytecodes::_caload
3087 , Bytecodes::_saload
3088 , Bytecodes::_iastore
3089 , Bytecodes::_lastore
3090 , Bytecodes::_fastore
3091 , Bytecodes::_dastore
3092 , Bytecodes::_aastore
3093 , Bytecodes::_bastore
3094 , Bytecodes::_castore
3095 , Bytecodes::_sastore
3096 , Bytecodes::_idiv
3097 , Bytecodes::_ldiv
3098 , Bytecodes::_irem
3099 , Bytecodes::_lrem
3100 , Bytecodes::_getstatic
3101 , Bytecodes::_putstatic
3102 , Bytecodes::_getfield
3103 , Bytecodes::_putfield
3104 , Bytecodes::_invokevirtual
3105 , Bytecodes::_invokespecial
3106 , Bytecodes::_invokestatic
3107 , Bytecodes::_invokedynamic
3108 , Bytecodes::_invokeinterface
3109 , Bytecodes::_new
3110 , Bytecodes::_newarray
3111 , Bytecodes::_anewarray
3112 , Bytecodes::_arraylength
3113 , Bytecodes::_athrow
3114 , Bytecodes::_checkcast
3115 , Bytecodes::_instanceof
3116 , Bytecodes::_monitorenter
3117 , Bytecodes::_multianewarray
3118 };
3119
3120 // initialize trap tables
3121 for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
3122 _can_trap[i] = false;
3123 }
3124 // set standard trap info
3125 for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
3126 _can_trap[can_trap_list[j]] = true;
3127 }
3128 }
3129
3130
3131 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
3132 assert(entry->is_set(f), "entry/flag mismatch");
3133 // create header block
3134 BlockBegin* h = new BlockBegin(entry->bci());
3135 h->set_depth_first_number(0);
3136
3137 Value l = h;
3138 BlockEnd* g = new Goto(entry, false);
3139 l->set_next(g, entry->bci());
3140 h->set_end(g);
3141 h->set(f);
3142 // setup header block end state
3143 ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
3144 assert(s->stack_is_empty(), "must have empty stack at entry point");
3145 g->set_state(s);
3146 return h;
3147 }
3148
3149
3150
3151 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
3152 BlockBegin* start = new BlockBegin(0);
3153
3154 // This code eliminates the empty start block at the beginning of
3155 // each method. Previously, each method started with the
3156 // start-block created below, and this block was followed by the
3157 // header block that was always empty. This header block is only
3158 // necessary if std_entry is also a backward branch target because
3159 // then phi functions may be necessary in the header block. It's
3160 // also necessary when profiling so that there's a single block that
3161 // can increment the counters.
3162 // In addition, with range check elimination, we may need a valid block
3163 // that dominates all the rest to insert range predicates.
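// (Editorial sketch, not part of the original sources: the code below builds
//
//    start (BlockBegin, bci 0)
//      Base --> new_header_block --Goto--> std_entry    (header block only when one is needed)
//           \-> osr_entry                               (OSR compiles only)
//
//  where new_header_block is either a fresh header block created by
//  header_block() above, or std_entry itself when no header block is required.)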
3164 BlockBegin* new_header_block;
3165 if (std_entry->number_of_preds() > 0 || is_profiling() || RangeCheckElimination) {
3166 new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
3167 } else {
3168 new_header_block = std_entry;
3169 }
3170
3171 // setup start block (root for the IR graph)
3172 Base* base =
3173 new Base(
3174 new_header_block,
3175 osr_entry
3176 );
3177 start->set_next(base, 0);
3178 start->set_end(base);
3179 // create & setup state for start block
3180 start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
3181 base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
3182
3183 if (base->std_entry()->state() == nullptr) {
3184 // setup states for header blocks
3185 base->std_entry()->merge(state, compilation()->has_irreducible_loops());
3186 }
3187
3188 assert(base->std_entry()->state() != nullptr, "");
3189 return start;
3190 }
3191
3192
3193 void GraphBuilder::setup_osr_entry_block() {
3194 assert(compilation()->is_osr_compile(), "only for osrs");
3195
3196 int osr_bci = compilation()->osr_bci();
3197 ciBytecodeStream s(method());
3198 s.reset_to_bci(osr_bci);
3199 s.next();
3200 scope_data()->set_stream(&s);
3201
3202 // create a new block to be the osr setup code
3203 _osr_entry = new BlockBegin(osr_bci);
3204 _osr_entry->set(BlockBegin::osr_entry_flag);
3205 _osr_entry->set_depth_first_number(0);
3206 BlockBegin* target = bci2block()->at(osr_bci);
3207 assert(target != nullptr && target->is_set(BlockBegin::osr_entry_flag), "must be there");
3208 // the osr entry has no values for locals
3209 ValueStack* state = target->state()->copy();
3210 _osr_entry->set_state(state);
3211
3212 kill_all();
3213 _block = _osr_entry;
3214 _state = _osr_entry->state()->copy();
3215 assert(_state->bci() == osr_bci, "mismatch");
3216 _last = _osr_entry;
3217 Value e = append(new OsrEntry());
3218 e->set_needs_null_check(false);
3219
3220 // OSR buffer is
3221 //
3222 // locals[nlocals-1..0]
3223 // monitors[number_of_locks-1..0]
3224 //
3225 // the locals are a direct copy of the interpreter frame into the osr buffer,
3226 // so the first slot in the local array is the last local from the interpreter
3227 // and the last slot is local[0] (the receiver) from the interpreter
3228 //
3229 // Similarly with locks: the first lock slot in the osr buffer is the nth lock
3230 // from the interpreter frame, and the nth lock slot in the osr buffer is the 0th lock
3231 // in the interpreter frame (the method lock, if a synchronized method)
3232
3233 // Initialize monitors in the compiled activation.
3234
3235 int index;
3236 Value local;
3237
3238 // find all the locals that the interpreter thinks contain live oops
3239 const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
3240
3241 // compute the offset into the locals so that we can treat the buffer
3242 // as if the locals were still in the interpreter frame (a standalone
// sketch of this offset arithmetic appears at the end of this file)
3243 int locals_offset = BytesPerWord * (method()->max_locals() - 1);
3244 for_each_local_value(state, index, local) {
3245 int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
3246 Value get;
3247 if (local->type()->is_object_kind() && !live_oops.at(index)) {
3248 // The interpreter thinks this local is dead but the compiler
3249 // doesn't, so pretend that the interpreter passed in null.
3250 get = append(new Constant(objectNull)); 3251 } else { 3252 Value off_val = append(new Constant(new IntConstant(offset))); 3253 get = append(new UnsafeGet(as_BasicType(local->type()), e, 3254 off_val, 3255 false/*is_volatile*/, 3256 true/*is_raw*/)); 3257 } 3258 _state->store_local(index, get); 3259 } 3260 3261 // the storage for the OSR buffer is freed manually in the LIRGenerator. 3262 3263 assert(state->caller_state() == nullptr, "should be top scope"); 3264 state->clear_locals(); 3265 Goto* g = new Goto(target, false); 3266 append(g); 3267 _osr_entry->set_end(g); 3268 target->merge(_osr_entry->end()->state(), compilation()->has_irreducible_loops()); 3269 3270 scope_data()->set_stream(nullptr); 3271 } 3272 3273 3274 ValueStack* GraphBuilder::state_at_entry() { 3275 ValueStack* state = new ValueStack(scope(), nullptr); 3276 3277 // Set up locals for receiver 3278 int idx = 0; 3279 if (!method()->is_static()) { 3280 // we should always see the receiver 3281 state->store_local(idx, new Local(method()->holder(), objectType, idx, true)); 3282 idx = 1; 3283 } 3284 3285 // Set up locals for incoming arguments 3286 ciSignature* sig = method()->signature(); 3287 for (int i = 0; i < sig->count(); i++) { 3288 ciType* type = sig->type_at(i); 3289 BasicType basic_type = type->basic_type(); 3290 // don't allow T_ARRAY to propagate into locals types 3291 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 3292 ValueType* vt = as_ValueType(basic_type); 3293 state->store_local(idx, new Local(type, vt, idx, false)); 3294 idx += type->size(); 3295 } 3296 3297 // lock synchronized method 3298 if (method()->is_synchronized()) { 3299 state->lock(nullptr); 3300 } 3301 3302 return state; 3303 } 3304 3305 3306 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope) 3307 : _scope_data(nullptr) 3308 , _compilation(compilation) 3309 , _memory(new MemoryBuffer()) 3310 , _inline_bailout_msg(nullptr) 3311 , _instruction_count(0) 3312 , _osr_entry(nullptr) 3313 { 3314 int osr_bci = compilation->osr_bci(); 3315 3316 // determine entry points and bci2block mapping 3317 BlockListBuilder blm(compilation, scope, osr_bci); 3318 CHECK_BAILOUT(); 3319 3320 BlockList* bci2block = blm.bci2block(); 3321 BlockBegin* start_block = bci2block->at(0); 3322 3323 push_root_scope(scope, bci2block, start_block); 3324 3325 // setup state for std entry 3326 _initial_state = state_at_entry(); 3327 start_block->merge(_initial_state, compilation->has_irreducible_loops()); 3328 3329 // End nulls still exist here 3330 3331 // complete graph 3332 _vmap = new ValueMap(); 3333 switch (scope->method()->intrinsic_id()) { 3334 case vmIntrinsics::_dabs : // fall through 3335 case vmIntrinsics::_dsqrt : // fall through 3336 case vmIntrinsics::_dsqrt_strict : // fall through 3337 case vmIntrinsics::_dsin : // fall through 3338 case vmIntrinsics::_dcos : // fall through 3339 case vmIntrinsics::_dtan : // fall through 3340 case vmIntrinsics::_dtanh : // fall through 3341 case vmIntrinsics::_dlog : // fall through 3342 case vmIntrinsics::_dlog10 : // fall through 3343 case vmIntrinsics::_dexp : // fall through 3344 case vmIntrinsics::_dpow : // fall through 3345 { 3346 // Compiles where the root method is an intrinsic need a special 3347 // compilation environment because the bytecodes for the method 3348 // shouldn't be parsed during the compilation, only the special 3349 // Intrinsic node should be emitted. 
If this isn't done the
3350 // code for the inlined version will be different from the root
3351 // compiled version, which could lead to monotonicity problems on
3352 // Intel.
3353 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) {
3354 BAILOUT("failed to inline intrinsic, method not annotated");
3355 }
3356
3357 // Set up a stream so that appending instructions works properly.
3358 ciBytecodeStream s(scope->method());
3359 s.reset_to_bci(0);
3360 scope_data()->set_stream(&s);
3361 s.next();
3362
3363 // setup the initial block state
3364 _block = start_block;
3365 _state = start_block->state()->copy_for_parsing();
3366 _last = start_block;
3367 load_local(doubleType, 0);
3368 if (scope->method()->intrinsic_id() == vmIntrinsics::_dpow) {
3369 load_local(doubleType, 2);
3370 }
3371
3372 // Emit the intrinsic node.
3373 bool result = try_inline_intrinsics(scope->method());
3374 if (!result) BAILOUT("failed to inline intrinsic");
3375 method_return(dpop());
3376
3377 // connect the begin and end blocks and we're all done.
3378 BlockEnd* end = last()->as_BlockEnd();
3379 block()->set_end(end);
3380 break;
3381 }
3382
3383 case vmIntrinsics::_Reference_get:
3384 {
3385 {
3386 // With java.lang.ref.Reference.get() we must go through the
3387 // intrinsic - when G1 is enabled - even when get() is the root
3388 // method of the compile so that, if necessary, the value in
3389 // the referent field of the reference object gets recorded by
3390 // the pre-barrier code.
3391 // Specifically, if G1 is enabled, the value in the referent
3392 // field is recorded by the G1 SATB pre barrier. This will
3393 // result in the referent being marked live and the reference
3394 // object removed from the list of discovered references during
3395 // reference processing.
3396 if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) {
3397 BAILOUT("failed to inline intrinsic, method not annotated");
3398 }
3399
3400 // We also need the intrinsic to prevent reads of this field from being
3401 // commoned across safepoints, since a GC can change its value.
3402
3403 // Set up a stream so that appending instructions works properly.
3404 ciBytecodeStream s(scope->method());
3405 s.reset_to_bci(0);
3406 scope_data()->set_stream(&s);
3407 s.next();
3408
3409 // setup the initial block state
3410 _block = start_block;
3411 _state = start_block->state()->copy_for_parsing();
3412 _last = start_block;
3413 load_local(objectType, 0);
3414
3415 // Emit the intrinsic node.
3416 bool result = try_inline_intrinsics(scope->method());
3417 if (!result) BAILOUT("failed to inline intrinsic");
3418 method_return(apop());
3419
3420 // connect the begin and end blocks and we're all done.
3421 BlockEnd* end = last()->as_BlockEnd(); 3422 block()->set_end(end); 3423 break; 3424 } 3425 // Otherwise, fall thru 3426 } 3427 3428 default: 3429 scope_data()->add_to_work_list(start_block); 3430 iterate_all_blocks(); 3431 break; 3432 } 3433 CHECK_BAILOUT(); 3434 3435 # ifdef ASSERT 3436 // For all blocks reachable from start_block: _end must be non-null 3437 { 3438 BlockList processed; 3439 BlockList to_go; 3440 to_go.append(start_block); 3441 while(to_go.length() > 0) { 3442 BlockBegin* current = to_go.pop(); 3443 assert(current != nullptr, "Should not happen."); 3444 assert(current->end() != nullptr, "All blocks reachable from start_block should have end() != nullptr."); 3445 processed.append(current); 3446 for(int i = 0; i < current->number_of_sux(); i++) { 3447 BlockBegin* s = current->sux_at(i); 3448 if (!processed.contains(s)) { 3449 to_go.append(s); 3450 } 3451 } 3452 } 3453 } 3454 #endif // ASSERT 3455 3456 _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state); 3457 3458 eliminate_redundant_phis(_start); 3459 3460 NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats()); 3461 // for osr compile, bailout if some requirements are not fulfilled 3462 if (osr_bci != -1) { 3463 BlockBegin* osr_block = blm.bci2block()->at(osr_bci); 3464 if (!osr_block->is_set(BlockBegin::was_visited_flag)) { 3465 BAILOUT("osr entry must have been visited for osr compile"); 3466 } 3467 3468 // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points 3469 if (!osr_block->state()->stack_is_empty()) { 3470 BAILOUT("stack not empty at OSR entry point"); 3471 } 3472 } 3473 #ifndef PRODUCT 3474 if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count); 3475 #endif 3476 } 3477 3478 3479 ValueStack* GraphBuilder::copy_state_before() { 3480 return copy_state_before_with_bci(bci()); 3481 } 3482 3483 ValueStack* GraphBuilder::copy_state_exhandling() { 3484 return copy_state_exhandling_with_bci(bci()); 3485 } 3486 3487 ValueStack* GraphBuilder::copy_state_for_exception() { 3488 return copy_state_for_exception_with_bci(bci()); 3489 } 3490 3491 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) { 3492 return state()->copy(ValueStack::StateBefore, bci); 3493 } 3494 3495 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) { 3496 if (!has_handler()) return nullptr; 3497 return state()->copy(ValueStack::StateBefore, bci); 3498 } 3499 3500 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) { 3501 ValueStack* s = copy_state_exhandling_with_bci(bci); 3502 if (s == nullptr) { 3503 // no handler, no need to retain locals 3504 ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(); 3505 s = state()->copy(exc_kind, bci); 3506 } 3507 return s; 3508 } 3509 3510 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const { 3511 int recur_level = 0; 3512 for (IRScope* s = scope(); s != nullptr; s = s->caller()) { 3513 if (s->method() == cur_callee) { 3514 ++recur_level; 3515 } 3516 } 3517 return recur_level; 3518 } 3519 3520 static void set_flags_for_inlined_callee(Compilation* compilation, ciMethod* callee) { 3521 if (callee->has_reserved_stack_access()) { 3522 compilation->set_has_reserved_stack_access(true); 3523 } 3524 if (callee->is_synchronized() || callee->has_monitor_bytecodes()) { 3525 compilation->set_has_monitors(true); 3526 } 3527 if (callee->is_scoped()) { 3528 compilation->set_has_scoped_access(true); 3529 } 3530 } 3531 3532 bool 
GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 3533 const char* msg = nullptr; 3534 3535 // clear out any existing inline bailout condition 3536 clear_inline_bailout(); 3537 3538 // exclude methods we don't want to inline 3539 msg = should_not_inline(callee); 3540 if (msg != nullptr) { 3541 print_inlining(callee, msg, /*success*/ false); 3542 return false; 3543 } 3544 3545 // method handle invokes 3546 if (callee->is_method_handle_intrinsic()) { 3547 if (try_method_handle_inline(callee, ignore_return)) { 3548 set_flags_for_inlined_callee(compilation(), callee); 3549 return true; 3550 } 3551 return false; 3552 } 3553 3554 // handle intrinsics 3555 if (callee->intrinsic_id() != vmIntrinsics::_none && 3556 callee->check_intrinsic_candidate()) { 3557 if (try_inline_intrinsics(callee, ignore_return)) { 3558 print_inlining(callee, "intrinsic"); 3559 set_flags_for_inlined_callee(compilation(), callee); 3560 return true; 3561 } 3562 // try normal inlining 3563 } 3564 3565 // certain methods cannot be parsed at all 3566 msg = check_can_parse(callee); 3567 if (msg != nullptr) { 3568 print_inlining(callee, msg, /*success*/ false); 3569 return false; 3570 } 3571 3572 // If bytecode not set use the current one. 3573 if (bc == Bytecodes::_illegal) { 3574 bc = code(); 3575 } 3576 if (try_inline_full(callee, holder_known, ignore_return, bc, receiver)) { 3577 set_flags_for_inlined_callee(compilation(), callee); 3578 return true; 3579 } 3580 3581 // Entire compilation could fail during try_inline_full call. 3582 // In that case printing inlining decision info is useless. 3583 if (!bailed_out()) 3584 print_inlining(callee, _inline_bailout_msg, /*success*/ false); 3585 3586 return false; 3587 } 3588 3589 3590 const char* GraphBuilder::check_can_parse(ciMethod* callee) const { 3591 // Certain methods cannot be parsed at all: 3592 if ( callee->is_native()) return "native method"; 3593 if ( callee->is_abstract()) return "abstract method"; 3594 if (!callee->can_be_parsed()) return "cannot be parsed"; 3595 return nullptr; 3596 } 3597 3598 // negative filter: should callee NOT be inlined? returns null, ok to inline, or rejection msg 3599 const char* GraphBuilder::should_not_inline(ciMethod* callee) const { 3600 if ( compilation()->directive()->should_not_inline(callee)) return "disallowed by CompileCommand"; 3601 if ( callee->dont_inline()) return "don't inline by annotation"; 3602 return nullptr; 3603 } 3604 3605 void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_return) { 3606 vmIntrinsics::ID id = callee->intrinsic_id(); 3607 assert(id != vmIntrinsics::_none, "must be a VM intrinsic"); 3608 3609 // Some intrinsics need special IR nodes. 
3610 switch(id) { 3611 case vmIntrinsics::_getReference : append_unsafe_get(callee, T_OBJECT, false); return; 3612 case vmIntrinsics::_getBoolean : append_unsafe_get(callee, T_BOOLEAN, false); return; 3613 case vmIntrinsics::_getByte : append_unsafe_get(callee, T_BYTE, false); return; 3614 case vmIntrinsics::_getShort : append_unsafe_get(callee, T_SHORT, false); return; 3615 case vmIntrinsics::_getChar : append_unsafe_get(callee, T_CHAR, false); return; 3616 case vmIntrinsics::_getInt : append_unsafe_get(callee, T_INT, false); return; 3617 case vmIntrinsics::_getLong : append_unsafe_get(callee, T_LONG, false); return; 3618 case vmIntrinsics::_getFloat : append_unsafe_get(callee, T_FLOAT, false); return; 3619 case vmIntrinsics::_getDouble : append_unsafe_get(callee, T_DOUBLE, false); return; 3620 case vmIntrinsics::_putReference : append_unsafe_put(callee, T_OBJECT, false); return; 3621 case vmIntrinsics::_putBoolean : append_unsafe_put(callee, T_BOOLEAN, false); return; 3622 case vmIntrinsics::_putByte : append_unsafe_put(callee, T_BYTE, false); return; 3623 case vmIntrinsics::_putShort : append_unsafe_put(callee, T_SHORT, false); return; 3624 case vmIntrinsics::_putChar : append_unsafe_put(callee, T_CHAR, false); return; 3625 case vmIntrinsics::_putInt : append_unsafe_put(callee, T_INT, false); return; 3626 case vmIntrinsics::_putLong : append_unsafe_put(callee, T_LONG, false); return; 3627 case vmIntrinsics::_putFloat : append_unsafe_put(callee, T_FLOAT, false); return; 3628 case vmIntrinsics::_putDouble : append_unsafe_put(callee, T_DOUBLE, false); return; 3629 case vmIntrinsics::_getShortUnaligned : append_unsafe_get(callee, T_SHORT, false); return; 3630 case vmIntrinsics::_getCharUnaligned : append_unsafe_get(callee, T_CHAR, false); return; 3631 case vmIntrinsics::_getIntUnaligned : append_unsafe_get(callee, T_INT, false); return; 3632 case vmIntrinsics::_getLongUnaligned : append_unsafe_get(callee, T_LONG, false); return; 3633 case vmIntrinsics::_putShortUnaligned : append_unsafe_put(callee, T_SHORT, false); return; 3634 case vmIntrinsics::_putCharUnaligned : append_unsafe_put(callee, T_CHAR, false); return; 3635 case vmIntrinsics::_putIntUnaligned : append_unsafe_put(callee, T_INT, false); return; 3636 case vmIntrinsics::_putLongUnaligned : append_unsafe_put(callee, T_LONG, false); return; 3637 case vmIntrinsics::_getReferenceVolatile : append_unsafe_get(callee, T_OBJECT, true); return; 3638 case vmIntrinsics::_getBooleanVolatile : append_unsafe_get(callee, T_BOOLEAN, true); return; 3639 case vmIntrinsics::_getByteVolatile : append_unsafe_get(callee, T_BYTE, true); return; 3640 case vmIntrinsics::_getShortVolatile : append_unsafe_get(callee, T_SHORT, true); return; 3641 case vmIntrinsics::_getCharVolatile : append_unsafe_get(callee, T_CHAR, true); return; 3642 case vmIntrinsics::_getIntVolatile : append_unsafe_get(callee, T_INT, true); return; 3643 case vmIntrinsics::_getLongVolatile : append_unsafe_get(callee, T_LONG, true); return; 3644 case vmIntrinsics::_getFloatVolatile : append_unsafe_get(callee, T_FLOAT, true); return; 3645 case vmIntrinsics::_getDoubleVolatile : append_unsafe_get(callee, T_DOUBLE, true); return; 3646 case vmIntrinsics::_putReferenceVolatile : append_unsafe_put(callee, T_OBJECT, true); return; 3647 case vmIntrinsics::_putBooleanVolatile : append_unsafe_put(callee, T_BOOLEAN, true); return; 3648 case vmIntrinsics::_putByteVolatile : append_unsafe_put(callee, T_BYTE, true); return; 3649 case vmIntrinsics::_putShortVolatile : append_unsafe_put(callee, T_SHORT, 
true); return; 3650 case vmIntrinsics::_putCharVolatile : append_unsafe_put(callee, T_CHAR, true); return; 3651 case vmIntrinsics::_putIntVolatile : append_unsafe_put(callee, T_INT, true); return; 3652 case vmIntrinsics::_putLongVolatile : append_unsafe_put(callee, T_LONG, true); return; 3653 case vmIntrinsics::_putFloatVolatile : append_unsafe_put(callee, T_FLOAT, true); return; 3654 case vmIntrinsics::_putDoubleVolatile : append_unsafe_put(callee, T_DOUBLE, true); return; 3655 case vmIntrinsics::_compareAndSetLong: 3656 case vmIntrinsics::_compareAndSetInt: 3657 case vmIntrinsics::_compareAndSetReference : append_unsafe_CAS(callee); return; 3658 case vmIntrinsics::_getAndAddInt: 3659 case vmIntrinsics::_getAndAddLong : append_unsafe_get_and_set(callee, true); return; 3660 case vmIntrinsics::_getAndSetInt : 3661 case vmIntrinsics::_getAndSetLong : 3662 case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set(callee, false); return; 3663 case vmIntrinsics::_getCharStringU : append_char_access(callee, false); return; 3664 case vmIntrinsics::_putCharStringU : append_char_access(callee, true); return; 3665 case vmIntrinsics::_clone : append_alloc_array_copy(callee); return; 3666 default: 3667 break; 3668 } 3669 if (_inline_bailout_msg != nullptr) { 3670 return; 3671 } 3672 3673 // create intrinsic node 3674 const bool has_receiver = !callee->is_static(); 3675 ValueType* result_type = as_ValueType(callee->return_type()); 3676 ValueStack* state_before = copy_state_for_exception(); 3677 3678 Values* args = state()->pop_arguments(callee->arg_size()); 3679 3680 if (is_profiling()) { 3681 // Don't profile in the special case where the root method 3682 // is the intrinsic 3683 if (callee != method()) { 3684 // Note that we'd collect profile data in this method if we wanted it. 3685 compilation()->set_would_profile(true); 3686 if (profile_calls()) { 3687 Value recv = nullptr; 3688 if (has_receiver) { 3689 recv = args->at(0); 3690 null_check(recv); 3691 } 3692 profile_call(callee, recv, nullptr, collect_args_for_profiling(args, callee, true), true); 3693 } 3694 } 3695 } 3696 3697 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), 3698 args, has_receiver, state_before, 3699 vmIntrinsics::preserves_state(id), 3700 vmIntrinsics::can_trap(id)); 3701 // append instruction & push result 3702 Value value = append_split(result); 3703 if (result_type != voidType && !ignore_return) { 3704 push(result_type, value); 3705 } 3706 3707 if (callee != method() && profile_return() && result_type->is_object_kind()) { 3708 profile_return_type(result, callee); 3709 } 3710 } 3711 3712 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) { 3713 // For calling is_intrinsic_available we need to transition to 3714 // the '_thread_in_vm' state because is_intrinsic_available() 3715 // accesses critical VM-internal data. 3716 bool is_available = false; 3717 { 3718 VM_ENTRY_MARK; 3719 methodHandle mh(THREAD, callee->get_Method()); 3720 is_available = _compilation->compiler()->is_intrinsic_available(mh, _compilation->directive()); 3721 } 3722 3723 if (!is_available) { 3724 if (!InlineNatives) { 3725 // Return false and also set message that the inlining of 3726 // intrinsics has been disabled in general. 
3727 INLINE_BAILOUT("intrinsic method inlining disabled"); 3728 } else { 3729 return false; 3730 } 3731 } 3732 build_graph_for_intrinsic(callee, ignore_return); 3733 if (_inline_bailout_msg != nullptr) { 3734 return false; 3735 } 3736 return true; 3737 } 3738 3739 3740 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) { 3741 // Introduce a new callee continuation point - all Ret instructions 3742 // will be replaced with Gotos to this point. 3743 if (next_bci() >= method()->code_size()) { 3744 return false; 3745 } 3746 BlockBegin* cont = block_at(next_bci()); 3747 assert(cont != nullptr, "continuation must exist (BlockListBuilder starts a new block after a jsr"); 3748 3749 // Note: can not assign state to continuation yet, as we have to 3750 // pick up the state from the Ret instructions. 3751 3752 // Push callee scope 3753 push_scope_for_jsr(cont, jsr_dest_bci); 3754 3755 // Temporarily set up bytecode stream so we can append instructions 3756 // (only using the bci of this stream) 3757 scope_data()->set_stream(scope_data()->parent()->stream()); 3758 3759 BlockBegin* jsr_start_block = block_at(jsr_dest_bci); 3760 assert(jsr_start_block != nullptr, "jsr start block must exist"); 3761 assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet"); 3762 Goto* goto_sub = new Goto(jsr_start_block, false); 3763 // Must copy state to avoid wrong sharing when parsing bytecodes 3764 assert(jsr_start_block->state() == nullptr, "should have fresh jsr starting block"); 3765 jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci)); 3766 append(goto_sub); 3767 _block->set_end(goto_sub); 3768 _last = _block = jsr_start_block; 3769 3770 // Clear out bytecode stream 3771 scope_data()->set_stream(nullptr); 3772 3773 scope_data()->add_to_work_list(jsr_start_block); 3774 3775 // Ready to resume parsing in subroutine 3776 iterate_all_blocks(); 3777 3778 // If we bailed out during parsing, return immediately (this is bad news) 3779 CHECK_BAILOUT_(false); 3780 3781 // Detect whether the continuation can actually be reached. If not, 3782 // it has not had state set by the join() operations in 3783 // iterate_bytecodes_for_block()/ret() and we should not touch the 3784 // iteration state. The calling activation of 3785 // iterate_bytecodes_for_block will then complete normally. 3786 if (cont->state() != nullptr) { 3787 if (!cont->is_set(BlockBegin::was_visited_flag)) { 3788 // add continuation to work list instead of parsing it immediately 3789 scope_data()->parent()->add_to_work_list(cont); 3790 } 3791 } 3792 3793 assert(jsr_continuation() == cont, "continuation must not have changed"); 3794 assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) || 3795 jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag), 3796 "continuation can only be visited in case of backward branches"); 3797 assert(_last && _last->as_BlockEnd(), "block must have end"); 3798 3799 // continuation is in work list, so end iteration of current block 3800 _skip_block = true; 3801 pop_scope_for_jsr(); 3802 3803 return true; 3804 } 3805 3806 3807 // Inline the entry of a synchronized method as a monitor enter and 3808 // register the exception handler which releases the monitor if an 3809 // exception is thrown within the callee. Note that the monitor enter 3810 // cannot throw an exception itself, because the receiver is 3811 // guaranteed to be non-null by the explicit null check at the 3812 // beginning of inlining. 
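// (Editorial note: as the body below shows, the monitor enter is appended at
// SynchronizationEntryBCI and the handler is registered as an XHandler built
// from a ciExceptionHandler spanning the whole callee, so an exception thrown
// anywhere in the inlined body unwinds to sync_handler, which
// fill_sync_handler() later populates with the releasing monitorexit.)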
3813 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) { 3814 assert(lock != nullptr && sync_handler != nullptr, "lock or handler missing"); 3815 3816 monitorenter(lock, SynchronizationEntryBCI); 3817 assert(_last->as_MonitorEnter() != nullptr, "monitor enter expected"); 3818 _last->set_needs_null_check(false); 3819 3820 sync_handler->set(BlockBegin::exception_entry_flag); 3821 sync_handler->set(BlockBegin::is_on_work_list_flag); 3822 3823 ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0); 3824 XHandler* h = new XHandler(desc); 3825 h->set_entry_block(sync_handler); 3826 scope_data()->xhandlers()->append(h); 3827 scope_data()->set_has_handler(); 3828 } 3829 3830 3831 // If an exception is thrown and not handled within an inlined 3832 // synchronized method, the monitor must be released before the 3833 // exception is rethrown in the outer scope. Generate the appropriate 3834 // instructions here. 3835 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) { 3836 BlockBegin* orig_block = _block; 3837 ValueStack* orig_state = _state; 3838 Instruction* orig_last = _last; 3839 _last = _block = sync_handler; 3840 _state = sync_handler->state()->copy(); 3841 3842 assert(sync_handler != nullptr, "handler missing"); 3843 assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here"); 3844 3845 assert(lock != nullptr || default_handler, "lock or handler missing"); 3846 3847 XHandler* h = scope_data()->xhandlers()->remove_last(); 3848 assert(h->entry_block() == sync_handler, "corrupt list of handlers"); 3849 3850 block()->set(BlockBegin::was_visited_flag); 3851 Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI); 3852 assert(exception->is_pinned(), "must be"); 3853 3854 int bci = SynchronizationEntryBCI; 3855 if (compilation()->env()->dtrace_method_probes()) { 3856 // Report exit from inline methods. We don't have a stream here 3857 // so pass an explicit bci of SynchronizationEntryBCI. 
3858 Values* args = new Values(1); 3859 args->push(append_with_bci(new Constant(new MethodConstant(method())), bci)); 3860 append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci); 3861 } 3862 3863 if (lock) { 3864 assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing"); 3865 if (!lock->is_linked()) { 3866 lock = append_with_bci(lock, bci); 3867 } 3868 3869 // exit the monitor in the context of the synchronized method 3870 monitorexit(lock, bci); 3871 3872 // exit the context of the synchronized method 3873 if (!default_handler) { 3874 pop_scope(); 3875 bci = _state->caller_state()->bci(); 3876 _state = _state->caller_state()->copy_for_parsing(); 3877 } 3878 } 3879 3880 // perform the throw as if at the call site 3881 apush(exception); 3882 throw_op(bci); 3883 3884 BlockEnd* end = last()->as_BlockEnd(); 3885 block()->set_end(end); 3886 3887 _block = orig_block; 3888 _state = orig_state; 3889 _last = orig_last; 3890 } 3891 3892 3893 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { 3894 assert(!callee->is_native(), "callee must not be native"); 3895 if (CompilationPolicy::should_not_inline(compilation()->env(), callee)) { 3896 INLINE_BAILOUT("inlining prohibited by policy"); 3897 } 3898 // first perform tests of things it's not possible to inline 3899 if (callee->has_exception_handlers() && 3900 !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers"); 3901 if (callee->is_synchronized() && 3902 !InlineSynchronizedMethods ) INLINE_BAILOUT("callee is synchronized"); 3903 if (!callee->holder()->is_linked()) INLINE_BAILOUT("callee's klass not linked yet"); 3904 if (bc == Bytecodes::_invokestatic && 3905 !callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet"); 3906 if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match"); 3907 3908 // Proper inlining of methods with jsrs requires a little more work. 
3909 if (callee->has_jsrs() ) INLINE_BAILOUT("jsrs not handled properly by inliner yet"); 3910 3911 if (is_profiling() && !callee->ensure_method_data()) { 3912 INLINE_BAILOUT("mdo allocation failed"); 3913 } 3914 3915 const bool is_invokedynamic = (bc == Bytecodes::_invokedynamic); 3916 const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic); 3917 3918 const int args_base = state()->stack_size() - callee->arg_size(); 3919 assert(args_base >= 0, "stack underflow during inlining"); 3920 3921 Value recv = nullptr; 3922 if (has_receiver) { 3923 assert(!callee->is_static(), "callee must not be static"); 3924 assert(callee->arg_size() > 0, "must have at least a receiver"); 3925 3926 recv = state()->stack_at(args_base); 3927 if (recv->is_null_obj()) { 3928 INLINE_BAILOUT("receiver is always null"); 3929 } 3930 } 3931 3932 // now perform tests that are based on flag settings 3933 bool inlinee_by_directive = compilation()->directive()->should_inline(callee); 3934 if (callee->force_inline() || inlinee_by_directive) { 3935 if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel"); 3936 if (recursive_inline_level(callee) > C1MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep"); 3937 3938 const char* msg = ""; 3939 if (callee->force_inline()) msg = "force inline by annotation"; 3940 if (inlinee_by_directive) msg = "force inline by CompileCommand"; 3941 print_inlining(callee, msg); 3942 } else { 3943 // use heuristic controls on inlining 3944 if (inline_level() > C1MaxInlineLevel ) INLINE_BAILOUT("inlining too deep"); 3945 int callee_recursive_level = recursive_inline_level(callee); 3946 if (callee_recursive_level > C1MaxRecursiveInlineLevel ) INLINE_BAILOUT("recursive inlining too deep"); 3947 if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); 3948 // Additional condition to limit stack usage for non-recursive calls. 3949 if ((callee_recursive_level == 0) && 3950 (callee->max_stack() + callee->max_locals() - callee->size_of_parameters() > C1InlineStackLimit)) { 3951 INLINE_BAILOUT("callee uses too much stack"); 3952 } 3953 3954 // don't inline throwable methods unless the inlining tree is rooted in a throwable class 3955 if (callee->name() == ciSymbols::object_initializer_name() && 3956 callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 3957 // Throwable constructor call 3958 IRScope* top = scope(); 3959 while (top->caller() != nullptr) { 3960 top = top->caller(); 3961 } 3962 if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { 3963 INLINE_BAILOUT("don't inline Throwable constructors"); 3964 } 3965 } 3966 3967 if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) { 3968 INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); 3969 } 3970 // printing 3971 print_inlining(callee, "inline", /*success*/ true); 3972 } 3973 3974 assert(bc != Bytecodes::_invokestatic || callee->holder()->is_initialized(), "required"); 3975 3976 // NOTE: Bailouts from this point on, which occur at the 3977 // GraphBuilder level, do not cause bailout just of the inlining but 3978 // in fact of the entire compilation. 
3979 3980 BlockBegin* orig_block = block(); 3981 3982 // Insert null check if necessary 3983 if (has_receiver) { 3984 // note: null check must happen even if first instruction of callee does 3985 // an implicit null check since the callee is in a different scope 3986 // and we must make sure exception handling does the right thing 3987 null_check(recv); 3988 } 3989 3990 if (is_profiling()) { 3991 // Note that we'd collect profile data in this method if we wanted it. 3992 // this may be redundant here... 3993 compilation()->set_would_profile(true); 3994 3995 if (profile_calls()) { 3996 int start = 0; 3997 Values* obj_args = args_list_for_profiling(callee, start, has_receiver); 3998 if (obj_args != nullptr) { 3999 int s = obj_args->capacity(); 4000 // if called through method handle invoke, some arguments may have been popped 4001 for (int i = args_base+start, j = 0; j < obj_args->capacity() && i < state()->stack_size(); ) { 4002 Value v = state()->stack_at_inc(i); 4003 if (v->type()->is_object_kind()) { 4004 obj_args->push(v); 4005 j++; 4006 } 4007 } 4008 check_args_for_profiling(obj_args, s); 4009 } 4010 profile_call(callee, recv, holder_known ? callee->holder() : nullptr, obj_args, true); 4011 } 4012 } 4013 4014 // Introduce a new callee continuation point - if the callee has 4015 // more than one return instruction or the return does not allow 4016 // fall-through of control flow, all return instructions of the 4017 // callee will need to be replaced by Goto's pointing to this 4018 // continuation point. 4019 BlockBegin* cont = block_at(next_bci()); 4020 bool continuation_existed = true; 4021 if (cont == nullptr) { 4022 cont = new BlockBegin(next_bci()); 4023 // low number so that continuation gets parsed as early as possible 4024 cont->set_depth_first_number(0); 4025 if (PrintInitialBlockList) { 4026 tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d", 4027 cont->block_id(), cont->bci(), bci()); 4028 } 4029 continuation_existed = false; 4030 } 4031 // Record number of predecessors of continuation block before 4032 // inlining, to detect if inlined method has edges to its 4033 // continuation after inlining. 4034 int continuation_preds = cont->number_of_preds(); 4035 4036 // Push callee scope 4037 push_scope(callee, cont); 4038 4039 // the BlockListBuilder for the callee could have bailed out 4040 if (bailed_out()) 4041 return false; 4042 4043 // Temporarily set up bytecode stream so we can append instructions 4044 // (only using the bci of this stream) 4045 scope_data()->set_stream(scope_data()->parent()->stream()); 4046 4047 // Pass parameters into callee state: add assignments 4048 // note: this will also ensure that all arguments are computed before being passed 4049 ValueStack* callee_state = state(); 4050 ValueStack* caller_state = state()->caller_state(); 4051 for (int i = args_base; i < caller_state->stack_size(); ) { 4052 const int arg_no = i - args_base; 4053 Value arg = caller_state->stack_at_inc(i); 4054 store_local(callee_state, arg, arg_no); 4055 } 4056 4057 // Remove args from stack. 4058 // Note that we preserve locals state in case we can use it later 4059 // (see use of pop_scope() below) 4060 caller_state->truncate_stack(args_base); 4061 assert(callee_state->stack_size() == 0, "callee stack must be empty"); 4062 4063 Value lock = nullptr; 4064 BlockBegin* sync_handler = nullptr; 4065 4066 // Inline the locking of the receiver if the callee is synchronized 4067 if (callee->is_synchronized()) { 4068 lock = callee->is_static() ? 
append(new Constant(new InstanceConstant(callee->holder()->java_mirror()))) 4069 : state()->local_at(0); 4070 sync_handler = new BlockBegin(SynchronizationEntryBCI); 4071 inline_sync_entry(lock, sync_handler); 4072 } 4073 4074 if (compilation()->env()->dtrace_method_probes()) { 4075 Values* args = new Values(1); 4076 args->push(append(new Constant(new MethodConstant(method())))); 4077 append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args)); 4078 } 4079 4080 MethodDetails method_details(callee); 4081 RuntimeUpcallInfo* upcall = RuntimeUpcalls::get_first_upcall(RuntimeUpcallType::onMethodEntry, method_details); 4082 while (upcall != nullptr) { 4083 Values* args = new Values(0); 4084 append(new RuntimeCall(voidType, upcall->upcall_name(), upcall->upcall_address(), args)); 4085 upcall = RuntimeUpcalls::get_next_upcall(RuntimeUpcallType::onMethodEntry, method_details, upcall); 4086 } 4087 4088 if (profile_inlined_calls()) { 4089 profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI)); 4090 } 4091 4092 BlockBegin* callee_start_block = block_at(0); 4093 if (callee_start_block != nullptr) { 4094 assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header"); 4095 Goto* goto_callee = new Goto(callee_start_block, false); 4096 // The state for this goto is in the scope of the callee, so use 4097 // the entry bci for the callee instead of the call site bci. 4098 append_with_bci(goto_callee, 0); 4099 _block->set_end(goto_callee); 4100 callee_start_block->merge(callee_state, compilation()->has_irreducible_loops()); 4101 4102 _last = _block = callee_start_block; 4103 4104 scope_data()->add_to_work_list(callee_start_block); 4105 } 4106 4107 // Clear out bytecode stream 4108 scope_data()->set_stream(nullptr); 4109 scope_data()->set_ignore_return(ignore_return); 4110 4111 CompileLog* log = compilation()->log(); 4112 if (log != nullptr) log->head("parse method='%d'", log->identify(callee)); 4113 4114 // Ready to resume parsing in callee (either in the same block we 4115 // were in before or in the callee's start block) 4116 iterate_all_blocks(callee_start_block == nullptr); 4117 4118 if (log != nullptr) log->done("parse"); 4119 4120 // If we bailed out during parsing, return immediately (this is bad news) 4121 if (bailed_out()) 4122 return false; 4123 4124 // iterate_all_blocks theoretically traverses in random order; in 4125 // practice, we have only traversed the continuation if we are 4126 // inlining into a subroutine 4127 assert(continuation_existed || 4128 !continuation()->is_set(BlockBegin::was_visited_flag), 4129 "continuation should not have been parsed yet if we created it"); 4130 4131 // At this point we are almost ready to return and resume parsing of 4132 // the caller back in the GraphBuilder. The only thing we want to do 4133 // first is an optimization: during parsing of the callee we 4134 // generated at least one Goto to the continuation block. If we 4135 // generated exactly one, and if the inlined method spanned exactly 4136 // one block (and we didn't have to Goto its entry), then we snip 4137 // off the Goto to the continuation, allowing control to fall 4138 // through back into the caller block and effectively performing 4139 // block merging. This allows load elimination and CSE to take place 4140 // across multiple callee scopes if they are relatively simple, and 4141 // is currently essential to making inlining profitable. 
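// (Editorial illustration: a trivial accessor that inlines as a single block
// ending in one return is the typical case that passes the test below, so its
// instructions simply fall through into the caller's block.)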
4142 if (num_returns() == 1 4143 && block() == orig_block 4144 && block() == inline_cleanup_block()) { 4145 _last = inline_cleanup_return_prev(); 4146 _state = inline_cleanup_state(); 4147 } else if (continuation_preds == cont->number_of_preds()) { 4148 // Inlining caused that the instructions after the invoke in the 4149 // caller are not reachable any more. So skip filling this block 4150 // with instructions! 4151 assert(cont == continuation(), ""); 4152 assert(_last && _last->as_BlockEnd(), ""); 4153 _skip_block = true; 4154 } else { 4155 // Resume parsing in continuation block unless it was already parsed. 4156 // Note that if we don't change _last here, iteration in 4157 // iterate_bytecodes_for_block will stop when we return. 4158 if (!continuation()->is_set(BlockBegin::was_visited_flag)) { 4159 // add continuation to work list instead of parsing it immediately 4160 assert(_last && _last->as_BlockEnd(), ""); 4161 scope_data()->parent()->add_to_work_list(continuation()); 4162 _skip_block = true; 4163 } 4164 } 4165 4166 // Fill the exception handler for synchronized methods with instructions 4167 if (callee->is_synchronized() && sync_handler->state() != nullptr) { 4168 fill_sync_handler(lock, sync_handler); 4169 } else { 4170 pop_scope(); 4171 } 4172 4173 compilation()->notice_inlined_method(callee); 4174 4175 return true; 4176 } 4177 4178 4179 bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return) { 4180 ValueStack* state_before = copy_state_before(); 4181 vmIntrinsics::ID iid = callee->intrinsic_id(); 4182 switch (iid) { 4183 case vmIntrinsics::_invokeBasic: 4184 { 4185 // get MethodHandle receiver 4186 const int args_base = state()->stack_size() - callee->arg_size(); 4187 ValueType* type = state()->stack_at(args_base)->type(); 4188 if (type->is_constant()) { 4189 ciObject* mh = type->as_ObjectType()->constant_value(); 4190 if (mh->is_method_handle()) { 4191 ciMethod* target = mh->as_method_handle()->get_vmtarget(); 4192 4193 // We don't do CHA here so only inline static and statically bindable methods. 4194 if (target->is_static() || target->can_be_statically_bound()) { 4195 if (ciMethod::is_consistent_info(callee, target)) { 4196 Bytecodes::Code bc = target->is_static() ? 
Bytecodes::_invokestatic : Bytecodes::_invokevirtual; 4197 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); 4198 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) { 4199 return true; 4200 } 4201 } else { 4202 print_inlining(target, "signatures mismatch", /*success*/ false); 4203 } 4204 } else { 4205 assert(false, "no inlining through MH::invokeBasic"); // missing optimization opportunity due to suboptimal LF shape 4206 print_inlining(target, "not static or statically bindable", /*success*/ false); 4207 } 4208 } else { 4209 assert(mh->is_null_object(), "not a null"); 4210 print_inlining(callee, "receiver is always null", /*success*/ false); 4211 } 4212 } else { 4213 print_inlining(callee, "receiver not constant", /*success*/ false); 4214 } 4215 } 4216 break; 4217 4218 case vmIntrinsics::_linkToVirtual: 4219 case vmIntrinsics::_linkToStatic: 4220 case vmIntrinsics::_linkToSpecial: 4221 case vmIntrinsics::_linkToInterface: 4222 { 4223 // pop MemberName argument 4224 const int args_base = state()->stack_size() - callee->arg_size(); 4225 ValueType* type = apop()->type(); 4226 if (type->is_constant()) { 4227 ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget(); 4228 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); 4229 // If the target is another method handle invoke, try to recursively get 4230 // a better target. 4231 if (target->is_method_handle_intrinsic()) { 4232 if (try_method_handle_inline(target, ignore_return)) { 4233 return true; 4234 } 4235 } else if (!ciMethod::is_consistent_info(callee, target)) { 4236 print_inlining(target, "signatures mismatch", /*success*/ false); 4237 } else { 4238 ciSignature* signature = target->signature(); 4239 const int receiver_skip = target->is_static() ? 0 : 1; 4240 // Cast receiver to its type. 4241 if (!target->is_static()) { 4242 ciKlass* tk = signature->accessing_klass(); 4243 Value obj = state()->stack_at(args_base); 4244 if (obj->exact_type() == nullptr && 4245 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { 4246 TypeCast* c = new TypeCast(tk, obj, state_before); 4247 append(c); 4248 state()->stack_at_put(args_base, c); 4249 } 4250 } 4251 // Cast reference arguments to its type. 4252 for (int i = 0, j = 0; i < signature->count(); i++) { 4253 ciType* t = signature->type_at(i); 4254 if (t->is_klass()) { 4255 ciKlass* tk = t->as_klass(); 4256 Value obj = state()->stack_at(args_base + receiver_skip + j); 4257 if (obj->exact_type() == nullptr && 4258 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { 4259 TypeCast* c = new TypeCast(t, obj, state_before); 4260 append(c); 4261 state()->stack_at_put(args_base + receiver_skip + j, c); 4262 } 4263 } 4264 j += t->size(); // long and double take two slots 4265 } 4266 // We don't do CHA here so only inline static and statically bindable methods. 4267 if (target->is_static() || target->can_be_statically_bound()) { 4268 Bytecodes::Code bc = target->is_static() ? 
Bytecodes::_invokestatic : Bytecodes::_invokevirtual; 4269 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) { 4270 return true; 4271 } 4272 } else { 4273 print_inlining(target, "not static or statically bindable", /*success*/ false); 4274 } 4275 } 4276 } else { 4277 print_inlining(callee, "MemberName not constant", /*success*/ false); 4278 } 4279 } 4280 break; 4281 4282 case vmIntrinsics::_linkToNative: 4283 print_inlining(callee, "native call", /*success*/ false); 4284 break; 4285 4286 default: 4287 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid)); 4288 break; 4289 } 4290 set_state(state_before->copy_for_parsing()); 4291 return false; 4292 } 4293 4294 4295 void GraphBuilder::inline_bailout(const char* msg) { 4296 assert(msg != nullptr, "inline bailout msg must exist"); 4297 _inline_bailout_msg = msg; 4298 } 4299 4300 4301 void GraphBuilder::clear_inline_bailout() { 4302 _inline_bailout_msg = nullptr; 4303 } 4304 4305 4306 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) { 4307 ScopeData* data = new ScopeData(nullptr); 4308 data->set_scope(scope); 4309 data->set_bci2block(bci2block); 4310 _scope_data = data; 4311 _block = start; 4312 } 4313 4314 4315 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) { 4316 IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false); 4317 scope()->add_callee(callee_scope); 4318 4319 BlockListBuilder blb(compilation(), callee_scope, -1); 4320 CHECK_BAILOUT(); 4321 4322 if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) { 4323 // this scope can be inlined directly into the caller so remove 4324 // the block at bci 0. 4325 blb.bci2block()->at_put(0, nullptr); 4326 } 4327 4328 set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci()))); 4329 4330 ScopeData* data = new ScopeData(scope_data()); 4331 data->set_scope(callee_scope); 4332 data->set_bci2block(blb.bci2block()); 4333 data->set_continuation(continuation); 4334 _scope_data = data; 4335 } 4336 4337 4338 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) { 4339 ScopeData* data = new ScopeData(scope_data()); 4340 data->set_parsing_jsr(); 4341 data->set_jsr_entry_bci(jsr_dest_bci); 4342 data->set_jsr_return_address_local(-1); 4343 // Must clone bci2block list as we will be mutating it in order to 4344 // properly clone all blocks in jsr region as well as exception 4345 // handlers containing rets 4346 BlockList* new_bci2block = new BlockList(bci2block()->length()); 4347 new_bci2block->appendAll(bci2block()); 4348 data->set_bci2block(new_bci2block); 4349 data->set_scope(scope()); 4350 data->setup_jsr_xhandlers(); 4351 data->set_continuation(continuation()); 4352 data->set_jsr_continuation(jsr_continuation); 4353 _scope_data = data; 4354 } 4355 4356 4357 void GraphBuilder::pop_scope() { 4358 int number_of_locks = scope()->number_of_locks(); 4359 _scope_data = scope_data()->parent(); 4360 // accumulate minimum number of monitor slots to be reserved 4361 scope()->set_min_number_of_locks(number_of_locks); 4362 } 4363 4364 4365 void GraphBuilder::pop_scope_for_jsr() { 4366 _scope_data = scope_data()->parent(); 4367 } 4368 4369 void GraphBuilder::append_unsafe_get(ciMethod* callee, BasicType t, bool is_volatile) { 4370 Values* args = state()->pop_arguments(callee->arg_size()); 4371 null_check(args->at(0)); 4372 Instruction* offset = args->at(2); 4373 #ifndef _LP64 
4374 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
4375 #endif
4376 Instruction* op = append(new UnsafeGet(t, args->at(1), offset, is_volatile));
4377 push(op->type(), op);
4378 compilation()->set_has_unsafe_access(true);
4379 }
4380
4381
4382 void GraphBuilder::append_unsafe_put(ciMethod* callee, BasicType t, bool is_volatile) {
4383 Values* args = state()->pop_arguments(callee->arg_size());
4384 null_check(args->at(0));
4385 Instruction* offset = args->at(2);
4386 #ifndef _LP64
4387 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
4388 #endif
4389 Value val = args->at(3);
4390 if (t == T_BOOLEAN) {
4391 Value mask = append(new Constant(new IntConstant(1)));
4392 val = append(new LogicOp(Bytecodes::_iand, val, mask));
4393 }
4394 Instruction* op = append(new UnsafePut(t, args->at(1), offset, val, is_volatile));
4395 compilation()->set_has_unsafe_access(true);
4396 kill_all();
4397 }
4398
4399 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
4400 ValueStack* state_before = copy_state_for_exception();
4401 ValueType* result_type = as_ValueType(callee->return_type());
4402 assert(result_type->is_int(), "int result");
4403 Values* args = state()->pop_arguments(callee->arg_size());
4404
4405 // Pop off some args to specially handle, then push back
4406 Value newval = args->pop();
4407 Value cmpval = args->pop();
4408 Value offset = args->pop();
4409 Value src = args->pop();
4410 Value unsafe_obj = args->pop();
4411
4412 // Separately handle the unsafe arg. It is not needed for code
4413 // generation, but must be null checked
4414 null_check(unsafe_obj);
4415
4416 #ifndef _LP64
4417 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
4418 #endif
4419
4420 args->push(src);
4421 args->push(offset);
4422 args->push(cmpval);
4423 args->push(newval);
4424
4425 // An unsafe CAS can alias with other field accesses, but we don't
4426 // know which ones, so mark the state as not preserved. This will
4427 // cause CSE to invalidate memory across it.
4428 bool preserves_state = false;
4429 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
4430 append_split(result);
4431 push(result_type, result);
4432 compilation()->set_has_unsafe_access(true);
4433 }
4434
4435 void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) {
4436 // This intrinsic accesses a byte[] array as a char[] array. Computing the offsets
4437 // correctly requires matched array shapes.
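// (Editorial example: given the assertions just below, char index i of the value
// array lives at byte offset arrayOopDesc::base_offset_in_bytes(T_CHAR) + 2 * i
// inside the backing byte[], which is the address computed by the T_CHAR
// LoadIndexed/StoreIndexed appended here.)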
  assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
          "sanity: byte[] and char[] bases agree");
  assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
          "sanity: byte[] and char[] scales agree");

  ValueStack* state_before = copy_state_indexed_access();
  compilation()->set_has_access_indexed(true);
  Values* args = state()->pop_arguments(callee->arg_size());
  Value array = args->at(0);
  Value index = args->at(1);
  if (is_store) {
    Value value = args->at(2);
    Instruction* store = append(new StoreIndexed(array, index, nullptr, T_CHAR, value, state_before, false, true));
    store->set_flag(Instruction::NeedsRangeCheckFlag, false);
    _memory->store_value(value);
  } else {
    Instruction* load = append(new LoadIndexed(array, index, nullptr, T_CHAR, state_before, true));
    load->set_flag(Instruction::NeedsRangeCheckFlag, false);
    push(load->type(), load);
  }
}

void GraphBuilder::append_alloc_array_copy(ciMethod* callee) {
  const int args_base = state()->stack_size() - callee->arg_size();
  ciType* receiver_type = state()->stack_at(args_base)->exact_type();
  if (receiver_type == nullptr) {
    inline_bailout("must have a receiver");
    return;
  }
  if (!receiver_type->is_type_array_klass()) {
    inline_bailout("clone array not primitive");
    return;
  }

  ValueStack* state_before = copy_state_before();
  state_before->set_force_reexecute();
  Value src = apop();
  BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type();
  Value length = append(new ArrayLength(src, state_before));
  Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false));

  ValueType* result_type = as_ValueType(callee->return_type());
  vmIntrinsics::ID id = vmIntrinsics::_arraycopy;
  Values* args = new Values(5);
  args->push(src);
  args->push(append(new Constant(new IntConstant(0))));
  args->push(new_array);
  args->push(append(new Constant(new IntConstant(0))));
  args->push(length);
  const bool has_receiver = true;
  Intrinsic* array_copy = new Intrinsic(result_type, id,
                                        args, has_receiver, state_before,
                                        vmIntrinsics::preserves_state(id),
                                        vmIntrinsics::can_trap(id));
  array_copy->set_flag(Instruction::OmitChecksFlag, true);
  append_split(array_copy);
  apush(new_array);
  append(new MemBar(lir_membar_storestore));
}

void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
  CompileLog* log = compilation()->log();
  if (log != nullptr) {
    assert(msg != nullptr, "inlining msg should not be null!");
    if (success) {
      log->inline_success(msg);
    } else {
      log->inline_fail(msg);
    }
  }
  EventCompilerInlining event;
  if (event.should_commit()) {
    CompilerEvent::InlineEvent::post(event, compilation()->env()->task()->compile_id(), method()->get_Method(), callee, success, msg, bci());
  }

  CompileTask::print_inlining_ul(callee, scope()->level(), bci(), inlining_result_of(success), msg);

  if (!compilation()->directive()->PrintInliningOption) {
    return;
  }
  CompileTask::print_inlining_tty(callee, scope()->level(), bci(), inlining_result_of(success), msg);
  if (success && CIPrintMethodCodes) {
    callee->print_codes();
  }
}

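// Appends the IR for the Unsafe get-and-set / get-and-add intrinsics: null
// check the Unsafe receiver, narrow the long offset on 32-bit platforms, and
// emit a single atomic UnsafeGetAndSet node (add when is_add is true, swap
// otherwise) whose result is the previous value.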
void GraphBuilder::append_unsafe_get_and_set(ciMethod* callee, bool is_add) {
  Values* args = state()->pop_arguments(callee->arg_size());
  BasicType t = callee->return_type()->basic_type();
  null_check(args->at(0));
  Instruction* offset = args->at(2);
#ifndef _LP64
  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
  Instruction* op = append(new UnsafeGetAndSet(t, args->at(1), offset, args->at(3), is_add));
  compilation()->set_has_unsafe_access(true);
  kill_all();
  push(op->type(), op);
}

#ifndef PRODUCT
void GraphBuilder::print_stats() {
  if (UseLocalValueNumbering) {
    vmap()->print();
  }
}
#endif // PRODUCT

void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
  assert(known_holder == nullptr || (known_holder->is_instance_klass() &&
                                     (!known_holder->is_interface() ||
                                      ((ciInstanceKlass*)known_holder)->has_nonstatic_concrete_methods())), "should be non-static concrete method");
  if (known_holder != nullptr) {
    if (known_holder->exact_klass() == nullptr) {
      known_holder = compilation()->cha_exact_type(known_holder);
    }
  }

  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
}

void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
  assert((m == nullptr) == (invoke_bci < 0), "invalid method and invalid bci together");
  if (m == nullptr) {
    m = method();
  }
  if (invoke_bci < 0) {
    invoke_bci = bci();
  }
  ciMethodData* md = m->method_data_or_null();
  ciProfileData* data = md->bci_to_data(invoke_bci);
  if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
    bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
    if (has_return) {
      append(new ProfileReturnType(m, invoke_bci, callee, ret));
    }
  }
}

void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
  append(new ProfileInvoke(callee, state));
}