1 /*
2 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "c1/c1_Canonicalizer.hpp"
26 #include "c1/c1_CFGPrinter.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_GraphBuilder.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_InstructionPrinter.hpp"
31 #include "c1/c1_ValueType.hpp"
32 #include "ci/ciCallSite.hpp"
33 #include "ci/ciField.hpp"
34 #include "ci/ciFlatArrayKlass.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciKlass.hpp"
37 #include "ci/ciMemberName.hpp"
38 #include "ci/ciSymbols.hpp"
39 #include "ci/ciUtilities.inline.hpp"
40 #include "classfile/javaClasses.hpp"
41 #include "compiler/compilationPolicy.hpp"
42 #include "compiler/compileBroker.hpp"
43 #include "compiler/compilerEvent.hpp"
44 #include "interpreter/bytecode.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "runtime/arguments.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "utilities/checkedCast.hpp"
50 #include "utilities/macros.hpp"
51 #if INCLUDE_JFR
52 #include "jfr/jfr.hpp"
53 #endif
54
55 class BlockListBuilder {
56 private:
57 Compilation* _compilation;
58 IRScope* _scope;
59
60 BlockList _blocks; // internal list of all blocks
61 BlockList* _bci2block; // mapping from bci to blocks for GraphBuilder
  GrowableArray<BlockList> _bci2block_successors; // mapping from bcis to their blocks' successors while we don't have a BlockEnd yet
63
64 // fields used by mark_loops
65 ResourceBitMap _active; // for iteration of control flow graph
66 ResourceBitMap _visited; // for iteration of control flow graph
  GrowableArray<ResourceBitMap> _loop_map; // caches, for each block, the set of loops containing it
68 int _next_loop_index; // next free loop number
69 int _next_block_number; // for reverse postorder numbering of blocks
70 int _block_id_start;
71
72 int bit_number(int block_id) const { return block_id - _block_id_start; }
73 // accessors
74 Compilation* compilation() const { return _compilation; }
75 IRScope* scope() const { return _scope; }
76 ciMethod* method() const { return scope()->method(); }
77 XHandlers* xhandlers() const { return scope()->xhandlers(); }
78
79 // unified bailout support
80 void bailout(const char* msg) const { compilation()->bailout(msg); }
81 bool bailed_out() const { return compilation()->bailed_out(); }
82
83 // helper functions
84 BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
85 void handle_exceptions(BlockBegin* current, int cur_bci);
86 void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
87 void store_one(BlockBegin* current, int local);
88 void store_two(BlockBegin* current, int local);
89 void set_entries(int osr_bci);
90 void set_leaders();
91
92 void make_loop_header(BlockBegin* block);
93 void mark_loops();
94 BitMap& mark_loops(BlockBegin* b, bool in_subroutine);
95
96 // debugging
97 #ifndef PRODUCT
98 void print();
99 #endif
100
101 int number_of_successors(BlockBegin* block);
102 BlockBegin* successor_at(BlockBegin* block, int i);
103 void add_successor(BlockBegin* block, BlockBegin* sux);
104 bool is_successor(BlockBegin* block, BlockBegin* sux);
105
106 public:
107 // creation
108 BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);
109
110 // accessors for GraphBuilder
111 BlockList* bci2block() const { return _bci2block; }
112 };
113
114
115 // Implementation of BlockListBuilder
116
117 BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
118 : _compilation(compilation)
119 , _scope(scope)
120 , _blocks(16)
121 , _bci2block(new BlockList(scope->method()->code_size(), nullptr))
122 , _bci2block_successors(scope->method()->code_size())
123 , _active() // size not known yet
124 , _visited() // size not known yet
125 , _loop_map() // size not known yet
126 , _next_loop_index(0)
127 , _next_block_number(0)
128 , _block_id_start(0)
129 {
130 set_entries(osr_bci);
131 set_leaders();
132 CHECK_BAILOUT();
133
134 mark_loops();
135 NOT_PRODUCT(if (PrintInitialBlockList) print());
136
  // _bci2block still contains blocks with _end == nullptr and > 0 sux in _bci2block_successors.
138
139 #ifndef PRODUCT
140 if (PrintCFGToFile) {
141 stringStream title;
142 title.print("BlockListBuilder ");
143 scope->method()->print_name(&title);
144 CFGPrinter::print_cfg(_bci2block, title.freeze(), false, false);
145 }
146 #endif
147 }
148
149
150 void BlockListBuilder::set_entries(int osr_bci) {
151 // generate start blocks
152 BlockBegin* std_entry = make_block_at(0, nullptr);
153 if (scope()->caller() == nullptr) {
154 std_entry->set(BlockBegin::std_entry_flag);
155 }
156 if (osr_bci != -1) {
157 BlockBegin* osr_entry = make_block_at(osr_bci, nullptr);
158 osr_entry->set(BlockBegin::osr_entry_flag);
159 }
160
161 // generate exception entry blocks
162 XHandlers* list = xhandlers();
163 const int n = list->length();
164 for (int i = 0; i < n; i++) {
165 XHandler* h = list->handler_at(i);
166 BlockBegin* entry = make_block_at(h->handler_bci(), nullptr);
167 entry->set(BlockBegin::exception_entry_flag);
168 h->set_entry_block(entry);
169 }
170 }
171
172
173 BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
174 assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");
175
176 BlockBegin* block = _bci2block->at(cur_bci);
177 if (block == nullptr) {
178 block = new BlockBegin(cur_bci);
179 block->init_stores_to_locals(method()->max_locals());
180 _bci2block->at_put(cur_bci, block);
181 _bci2block_successors.at_put_grow(cur_bci, BlockList());
182 _blocks.append(block);
183
184 assert(predecessor == nullptr || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
185 }
186
187 if (predecessor != nullptr) {
188 if (block->is_set(BlockBegin::exception_entry_flag)) {
189 BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
190 }
191
192 add_successor(predecessor, block);
193 block->increment_total_preds();
194 }
195
196 return block;
197 }
198
199
200 inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
201 current->stores_to_locals().set_bit(local);
202 }
203 inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
204 store_one(current, local);
205 store_one(current, local + 1);
206 }
207
208
209 void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
210 // Draws edges from a block to its exception handlers
211 XHandlers* list = xhandlers();
212 const int n = list->length();
213
214 for (int i = 0; i < n; i++) {
215 XHandler* h = list->handler_at(i);
216
217 if (h->covers(cur_bci)) {
218 BlockBegin* entry = h->entry_block();
219 assert(entry != nullptr && entry == _bci2block->at(h->handler_bci()), "entry must be set");
220 assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");
221
222 // add each exception handler only once
      if (!is_successor(current, entry)) {
224 add_successor(current, entry);
225 entry->increment_total_preds();
226 }
227
228 // stop when reaching catchall
229 if (h->catch_type() == 0) break;
230 }
231 }
232 }
233
234 void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
235 if (next_bci < method()->code_size()) {
236 // start a new block after jsr-bytecode and link this block into cfg
237 make_block_at(next_bci, current);
238 }
239
  // start a new block at the subroutine entry and mark it with a special flag
241 BlockBegin* sr_block = make_block_at(sr_bci, current);
242 if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
243 sr_block->set(BlockBegin::subroutine_entry_flag);
244 }
245 }
246
247
248 void BlockListBuilder::set_leaders() {
249 bool has_xhandlers = xhandlers()->has_handlers();
250 BlockBegin* current = nullptr;
251
  // Knowing which bcis start a new block simplifies the analysis.
  // Without it, backward branches could jump to a bci where no block was created
  // during bytecode iteration. This would require the creation of a new block at the
  // branch target and a modification of the successor lists.
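  // (bci_block_start is computed up front by the MethodLivenessAnalyzer; the
  // assert in make_block_at() checks that block starts agree with it.)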
256 const BitMap& bci_block_start = method()->bci_block_start();
257
258 int end_bci = method()->code_size();
259
260 ciBytecodeStream s(method());
261 while (s.next() != ciBytecodeStream::EOBC()) {
262 int cur_bci = s.cur_bci();
263
264 if (bci_block_start.at(cur_bci)) {
265 current = make_block_at(cur_bci, current);
266 }
267 assert(current != nullptr, "must have current block");
268
269 if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
270 handle_exceptions(current, cur_bci);
271 }
272
273 switch (s.cur_bc()) {
274 // track stores to local variables for selective creation of phi functions
275 case Bytecodes::_iinc: store_one(current, s.get_index()); break;
276 case Bytecodes::_istore: store_one(current, s.get_index()); break;
277 case Bytecodes::_lstore: store_two(current, s.get_index()); break;
278 case Bytecodes::_fstore: store_one(current, s.get_index()); break;
279 case Bytecodes::_dstore: store_two(current, s.get_index()); break;
280 case Bytecodes::_astore: store_one(current, s.get_index()); break;
281 case Bytecodes::_istore_0: store_one(current, 0); break;
282 case Bytecodes::_istore_1: store_one(current, 1); break;
283 case Bytecodes::_istore_2: store_one(current, 2); break;
284 case Bytecodes::_istore_3: store_one(current, 3); break;
285 case Bytecodes::_lstore_0: store_two(current, 0); break;
286 case Bytecodes::_lstore_1: store_two(current, 1); break;
287 case Bytecodes::_lstore_2: store_two(current, 2); break;
288 case Bytecodes::_lstore_3: store_two(current, 3); break;
289 case Bytecodes::_fstore_0: store_one(current, 0); break;
290 case Bytecodes::_fstore_1: store_one(current, 1); break;
291 case Bytecodes::_fstore_2: store_one(current, 2); break;
292 case Bytecodes::_fstore_3: store_one(current, 3); break;
293 case Bytecodes::_dstore_0: store_two(current, 0); break;
294 case Bytecodes::_dstore_1: store_two(current, 1); break;
295 case Bytecodes::_dstore_2: store_two(current, 2); break;
296 case Bytecodes::_dstore_3: store_two(current, 3); break;
297 case Bytecodes::_astore_0: store_one(current, 0); break;
298 case Bytecodes::_astore_1: store_one(current, 1); break;
299 case Bytecodes::_astore_2: store_one(current, 2); break;
300 case Bytecodes::_astore_3: store_one(current, 3); break;
301
302 // track bytecodes that affect the control flow
303 case Bytecodes::_athrow: // fall through
304 case Bytecodes::_ret: // fall through
305 case Bytecodes::_ireturn: // fall through
306 case Bytecodes::_lreturn: // fall through
307 case Bytecodes::_freturn: // fall through
308 case Bytecodes::_dreturn: // fall through
309 case Bytecodes::_areturn: // fall through
310 case Bytecodes::_return:
311 current = nullptr;
312 break;
313
314 case Bytecodes::_ifeq: // fall through
315 case Bytecodes::_ifne: // fall through
316 case Bytecodes::_iflt: // fall through
317 case Bytecodes::_ifge: // fall through
318 case Bytecodes::_ifgt: // fall through
319 case Bytecodes::_ifle: // fall through
320 case Bytecodes::_if_icmpeq: // fall through
321 case Bytecodes::_if_icmpne: // fall through
322 case Bytecodes::_if_icmplt: // fall through
323 case Bytecodes::_if_icmpge: // fall through
324 case Bytecodes::_if_icmpgt: // fall through
325 case Bytecodes::_if_icmple: // fall through
326 case Bytecodes::_if_acmpeq: // fall through
327 case Bytecodes::_if_acmpne: // fall through
328 case Bytecodes::_ifnull: // fall through
329 case Bytecodes::_ifnonnull:
330 if (s.next_bci() < end_bci) {
331 make_block_at(s.next_bci(), current);
332 }
333 make_block_at(s.get_dest(), current);
334 current = nullptr;
335 break;
336
337 case Bytecodes::_goto:
338 make_block_at(s.get_dest(), current);
339 current = nullptr;
340 break;
341
342 case Bytecodes::_goto_w:
343 make_block_at(s.get_far_dest(), current);
344 current = nullptr;
345 break;
346
347 case Bytecodes::_jsr:
348 handle_jsr(current, s.get_dest(), s.next_bci());
349 current = nullptr;
350 break;
351
352 case Bytecodes::_jsr_w:
353 handle_jsr(current, s.get_far_dest(), s.next_bci());
354 current = nullptr;
355 break;
356
357 case Bytecodes::_tableswitch: {
358 // set block for each case
359 Bytecode_tableswitch sw(&s);
360 int l = sw.length();
361 for (int i = 0; i < l; i++) {
362 make_block_at(cur_bci + sw.dest_offset_at(i), current);
363 }
364 make_block_at(cur_bci + sw.default_offset(), current);
365 current = nullptr;
366 break;
367 }
368
369 case Bytecodes::_lookupswitch: {
370 // set block for each case
371 Bytecode_lookupswitch sw(&s);
372 int l = sw.number_of_pairs();
373 for (int i = 0; i < l; i++) {
374 make_block_at(cur_bci + sw.pair_at(i).offset(), current);
375 }
376 make_block_at(cur_bci + sw.default_offset(), current);
377 current = nullptr;
378 break;
379 }
380
381 default:
382 break;
383 }
384 }
385 }
386
387
388 void BlockListBuilder::mark_loops() {
389 ResourceMark rm;
390
391 const int number_of_blocks = _blocks.length();
392 _active.initialize(number_of_blocks);
393 _visited.initialize(number_of_blocks);
394 _loop_map = GrowableArray<ResourceBitMap>(number_of_blocks, number_of_blocks, ResourceBitMap());
395 for (int i = 0; i < number_of_blocks; i++) {
396 _loop_map.at(i).initialize(number_of_blocks);
397 }
398 _next_loop_index = 0;
399 _next_block_number = _blocks.length();
400
401 // The loop detection algorithm works as follows:
402 // - We maintain the _loop_map, where for each block we have a bitmap indicating which loops contain this block.
403 // - The CFG is recursively traversed (depth-first) and if we detect a loop, we assign the loop a unique number that is stored
404 // in the bitmap associated with the loop header block. Until we return back through that loop header the bitmap contains
405 // only a single bit corresponding to the loop number.
406 // - The bit is then propagated for all the blocks in the loop after we exit them (post-order). There could be multiple bits
407 // of course in case of nested loops.
408 // - When we exit the loop header we remove that single bit and assign the real loop state for it.
409 // - Now, the tricky part here is how we detect irreducible loops. In the algorithm above the loop state bits
410 // are propagated to the predecessors. If we encounter an irreducible loop (a loop with multiple heads) we would see
  //   a node with some loop bit set that would then propagate back and never be cleared, because we would
  //   never go back through the original loop header. Therefore, if there are any irreducible loops, the bits in the states
413 // for these loops are going to propagate back to the root.
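  //
  // For intuition, a sketch on a hypothetical CFG: A -> B -> C, where C
  // branches back to B. When the traversal follows the back edge from C,
  // B is still active, so B becomes a loop header and gets a fresh bit
  // (say bit 0) in its _loop_map entry. Returning from C propagates {0}
  // into C's cached state; when we finally leave B, bit 0 is removed again
  // (the header is outside of its own loop), so the state reaching the root
  // via A stays empty and no irreducible loop is reported.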
414 BlockBegin* start = _bci2block->at(0);
415 _block_id_start = start->block_id();
416 BitMap& loop_state = mark_loops(start, false);
417 if (!loop_state.is_empty()) {
418 compilation()->set_has_irreducible_loops(true);
419 }
420 assert(_next_block_number >= 0, "invalid block numbers");
421
422 // Remove dangling Resource pointers before the ResourceMark goes out-of-scope.
423 _active.resize(0);
424 _visited.resize(0);
425 _loop_map.clear();
426 }
427
428 void BlockListBuilder::make_loop_header(BlockBegin* block) {
429 int block_id = block->block_id();
430 int block_bit = bit_number(block_id);
431 if (block->is_set(BlockBegin::exception_entry_flag)) {
432 // exception edges may look like loops but don't mark them as such
433 // since it screws up block ordering.
434 return;
435 }
436 if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
437 block->set(BlockBegin::parser_loop_header_flag);
438
439 assert(_loop_map.at(block_bit).is_empty(), "must not be set yet");
440 assert(0 <= _next_loop_index && _next_loop_index < _loop_map.length(), "_next_loop_index is too large");
441 _loop_map.at(block_bit).set_bit(_next_loop_index++);
442 } else {
443 // block already marked as loop header
444 assert(_loop_map.at(block_bit).count_one_bits() == 1, "exactly one bit must be set");
445 }
446 }
447
448 BitMap& BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
449 int block_id = block->block_id();
450 int block_bit = bit_number(block_id);
451 if (_visited.at(block_bit)) {
452 if (_active.at(block_bit)) {
453 // reached block via backward branch
454 make_loop_header(block);
455 }
456 // return cached loop information for this block
457 return _loop_map.at(block_bit);
458 }
459
460 if (block->is_set(BlockBegin::subroutine_entry_flag)) {
461 in_subroutine = true;
462 }
463
464 // set active and visited bits before successors are processed
465 _visited.set_bit(block_bit);
466 _active.set_bit(block_bit);
467
468 ResourceMark rm;
469 ResourceBitMap loop_state(_loop_map.length());
470 for (int i = number_of_successors(block) - 1; i >= 0; i--) {
471 BlockBegin* sux = successor_at(block, i);
472 // recursively process all successors
473 loop_state.set_union(mark_loops(sux, in_subroutine));
474 }
475
476 // clear active-bit after all successors are processed
477 _active.clear_bit(block_bit);
478
479 // reverse-post-order numbering of all blocks
480 block->set_depth_first_number(_next_block_number);
481 _next_block_number--;
482
  if (!loop_state.is_empty() || in_subroutine) {
484 // block is contained at least in one loop, so phi functions are necessary
485 // phi functions are also necessary for all locals stored in a subroutine
486 scope()->requires_phi_function().set_union(block->stores_to_locals());
487 }
488
489 if (block->is_set(BlockBegin::parser_loop_header_flag)) {
490 BitMap& header_loop_state = _loop_map.at(block_bit);
491 assert(header_loop_state.count_one_bits() == 1, "exactly one bit must be set");
    // remove the bit with the loop number from the state (the header itself is outside of the loop)
493 loop_state.set_difference(header_loop_state);
494 }
495
496 // cache and return loop information for this block
497 _loop_map.at(block_bit).set_from(loop_state);
498 return _loop_map.at(block_bit);
499 }
500
501 inline int BlockListBuilder::number_of_successors(BlockBegin* block)
502 {
503 assert(_bci2block_successors.length() > block->bci(), "sux must exist");
504 return _bci2block_successors.at(block->bci()).length();
505 }
506
507 inline BlockBegin* BlockListBuilder::successor_at(BlockBegin* block, int i)
508 {
509 assert(_bci2block_successors.length() > block->bci(), "sux must exist");
510 return _bci2block_successors.at(block->bci()).at(i);
511 }
512
513 inline void BlockListBuilder::add_successor(BlockBegin* block, BlockBegin* sux)
514 {
515 assert(_bci2block_successors.length() > block->bci(), "sux must exist");
516 _bci2block_successors.at(block->bci()).append(sux);
517 }
518
519 inline bool BlockListBuilder::is_successor(BlockBegin* block, BlockBegin* sux) {
520 assert(_bci2block_successors.length() > block->bci(), "sux must exist");
521 return _bci2block_successors.at(block->bci()).contains(sux);
522 }
523
524 #ifndef PRODUCT
525
526 static int compare_depth_first(BlockBegin** a, BlockBegin** b) {
527 return (*a)->depth_first_number() - (*b)->depth_first_number();
528 }
529
530 void BlockListBuilder::print() {
531 tty->print("----- initial block list of BlockListBuilder for method ");
532 method()->print_short_name();
533 tty->cr();
534
535 // better readability if blocks are sorted in processing order
536 _blocks.sort(compare_depth_first);
537
538 for (int i = 0; i < _blocks.length(); i++) {
539 BlockBegin* cur = _blocks.at(i);
540 tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());
541
542 tty->print(cur->is_set(BlockBegin::std_entry_flag) ? " std" : " ");
543 tty->print(cur->is_set(BlockBegin::osr_entry_flag) ? " osr" : " ");
544 tty->print(cur->is_set(BlockBegin::exception_entry_flag) ? " ex" : " ");
545 tty->print(cur->is_set(BlockBegin::subroutine_entry_flag) ? " sr" : " ");
546 tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh" : " ");
547
548 if (number_of_successors(cur) > 0) {
549 tty->print(" sux: ");
550 for (int j = 0; j < number_of_successors(cur); j++) {
551 BlockBegin* sux = successor_at(cur, j);
552 tty->print("B%d ", sux->block_id());
553 }
554 }
555 tty->cr();
556 }
557 }
558
559 #endif
560
561
// A simple growable array of Values indexed by ciFields (keyed by field offset)
563 class FieldBuffer: public CompilationResourceObj {
564 private:
565 GrowableArray<Value> _values;
566
567 public:
568 FieldBuffer() {}
569
570 void kill() {
571 _values.trunc_to(0);
572 }
573
574 Value at(ciField* field) {
575 assert(field->holder()->is_loaded(), "must be a loaded field");
576 int offset = field->offset_in_bytes();
577 if (offset < _values.length()) {
578 return _values.at(offset);
579 } else {
580 return nullptr;
581 }
582 }
583
584 void at_put(ciField* field, Value value) {
585 assert(field->holder()->is_loaded(), "must be a loaded field");
586 int offset = field->offset_in_bytes();
587 _values.at_put_grow(offset, value, nullptr);
588 }
589
590 };
591
592
// MemoryBuffer is a fairly simple model of the current state of memory.
594 // It partitions memory into several pieces. The first piece is
595 // generic memory where little is known about the owner of the memory.
596 // This is conceptually represented by the tuple <O, F, V> which says
597 // that the field F of object O has value V. This is flattened so
598 // that F is represented by the offset of the field and the parallel
599 // arrays _objects and _values are used for O and V. Loads of O.F can
600 // simply use V. Newly allocated objects are kept in a separate list
601 // along with a parallel array for each object which represents the
602 // current value of its fields. Stores of the default value to fields
603 // which have never been stored to before are eliminated since they
604 // are redundant. Once newly allocated objects are stored into
605 // another object or they are passed out of the current compile they
606 // are treated like generic memory.
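//
// For intuition, a sketch of the effect (class and field names are made up):
//
//   Foo f = new Foo();  // tracked via new_instance() in _newobjects/_fields
//   f.x = 0;            // default store to a never-written field: eliminated
//   f.x = 5;            // recorded in f's FieldBuffer
//   int a = f.x;        // load() returns the recorded value 5
//   g.y = f;            // f escapes: store_value() demotes f to generic memory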
607
608 class MemoryBuffer: public CompilationResourceObj {
609 private:
610 FieldBuffer _values;
611 GrowableArray<Value> _objects;
612 GrowableArray<Value> _newobjects;
613 GrowableArray<FieldBuffer*> _fields;
614
615 public:
616 MemoryBuffer() {}
617
618 StoreField* store(StoreField* st) {
619 if (!EliminateFieldAccess) {
620 return st;
621 }
622
623 Value object = st->obj();
624 Value value = st->value();
625 ciField* field = st->field();
626 if (field->holder()->is_loaded()) {
627 int offset = field->offset_in_bytes();
628 int index = _newobjects.find(object);
629 if (index != -1) {
        // object is tracked as newly allocated; a store of the default value
        // to a field that has never been written is redundant
631 FieldBuffer* buf = _fields.at(index);
632 if (buf->at(field) == nullptr && is_default_value(value)) {
633 #ifndef PRODUCT
634 if (PrintIRDuringConstruction && Verbose) {
635 tty->print_cr("Eliminated store for object %d:", index);
636 st->print_line();
637 }
638 #endif
639 return nullptr;
640 } else {
641 buf->at_put(field, value);
642 }
643 } else {
644 _objects.at_put_grow(offset, object, nullptr);
645 _values.at_put(field, value);
646 }
647
648 store_value(value);
649 } else {
      // if we held onto field names we could alias based on names, but
      // we don't know what's being stored to, so kill it all.
652 kill();
653 }
654 return st;
655 }
656
657
  // return true if this value corresponds to the default value of a field.
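  // Note that float and double compare bit patterns, so -0.0 (whose bit
  // pattern is non-zero) is not treated as a default value.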
659 bool is_default_value(Value value) {
660 Constant* con = value->as_Constant();
661 if (con) {
662 switch (con->type()->tag()) {
663 case intTag: return con->type()->as_IntConstant()->value() == 0;
664 case longTag: return con->type()->as_LongConstant()->value() == 0;
665 case floatTag: return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
666 case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
667 case objectTag: return con->type() == objectNull;
668 default: ShouldNotReachHere();
669 }
670 }
671 return false;
672 }
673
674
675 // return either the actual value of a load or the load itself
676 Value load(LoadField* load) {
677 if (!EliminateFieldAccess) {
678 return load;
679 }
680
681 ciField* field = load->field();
682 Value object = load->obj();
683 if (field->holder()->is_loaded() && !field->is_volatile()) {
684 int offset = field->offset_in_bytes();
685 Value result = nullptr;
686 int index = _newobjects.find(object);
687 if (index != -1) {
688 result = _fields.at(index)->at(field);
689 } else if (_objects.at_grow(offset, nullptr) == object) {
690 result = _values.at(field);
691 }
692 if (result != nullptr) {
693 #ifndef PRODUCT
694 if (PrintIRDuringConstruction && Verbose) {
695 tty->print_cr("Eliminated load: ");
696 load->print_line();
697 }
698 #endif
699 assert(result->type()->tag() == load->type()->tag(), "wrong types");
700 return result;
701 }
702 }
703 return load;
704 }
705
706 // Record this newly allocated object
707 void new_instance(NewInstance* object) {
708 int index = _newobjects.length();
709 _newobjects.append(object);
710 if (_fields.at_grow(index, nullptr) == nullptr) {
711 _fields.at_put(index, new FieldBuffer());
712 } else {
713 _fields.at(index)->kill();
714 }
715 }
716
717 void store_value(Value value) {
718 int index = _newobjects.find(value);
719 if (index != -1) {
720 // stored a newly allocated object into another object.
      // Assume we've lost track of it as a separate slice of memory.
722 // We could do better by keeping track of whether individual
723 // fields could alias each other.
724 _newobjects.remove_at(index);
      // pull the field info out and append it at the end of the
      // field info list so that it can be reused later.
727 _fields.append(_fields.at(index));
728 _fields.remove_at(index);
729 }
730 }
731
732 void kill() {
733 _newobjects.trunc_to(0);
734 _objects.trunc_to(0);
735 _values.kill();
736 }
737 };
738
739
740 // Implementation of GraphBuilder's ScopeData
741
742 GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
743 : _parent(parent)
744 , _bci2block(nullptr)
745 , _scope(nullptr)
746 , _has_handler(false)
747 , _stream(nullptr)
748 , _work_list(nullptr)
749 , _caller_stack_size(-1)
750 , _continuation(nullptr)
751 , _parsing_jsr(false)
752 , _jsr_xhandlers(nullptr)
753 , _num_returns(0)
754 , _cleanup_block(nullptr)
755 , _cleanup_return_prev(nullptr)
756 , _cleanup_state(nullptr)
757 , _ignore_return(false)
758 {
759 if (parent != nullptr) {
760 _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
761 } else {
762 _max_inline_size = C1MaxInlineSize;
763 }
764 if (_max_inline_size < C1MaxTrivialSize) {
765 _max_inline_size = C1MaxTrivialSize;
766 }
767 }
768
769
770 void GraphBuilder::kill_all() {
771 if (UseLocalValueNumbering) {
772 vmap()->kill_all();
773 }
774 _memory->kill();
775 }
776
777
778 BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
779 if (parsing_jsr()) {
780 // It is necessary to clone all blocks associated with a
781 // subroutine, including those for exception handlers in the scope
782 // of the method containing the jsr (because those exception
783 // handlers may contain ret instructions in some cases).
784 BlockBegin* block = bci2block()->at(bci);
785 if (block != nullptr && block == parent()->bci2block()->at(bci)) {
786 BlockBegin* new_block = new BlockBegin(block->bci());
787 if (PrintInitialBlockList) {
788 tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
789 block->block_id(), block->bci(), new_block->block_id());
790 }
      // copy data from the cloned block
792 new_block->set_depth_first_number(block->depth_first_number());
793 if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
794 // Preserve certain flags for assertion checking
795 if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
796 if (block->is_set(BlockBegin::exception_entry_flag)) new_block->set(BlockBegin::exception_entry_flag);
797
798 // copy was_visited_flag to allow early detection of bailouts
799 // if a block that is used in a jsr has already been visited before,
800 // it is shared between the normal control flow and a subroutine
801 // BlockBegin::try_merge returns false when the flag is set, this leads
802 // to a compilation bailout
803 if (block->is_set(BlockBegin::was_visited_flag)) new_block->set(BlockBegin::was_visited_flag);
804
805 bci2block()->at_put(bci, new_block);
806 block = new_block;
807 }
808 return block;
809 } else {
810 return bci2block()->at(bci);
811 }
812 }
813
814
815 XHandlers* GraphBuilder::ScopeData::xhandlers() const {
816 if (_jsr_xhandlers == nullptr) {
817 assert(!parsing_jsr(), "");
818 return scope()->xhandlers();
819 }
820 assert(parsing_jsr(), "");
821 return _jsr_xhandlers;
822 }
823
824
825 void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
826 _scope = scope;
827 bool parent_has_handler = false;
828 if (parent() != nullptr) {
829 parent_has_handler = parent()->has_handler();
830 }
831 _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
832 }
833
834
835 void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
836 Instruction* return_prev,
837 ValueStack* return_state) {
838 _cleanup_block = block;
839 _cleanup_return_prev = return_prev;
840 _cleanup_state = return_state;
841 }
842
843
844 void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
845 if (_work_list == nullptr) {
846 _work_list = new BlockList();
847 }
848
849 if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
850 // Do not start parsing the continuation block while in a
851 // sub-scope
852 if (parsing_jsr()) {
853 if (block == jsr_continuation()) {
854 return;
855 }
856 } else {
857 if (block == continuation()) {
858 return;
859 }
860 }
861 block->set(BlockBegin::is_on_work_list_flag);
862 _work_list->push(block);
863
864 sort_top_into_worklist(_work_list, block);
865 }
866 }
867
868
869 void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
870 assert(worklist->top() == top, "");
  // insertion-sort the new top element into the work list, which is kept
  // sorted by descending depth-first number so the smallest dfn is popped first
872 const int dfn = top->depth_first_number();
873 assert(dfn != -1, "unknown depth first number");
874 int i = worklist->length()-2;
875 while (i >= 0) {
876 BlockBegin* b = worklist->at(i);
877 if (b->depth_first_number() < dfn) {
878 worklist->at_put(i+1, b);
879 } else {
880 break;
881 }
882 i --;
883 }
884 if (i >= -1) worklist->at_put(i + 1, top);
885 }
886
887
888 BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
889 if (is_work_list_empty()) {
890 return nullptr;
891 }
892 return _work_list->pop();
893 }
894
895
896 bool GraphBuilder::ScopeData::is_work_list_empty() const {
897 return (_work_list == nullptr || _work_list->length() == 0);
898 }
899
900
901 void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
902 assert(parsing_jsr(), "");
903 // clone all the exception handlers from the scope
904 XHandlers* handlers = new XHandlers(scope()->xhandlers());
905 const int n = handlers->length();
906 for (int i = 0; i < n; i++) {
907 // The XHandlers need to be adjusted to dispatch to the cloned
908 // handler block instead of the default one but the synthetic
909 // unlocker needs to be handled specially. The synthetic unlocker
910 // should be left alone since there can be only one and all code
911 // should dispatch to the same one.
912 XHandler* h = handlers->handler_at(i);
913 assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
914 h->set_entry_block(block_at(h->handler_bci()));
915 }
916 _jsr_xhandlers = handlers;
917 }
918
919
920 int GraphBuilder::ScopeData::num_returns() {
921 if (parsing_jsr()) {
922 return parent()->num_returns();
923 }
924 return _num_returns;
925 }
926
927
928 void GraphBuilder::ScopeData::incr_num_returns() {
929 if (parsing_jsr()) {
930 parent()->incr_num_returns();
931 } else {
932 ++_num_returns;
933 }
934 }
935
936
937 // Implementation of GraphBuilder
938
939 #define INLINE_BAILOUT(msg) { inline_bailout(msg); return false; }
940
941
942 void GraphBuilder::load_constant() {
943 ciConstant con = stream()->get_constant();
944 if (con.is_valid()) {
945 ValueType* t = illegalType;
946 ValueStack* patch_state = nullptr;
947 switch (con.basic_type()) {
948 case T_BOOLEAN: t = new IntConstant (con.as_boolean()); break;
949 case T_BYTE : t = new IntConstant (con.as_byte ()); break;
950 case T_CHAR : t = new IntConstant (con.as_char ()); break;
951 case T_SHORT : t = new IntConstant (con.as_short ()); break;
952 case T_INT : t = new IntConstant (con.as_int ()); break;
953 case T_LONG : t = new LongConstant (con.as_long ()); break;
954 case T_FLOAT : t = new FloatConstant (con.as_float ()); break;
955 case T_DOUBLE : t = new DoubleConstant(con.as_double ()); break;
956 case T_ARRAY : // fall-through
957 case T_OBJECT : {
958 ciObject* obj = con.as_object();
959 if (!obj->is_loaded() || (PatchALot && !stream()->is_string_constant())) {
960 // A Class, MethodType, MethodHandle, Dynamic, or String.
961 patch_state = copy_state_before();
962 t = new ObjectConstant(obj);
963 } else {
964 // Might be a Class, MethodType, MethodHandle, or Dynamic constant
965 // result, which might turn out to be an array.
966 if (obj->is_null_object()) {
967 t = objectNull;
968 } else if (obj->is_array()) {
969 t = new ArrayConstant(obj->as_array());
970 } else {
971 t = new InstanceConstant(obj->as_instance());
972 }
973 }
974 break;
975 }
976 default: ShouldNotReachHere();
977 }
978 Value x;
979 if (patch_state != nullptr) {
      // Arbitrary memory effects from running the BSM or from class loading (via a custom loader) during linkage.
981 bool kills_memory = stream()->is_dynamic_constant() ||
982 (!stream()->is_string_constant() && !method()->holder()->has_trusted_loader());
983 x = new Constant(t, patch_state, kills_memory);
984 } else {
985 x = new Constant(t);
986 }
987
988 // Unbox the value at runtime, if needed.
989 // ConstantDynamic entry can be of a primitive type, but it is cached in boxed form.
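    // For example, a CONSTANT_Dynamic of type int is cached as a java.lang.Integer,
    // so we append a LoadField of Integer.value below to unbox it.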
990 if (patch_state != nullptr) {
991 int cp_index = stream()->get_constant_pool_index();
992 BasicType type = stream()->get_basic_type_for_constant_at(cp_index);
993 if (is_java_primitive(type)) {
994 ciInstanceKlass* box_klass = ciEnv::current()->get_box_klass_for_primitive_type(type);
995 assert(box_klass->is_loaded(), "sanity");
996 int offset = java_lang_boxing_object::value_offset(type);
997 ciField* value_field = box_klass->get_field_by_offset(offset, false /*is_static*/);
998 x = new LoadField(append(x), offset, value_field, false /*is_static*/, patch_state, false /*needs_patching*/);
999 t = as_ValueType(type);
1000 } else {
1001 assert(is_reference_type(type), "not a reference: %s", type2name(type));
1002 }
1003 }
1004
1005 push(t, append(x));
1006 } else {
1007 BAILOUT("could not resolve a constant");
1008 }
1009 }
1010
1011
1012 void GraphBuilder::load_local(ValueType* type, int index) {
1013 Value x = state()->local_at(index);
1014 assert(x != nullptr && !x->type()->is_illegal(), "access of illegal local variable");
1015 push(type, x);
1016 }
1017
1018
1019 void GraphBuilder::store_local(ValueType* type, int index) {
1020 Value x = pop(type);
1021 store_local(state(), x, index);
1022 }
1023
1024
1025 void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
1026 if (parsing_jsr()) {
1027 // We need to do additional tracking of the location of the return
1028 // address for jsrs since we don't handle arbitrary jsr/ret
1029 // constructs. Here we are figuring out in which circumstances we
1030 // need to bail out.
1031 if (x->type()->is_address()) {
1032 scope_data()->set_jsr_return_address_local(index);
1033
1034 // Also check parent jsrs (if any) at this time to see whether
1035 // they are using this local. We don't handle skipping over a
1036 // ret.
1037 for (ScopeData* cur_scope_data = scope_data()->parent();
1038 cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
1039 cur_scope_data = cur_scope_data->parent()) {
1040 if (cur_scope_data->jsr_return_address_local() == index) {
1041 BAILOUT("subroutine overwrites return address from previous subroutine");
1042 }
1043 }
1044 } else if (index == scope_data()->jsr_return_address_local()) {
1045 scope_data()->set_jsr_return_address_local(-1);
1046 }
1047 }
1048
1049 state->store_local(index, x);
1050 }
1051
1052
1053 void GraphBuilder::load_indexed(BasicType type) {
  // In case of in-block code motion during range check elimination
1055 ValueStack* state_before = nullptr;
1056 int array_idx = state()->stack_size() - 2;
1057 if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
1058 // Save the entire state and re-execute on deopt when accessing flat arrays
1059 state_before = copy_state_before();
1060 state_before->set_should_reexecute(true);
1061 } else {
1062 state_before = copy_state_indexed_access();
1063 }
1064 compilation()->set_has_access_indexed(true);
1065 Value index = ipop();
1066 Value array = apop();
1067 Value length = nullptr;
1068 if (CSEArrayLength ||
1069 (array->as_Constant() != nullptr) ||
1070 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1071 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1072 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1073 length = append(new ArrayLength(array, state_before));
1074 }
1075
1076 bool need_membar = false;
1077 LoadIndexed* load_indexed = nullptr;
1078 Instruction* result = nullptr;
1079 if (array->is_loaded_flat_array()) {
1080 ciType* array_type = array->declared_type();
1081 ciFlatArrayKlass* array_klass = array_type->as_flat_array_klass();
1082 ciInlineKlass* elem_klass = array_klass->element_klass()->as_inline_klass();
1083
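    // Peek at the next bytecode: a flat-array load immediately followed by a
    // getfield on the element can skip buffering the element and read the flat
    // field directly; the decision is deferred via DelayedLoadIndexed below.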
1084 bool can_delay_access = false;
1085 ciBytecodeStream s(method());
1086 s.force_bci(bci());
1087 s.next();
1088 if (s.cur_bc() == Bytecodes::_getfield) {
1089 bool is_null_free = array_klass->is_elem_null_free();
1090 bool will_link;
1091 ciField* next_field = s.get_field(will_link);
1092 bool next_needs_patching = !next_field->holder()->is_initialized() ||
1093 !next_field->will_link(method(), Bytecodes::_getfield) ||
1094 PatchALot;
1095 bool needs_atomic_access = array_klass->is_elem_atomic();
1096 can_delay_access = is_null_free && C1UseDelayedFlattenedFieldReads &&
1097 !next_needs_patching && !needs_atomic_access;
1098 }
1099 if (can_delay_access) {
1100 // potentially optimizable array access, storing information for delayed decision
1101 LoadIndexed* li = new LoadIndexed(array, index, length, type, state_before);
1102 DelayedLoadIndexed* dli = new DelayedLoadIndexed(li, state_before);
1103 li->set_delayed(dli);
1104 set_pending_load_indexed(dli);
1105 return; // Nothing else to do for now
1106 } else {
1107 NewInstance* buffer = new NewInstance(elem_klass, state_before, false, true);
1108 buffer->set_null_free(true);
1109 _memory->new_instance(buffer);
1110 result = append_split(buffer);
1111 load_indexed = new LoadIndexed(array, index, length, type, state_before);
1112 load_indexed->set_buffer(buffer);
1113 // The LoadIndexed node will initialize this instance by copying from
1114 // the flat field. Ensure these stores are visible before any
1115 // subsequent store that publishes this reference.
1116 need_membar = true;
1117 }
1118 } else {
1119 load_indexed = new LoadIndexed(array, index, length, type, state_before);
1120 if (profile_array_accesses() && is_reference_type(type)) {
1121 compilation()->set_would_profile(true);
1122 load_indexed->set_should_profile(true);
1123 load_indexed->set_profiled_method(method());
1124 load_indexed->set_profiled_bci(bci());
1125 }
1126 }
1127 result = append(load_indexed);
1128 if (need_membar) {
1129 append(new MemBar(lir_membar_storestore));
1130 }
1131 assert(!load_indexed->should_profile() || load_indexed == result, "should not be optimized out");
1132 push(as_ValueType(type), result);
1133 }
1134
1135
1136 void GraphBuilder::store_indexed(BasicType type) {
  // In case of in-block code motion during range check elimination
1138 ValueStack* state_before = nullptr;
1139 int array_idx = state()->stack_size() - 3;
1140 if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
1141 // Save the entire state and re-execute on deopt when accessing flat arrays
1142 state_before = copy_state_before();
1143 state_before->set_should_reexecute(true);
1144 } else {
1145 state_before = copy_state_indexed_access();
1146 }
1147 compilation()->set_has_access_indexed(true);
1148 Value value = pop(as_ValueType(type));
1149 Value index = ipop();
1150 Value array = apop();
1151 Value length = nullptr;
1152 if (CSEArrayLength ||
1153 (array->as_Constant() != nullptr) ||
1154 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1155 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1156 (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1157 length = append(new ArrayLength(array, state_before));
1158 }
1159 ciType* array_type = array->declared_type();
1160 bool check_boolean = false;
1161 if (array_type != nullptr) {
1162 if (array_type->is_loaded() &&
1163 array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
1164 assert(type == T_BYTE, "boolean store uses bastore");
1165 Value mask = append(new Constant(new IntConstant(1)));
1166 value = append(new LogicOp(Bytecodes::_iand, value, mask));
1167 }
1168 } else if (type == T_BYTE) {
1169 check_boolean = true;
1170 }
1171
1172 StoreIndexed* store_indexed = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
1173 if (profile_array_accesses() && is_reference_type(type) && !array->is_loaded_flat_array()) {
1174 compilation()->set_would_profile(true);
1175 store_indexed->set_should_profile(true);
1176 store_indexed->set_profiled_method(method());
1177 store_indexed->set_profiled_bci(bci());
1178 }
1179 Instruction* result = append(store_indexed);
1180 assert(!store_indexed->should_profile() || store_indexed == result, "should not be optimized out");
1181 _memory->store_value(value);
1182 }
1183
1184 void GraphBuilder::stack_op(Bytecodes::Code code) {
1185 switch (code) {
1186 case Bytecodes::_pop:
1187 { state()->raw_pop();
1188 }
1189 break;
1190 case Bytecodes::_pop2:
1191 { state()->raw_pop();
1192 state()->raw_pop();
1193 }
1194 break;
1195 case Bytecodes::_dup:
1196 { Value w = state()->raw_pop();
1197 state()->raw_push(w);
1198 state()->raw_push(w);
1199 }
1200 break;
1201 case Bytecodes::_dup_x1:
1202 { Value w1 = state()->raw_pop();
1203 Value w2 = state()->raw_pop();
1204 state()->raw_push(w1);
1205 state()->raw_push(w2);
1206 state()->raw_push(w1);
1207 }
1208 break;
1209 case Bytecodes::_dup_x2:
1210 { Value w1 = state()->raw_pop();
1211 Value w2 = state()->raw_pop();
1212 Value w3 = state()->raw_pop();
1213 state()->raw_push(w1);
1214 state()->raw_push(w3);
1215 state()->raw_push(w2);
1216 state()->raw_push(w1);
1217 }
1218 break;
1219 case Bytecodes::_dup2:
1220 { Value w1 = state()->raw_pop();
1221 Value w2 = state()->raw_pop();
1222 state()->raw_push(w2);
1223 state()->raw_push(w1);
1224 state()->raw_push(w2);
1225 state()->raw_push(w1);
1226 }
1227 break;
1228 case Bytecodes::_dup2_x1:
1229 { Value w1 = state()->raw_pop();
1230 Value w2 = state()->raw_pop();
1231 Value w3 = state()->raw_pop();
1232 state()->raw_push(w2);
1233 state()->raw_push(w1);
1234 state()->raw_push(w3);
1235 state()->raw_push(w2);
1236 state()->raw_push(w1);
1237 }
1238 break;
1239 case Bytecodes::_dup2_x2:
1240 { Value w1 = state()->raw_pop();
1241 Value w2 = state()->raw_pop();
1242 Value w3 = state()->raw_pop();
1243 Value w4 = state()->raw_pop();
1244 state()->raw_push(w2);
1245 state()->raw_push(w1);
1246 state()->raw_push(w4);
1247 state()->raw_push(w3);
1248 state()->raw_push(w2);
1249 state()->raw_push(w1);
1250 }
1251 break;
1252 case Bytecodes::_swap:
1253 { Value w1 = state()->raw_pop();
1254 Value w2 = state()->raw_pop();
1255 state()->raw_push(w1);
1256 state()->raw_push(w2);
1257 }
1258 break;
1259 default:
1260 ShouldNotReachHere();
1261 break;
1262 }
1263 }
1264
1265
1266 void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
1267 Value y = pop(type);
1268 Value x = pop(type);
1269 Value res = new ArithmeticOp(code, x, y, state_before);
1270 push(type, append(res));
1271 }
1272
1273
1274 void GraphBuilder::negate_op(ValueType* type) {
1275 push(type, append(new NegateOp(pop(type))));
1276 }
1277
1278
1279 void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
1280 Value s = ipop();
1281 Value x = pop(type);
1282 // try to simplify
  // Note: This code should go into the canonicalizer as soon as it
  // can handle canonicalized forms that contain more than one node.
1285 if (CanonicalizeNodes && code == Bytecodes::_iushr) {
1286 // pattern: x >>> s
1287 IntConstant* s1 = s->type()->as_IntConstant();
1288 if (s1 != nullptr) {
1289 // pattern: x >>> s1, with s1 constant
1290 ShiftOp* l = x->as_ShiftOp();
1291 if (l != nullptr && l->op() == Bytecodes::_ishl) {
1292 // pattern: (a << b) >>> s1
1293 IntConstant* s0 = l->y()->type()->as_IntConstant();
1294 if (s0 != nullptr) {
1295 // pattern: (a << s0) >>> s1
1296 const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
1297 const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
1298 if (s0c == s1c) {
1299 if (s0c == 0) {
1300 // pattern: (a << 0) >>> 0 => simplify to: a
1301 ipush(l->x());
1302 } else {
1303 // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
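          // e.g. (a << 24) >>> 24 => a & 0xFF, since m = right_n_bits(32 - 24)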
1304 assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
1305 const int m = checked_cast<int>(right_n_bits(BitsPerInt - s0c));
1306 Value s = append(new Constant(new IntConstant(m)));
1307 ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
1308 }
1309 return;
1310 }
1311 }
1312 }
1313 }
1314 }
1315 // could not simplify
1316 push(type, append(new ShiftOp(code, x, s)));
1317 }
1318
1319
1320 void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
1321 Value y = pop(type);
1322 Value x = pop(type);
1323 push(type, append(new LogicOp(code, x, y)));
1324 }
1325
1326
1327 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
1328 ValueStack* state_before = copy_state_before();
1329 Value y = pop(type);
1330 Value x = pop(type);
1331 ipush(append(new CompareOp(code, x, y, state_before)));
1332 }
1333
1334
1335 void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
1336 push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
1337 }
1338
1339
1340 void GraphBuilder::increment() {
1341 int index = stream()->get_index();
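  // The delta is encoded in the bytecode itself: wide iinc carries a signed
  // 16-bit constant at offset 4 (wide, iinc, index2, const2), while the narrow
  // form carries a signed 8-bit constant at offset 2 (iinc, index, const).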
1342 int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
1343 load_local(intType, index);
1344 ipush(append(new Constant(new IntConstant(delta))));
1345 arithmetic_op(intType, Bytecodes::_iadd);
1346 store_local(intType, index);
1347 }
1348
1349
1350 void GraphBuilder::_goto(int from_bci, int to_bci) {
1351 Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
1352 if (is_profiling()) {
1353 compilation()->set_would_profile(true);
1354 x->set_profiled_bci(bci());
1355 if (profile_branches()) {
1356 x->set_profiled_method(method());
1357 x->set_should_profile(true);
1358 }
1359 }
1360 append(x);
1361 }
1362
1363
1364 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
1365 BlockBegin* tsux = block_at(stream()->get_dest());
1366 BlockBegin* fsux = block_at(stream()->next_bci());
1367 bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
1368
1369 bool subst_check = false;
1370 if (Arguments::is_valhalla_enabled() && (stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne)) {
1371 ValueType* left_vt = x->type();
1372 ValueType* right_vt = y->type();
1373 if (left_vt->is_object()) {
1374 assert(right_vt->is_object(), "must be");
1375 ciKlass* left_klass = x->as_loaded_klass_or_null();
1376 ciKlass* right_klass = y->as_loaded_klass_or_null();
1377
1378 if (left_klass == nullptr || right_klass == nullptr) {
        // The klass is still unloaded, or it came from a Phi node: go to the slow case.
1380 subst_check = true;
1381 } else if (left_klass->can_be_inline_klass() || right_klass->can_be_inline_klass()) {
        // Either operand may be a value object, but we're not sure: go to the slow case.
1383 subst_check = true;
1384 } else {
1385 // No need to do substitutability check
1386 }
1387 }
1388 }
1389 if ((stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne) &&
1390 is_profiling() && profile_branches()) {
1391 compilation()->set_would_profile(true);
1392 append(new ProfileACmpTypes(method(), bci(), x, y));
1393 }
1394
1395 // In case of loop invariant code motion or predicate insertion
1396 // before the body of a loop the state is needed
1397 Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic() || subst_check) ? state_before : nullptr, is_bb, subst_check));
1398
1399 assert(i->as_Goto() == nullptr ||
1400 (i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) ||
1401 (i->as_Goto()->sux_at(0) == fsux && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())),
1402 "safepoint state of Goto returned by canonicalizer incorrect");
1403
1404 if (is_profiling()) {
1405 If* if_node = i->as_If();
1406 if (if_node != nullptr) {
1407 // Note that we'd collect profile data in this method if we wanted it.
1408 compilation()->set_would_profile(true);
1409 // At level 2 we need the proper bci to count backedges
1410 if_node->set_profiled_bci(bci());
1411 if (profile_branches()) {
1412 // Successors can be rotated by the canonicalizer, check for this case.
1413 if_node->set_profiled_method(method());
1414 if_node->set_should_profile(true);
1415 if (if_node->tsux() == fsux) {
1416 if_node->set_swapped(true);
1417 }
1418 }
1419 return;
1420 }
1421
1422 // Check if this If was reduced to Goto.
1423 Goto *goto_node = i->as_Goto();
1424 if (goto_node != nullptr) {
1425 compilation()->set_would_profile(true);
1426 goto_node->set_profiled_bci(bci());
1427 if (profile_branches()) {
1428 goto_node->set_profiled_method(method());
1429 goto_node->set_should_profile(true);
1430 // Find out which successor is used.
1431 if (goto_node->default_sux() == tsux) {
1432 goto_node->set_direction(Goto::taken);
1433 } else if (goto_node->default_sux() == fsux) {
1434 goto_node->set_direction(Goto::not_taken);
1435 } else {
1436 ShouldNotReachHere();
1437 }
1438 }
1439 return;
1440 }
1441 }
1442 }
1443
1444
1445 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
1446 Value y = append(new Constant(intZero));
1447 ValueStack* state_before = copy_state_before();
1448 Value x = ipop();
1449 if_node(x, cond, y, state_before);
1450 }
1451
1452
1453 void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
1454 Value y = append(new Constant(objectNull));
1455 ValueStack* state_before = copy_state_before();
1456 Value x = apop();
1457 if_node(x, cond, y, state_before);
1458 }
1459
1460
1461 void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
1462 ValueStack* state_before = copy_state_before();
1463 Value y = pop(type);
1464 Value x = pop(type);
1465 if_node(x, cond, y, state_before);
1466 }
1467
1468
1469 void GraphBuilder::jsr(int dest) {
1470 // We only handle well-formed jsrs (those which are "block-structured").
1471 // If the bytecodes are strange (jumping out of a jsr block) then we
1472 // might end up trying to re-parse a block containing a jsr which
1473 // has already been activated. Watch for this case and bail out.
1474 if (next_bci() >= method()->code_size()) {
1475 // This can happen if the subroutine does not terminate with a ret,
1476 // effectively turning the jsr into a goto.
1477 BAILOUT("too-complicated jsr/ret structure");
1478 }
1479 for (ScopeData* cur_scope_data = scope_data();
1480 cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
1481 cur_scope_data = cur_scope_data->parent()) {
1482 if (cur_scope_data->jsr_entry_bci() == dest) {
1483 BAILOUT("too-complicated jsr/ret structure");
1484 }
1485 }
1486
1487 push(addressType, append(new Constant(new AddressConstant(next_bci()))));
1488 if (!try_inline_jsr(dest)) {
1489 return; // bailed out while parsing and inlining subroutine
1490 }
1491 }
1492
1493
1494 void GraphBuilder::ret(int local_index) {
1495 if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");
1496
1497 if (local_index != scope_data()->jsr_return_address_local()) {
1498 BAILOUT("can not handle complicated jsr/ret constructs");
1499 }
1500
1501 // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
1502 append(new Goto(scope_data()->jsr_continuation(), false));
1503 }
1504
1505
1506 void GraphBuilder::table_switch() {
1507 Bytecode_tableswitch sw(stream());
1508 const int l = sw.length();
1509 if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
1510 // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it
    // can handle canonicalized forms that contain more than one node.
1513 Value key = append(new Constant(new IntConstant(sw.low_key())));
1514 BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
1515 BlockBegin* fsux = block_at(bci() + sw.default_offset());
1516 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
1517 // In case of loop invariant code motion or predicate insertion
1518 // before the body of a loop the state is needed
1519 ValueStack* state_before = copy_state_if_bb(is_bb);
1520 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
1521 } else {
1522 // collect successors
1523 BlockList* sux = new BlockList(l + 1, nullptr);
1524 int i;
1525 bool has_bb = false;
1526 for (i = 0; i < l; i++) {
1527 sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
1528 if (sw.dest_offset_at(i) < 0) has_bb = true;
1529 }
1530 // add default successor
1531 if (sw.default_offset() < 0) has_bb = true;
1532 sux->at_put(i, block_at(bci() + sw.default_offset()));
1533 // In case of loop invariant code motion or predicate insertion
1534 // before the body of a loop the state is needed
1535 ValueStack* state_before = copy_state_if_bb(has_bb);
1536 Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
1537 #ifdef ASSERT
1538 if (res->as_Goto()) {
1539 for (i = 0; i < l; i++) {
1540 if (sux->at(i) == res->as_Goto()->sux_at(0)) {
1541 assert(res->as_Goto()->is_safepoint() == (sw.dest_offset_at(i) < 0), "safepoint state of Goto returned by canonicalizer incorrect");
1542 }
1543 }
1544 }
1545 #endif
1546 }
1547 }
1548
1549
1550 void GraphBuilder::lookup_switch() {
1551 Bytecode_lookupswitch sw(stream());
1552 const int l = sw.number_of_pairs();
1553 if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
1554 // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it
    // can handle canonicalized forms that contain more than one node.
1557 // simplify to If
1558 LookupswitchPair pair = sw.pair_at(0);
1559 Value key = append(new Constant(new IntConstant(pair.match())));
1560 BlockBegin* tsux = block_at(bci() + pair.offset());
1561 BlockBegin* fsux = block_at(bci() + sw.default_offset());
1562 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop, the state is needed.
    ValueStack* state_before = copy_state_if_bb(is_bb);
1566 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
1567 } else {
1568 // collect successors & keys
1569 BlockList* sux = new BlockList(l + 1, nullptr);
1570 intArray* keys = new intArray(l, l, 0);
1571 int i;
1572 bool has_bb = false;
1573 for (i = 0; i < l; i++) {
1574 LookupswitchPair pair = sw.pair_at(i);
1575 if (pair.offset() < 0) has_bb = true;
1576 sux->at_put(i, block_at(bci() + pair.offset()));
1577 keys->at_put(i, pair.match());
1578 }
1579 // add default successor
1580 if (sw.default_offset() < 0) has_bb = true;
1581 sux->at_put(i, block_at(bci() + sw.default_offset()));
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop, the state is needed.
1584 ValueStack* state_before = copy_state_if_bb(has_bb);
1585 Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
1586 #ifdef ASSERT
1587 if (res->as_Goto()) {
1588 for (i = 0; i < l; i++) {
1589 if (sux->at(i) == res->as_Goto()->sux_at(0)) {
1590 assert(res->as_Goto()->is_safepoint() == (sw.pair_at(i).offset() < 0), "safepoint state of Goto returned by canonicalizer incorrect");
1591 }
1592 }
1593 }
1594 #endif
1595 }
1596 }
1597
1598 void GraphBuilder::call_register_finalizer() {
1599 // If the receiver requires finalization then emit code to perform
1600 // the registration on return.
1601
1602 // Gather some type information about the receiver
1603 Value receiver = state()->local_at(0);
1604 assert(receiver != nullptr, "must have a receiver");
1605 ciType* declared_type = receiver->declared_type();
1606 ciType* exact_type = receiver->exact_type();
1607 if (exact_type == nullptr &&
1608 receiver->as_Local() &&
1609 receiver->as_Local()->java_index() == 0) {
1610 ciInstanceKlass* ik = compilation()->method()->holder();
1611 if (ik->is_final()) {
1612 exact_type = ik;
1613 } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
1614 // test class is leaf class
1615 compilation()->dependency_recorder()->assert_leaf_type(ik);
1616 exact_type = ik;
1617 } else {
1618 declared_type = ik;
1619 }
1620 }
1621
1622 // see if we know statically that registration isn't required
1623 bool needs_check = true;
1624 if (exact_type != nullptr) {
1625 needs_check = exact_type->as_instance_klass()->has_finalizer();
1626 } else if (declared_type != nullptr) {
1627 ciInstanceKlass* ik = declared_type->as_instance_klass();
1628 if (!Dependencies::has_finalizable_subclass(ik)) {
1629 compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
1630 needs_check = false;
1631 }
1632 }
1633
1634 if (needs_check) {
1635 // Perform the registration of finalizable objects.
1636 ValueStack* state_before = copy_state_for_exception();
1637 load_local(objectType, 0);
1638 append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1639 state()->pop_arguments(1),
1640 true, state_before, true));
1641 }
1642 }
1643
1644
1645 void GraphBuilder::method_return(Value x, bool ignore_return) {
1646 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1647 call_register_finalizer();
1648 }
1649
1650 // The conditions for a memory barrier are described in Parse::do_exits().
1651 bool need_mem_bar = false;
1652 if (method()->is_object_constructor() &&
1653 (scope()->wrote_non_strict_final() || scope()->wrote_stable() ||
1654 (AlwaysSafeConstructors && scope()->wrote_fields()) ||
1655 (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) {
1656 need_mem_bar = true;
1657 }
1658
1659 BasicType bt = method()->return_type()->basic_type();
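  // Normalize the return value to its declared subword type: sign-extend
  // byte and short, zero-extend char, and reduce boolean to 0 or 1. For
  // example, a byte return value of 0x80 becomes 0xFFFFFF80 after the
  // shift pair, matching the sign-extended int the caller expects.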
1660 switch (bt) {
1661 case T_BYTE:
1662 {
1663 Value shift = append(new Constant(new IntConstant(24)));
1664 x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1665 x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1666 break;
1667 }
1668 case T_SHORT:
1669 {
1670 Value shift = append(new Constant(new IntConstant(16)));
1671 x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1672 x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1673 break;
1674 }
1675 case T_CHAR:
1676 {
1677 Value mask = append(new Constant(new IntConstant(0xFFFF)));
1678 x = append(new LogicOp(Bytecodes::_iand, x, mask));
1679 break;
1680 }
1681 case T_BOOLEAN:
1682 {
1683 Value mask = append(new Constant(new IntConstant(1)));
1684 x = append(new LogicOp(Bytecodes::_iand, x, mask));
1685 break;
1686 }
1687 default:
1688 break;
1689 }
1690
1691 // Check to see whether we are inlining. If so, Return
1692 // instructions become Gotos to the continuation point.
1693 if (continuation() != nullptr) {
1694
1695 int invoke_bci = state()->caller_state()->bci();
1696
1697 if (x != nullptr && !ignore_return) {
1698 ciMethod* caller = state()->scope()->caller()->method();
1699 Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci);
1700 if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) {
1701 ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type();
1702 if (declared_ret_type->is_klass() && x->exact_type() == nullptr &&
1703 x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) {
1704 x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before()));
1705 }
1706 }
1707 }
1708
1709 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
1710
1711 if (compilation()->env()->dtrace_method_probes()) {
1712 // Report exit from inline methods
1713 Values* args = new Values(1);
1714 args->push(append(new Constant(new MethodConstant(method()))));
1715 append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
1716 }
1717
1718 // If the inlined method is synchronized, the monitor must be
1719 // released before we jump to the continuation block.
1720 if (method()->is_synchronized()) {
1721 assert(state()->locks_size() == 1, "receiver must be locked here");
1722 monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
1723 }
1724
1725 if (need_mem_bar) {
1726 append(new MemBar(lir_membar_storestore));
1727 }
1728
    // The state at the end of an inlined method is the state of the
    // caller without the method parameters on the stack, plus the
    // return value, if any, of the inlined method on the operand stack.
1732 set_state(state()->caller_state()->copy_for_parsing());
1733 if (x != nullptr) {
1734 if (!ignore_return) {
1735 state()->push(x->type(), x);
1736 }
1737 if (profile_return() && x->type()->is_object_kind()) {
1738 ciMethod* caller = state()->scope()->method();
1739 profile_return_type(x, method(), caller, invoke_bci);
1740 }
1741 }
1742 Goto* goto_callee = new Goto(continuation(), false);
1743
1744 // See whether this is the first return; if so, store off some
1745 // of the state for later examination
1746 if (num_returns() == 0) {
1747 set_inline_cleanup_info();
1748 }
1749
1750 // The current bci() is in the wrong scope, so use the bci() of
1751 // the continuation point.
1752 append_with_bci(goto_callee, scope_data()->continuation()->bci());
1753 incr_num_returns();
1754 return;
1755 }
1756
1757 state()->truncate_stack(0);
1758 if (method()->is_synchronized()) {
1759 // perform the unlocking before exiting the method
1760 Value receiver;
1761 if (!method()->is_static()) {
1762 receiver = _initial_state->local_at(0);
1763 } else {
1764 receiver = append(new Constant(new ClassConstant(method()->holder())));
1765 }
1766 append_split(new MonitorExit(receiver, state()->unlock()));
1767 }
1768
1769 if (need_mem_bar) {
1770 append(new MemBar(lir_membar_storestore));
1771 }
1772
1773 assert(!ignore_return, "Ignoring return value works only for inlining");
1774 append(new Return(x));
1775 }
1776
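// Try to fold a constant field value into a Constant instruction.
// Returns nullptr if the value is invalid or if an object value should
// not be treated as a compile-time constant.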
1777 Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) {
1778 if (!field_value.is_valid()) return nullptr;
1779
1780 BasicType field_type = field_value.basic_type();
1781 ValueType* value = as_ValueType(field_value);
1782
1783 // Attach dimension info to stable arrays.
1784 if (FoldStableValues &&
1785 field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
1786 ciArray* array = field_value.as_object()->as_array();
1787 jint dimension = field->type()->as_array_klass()->dimension();
1788 value = new StableArrayConstant(array, dimension);
1789 }
1790
1791 switch (field_type) {
1792 case T_ARRAY:
1793 case T_OBJECT:
1794 if (field_value.as_object()->should_be_constant()) {
1795 return new Constant(value);
1796 }
1797 return nullptr; // Not a constant.
1798 default:
1799 return new Constant(value);
1800 }
1801 }
1802
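// Copy the payload of a flat inline-type field, field by field, from
// src + src_off to dest + dest_off, recursing into nested flat fields.
// Null markers of nullable flat fields are copied with Unsafe accesses
// because they are not represented as real fields.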
1803 void GraphBuilder::copy_inline_content(ciInlineKlass* vk, Value src, int src_off, Value dest, int dest_off, ValueStack* state_before, ciField* enclosing_field) {
1804 for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1805 ciField* field = vk->declared_nonstatic_field_at(i);
1806 int offset = field->offset_in_bytes() - vk->payload_offset();
1807 if (field->is_flat()) {
1808 copy_inline_content(field->type()->as_inline_klass(), src, src_off + offset, dest, dest_off + offset, state_before, enclosing_field);
1809 if (!field->is_null_free()) {
        // Nullable; copy the null marker using Unsafe because null markers are not real fields.
1811 int null_marker_offset = field->null_marker_offset() - vk->payload_offset();
1812 Value offset = append(new Constant(new LongConstant(src_off + null_marker_offset)));
1813 Value nm = append(new UnsafeGet(T_BOOLEAN, src, offset, false));
1814 offset = append(new Constant(new LongConstant(dest_off + null_marker_offset)));
1815 append(new UnsafePut(T_BOOLEAN, dest, offset, nm, false));
1816 }
1817 } else {
1818 Value value = append(new LoadField(src, src_off + offset, field, false, state_before, false));
1819 StoreField* store = new StoreField(dest, dest_off + offset, field, value, false, state_before, false);
1820 store->set_enclosing_field(enclosing_field);
1821 append(store);
1822 }
1823 }
1824 }
1825
1826 void GraphBuilder::access_field(Bytecodes::Code code) {
1827 bool will_link;
1828 ciField* field = stream()->get_field(will_link);
1829 ciInstanceKlass* holder = field->holder();
1830 BasicType field_basic_type = field->type()->basic_type();
1831 ValueType* type = as_ValueType(field_basic_type);
1832
  // Call will_link again to determine whether the field access is valid or needs patching.
1834 const bool needs_patching = !holder->is_loaded() ||
1835 !field->will_link(method(), code) ||
1836 (!field->is_flat() && PatchALot);
1837
1838 ValueStack* state_before = nullptr;
1839 if (!holder->is_initialized() || needs_patching) {
1840 // save state before instruction for debug info when
1841 // deoptimization happens during patching
1842 state_before = copy_state_before();
1843 }
1844
1845 Value obj = nullptr;
1846 if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1847 if (state_before != nullptr) {
1848 // build a patching constant
1849 obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1850 } else {
1851 obj = new Constant(new InstanceConstant(holder->java_mirror()));
1852 }
1853 }
1854
1855 if (code == Bytecodes::_putfield) {
1856 scope()->set_wrote_fields();
1857 if (field->is_volatile()) {
1858 scope()->set_wrote_volatile();
1859 }
1860 if (field->is_final() && !field->is_strict()) {
1861 scope()->set_wrote_non_strict_final();
1862 }
1863 if (field->is_stable()) {
1864 scope()->set_wrote_stable();
1865 }
1866 }
1867
1868 int offset = !needs_patching ? field->offset_in_bytes() : -1;
1869 switch (code) {
1870 case Bytecodes::_getstatic: {
1871 // check for compile-time constants, i.e., initialized static final fields
1872 Value constant = nullptr;
1873 if (field->is_static_constant() && !PatchALot) {
1874 ciConstant field_value = field->constant_value();
1875 assert(!field->is_stable() || !field_value.is_null_or_zero(),
1876 "stable static w/ default value shouldn't be a constant");
1877 constant = make_constant(field_value, field);
1878 }
1879 if (constant != nullptr) {
1880 push(type, append(constant));
1881 } else {
1882 if (state_before == nullptr) {
1883 state_before = copy_state_for_exception();
1884 }
1885 LoadField* load_field = new LoadField(append(obj), offset, field, true,
1886 state_before, needs_patching);
1887 push(type, append(load_field));
1888 }
1889 break;
1890 }
1891 case Bytecodes::_putstatic: {
1892 Value val = pop(type);
1893 if (state_before == nullptr) {
1894 state_before = copy_state_for_exception();
1895 }
1896 if (field_basic_type == T_BOOLEAN) {
1897 Value mask = append(new Constant(new IntConstant(1)));
1898 val = append(new LogicOp(Bytecodes::_iand, val, mask));
1899 }
1900 if (field->is_null_free()) {
1901 null_check(val);
1902
1903 ciType* field_type = field->type();
1904 if (field_type->is_loaded() && field_type->is_inlinetype() && field_type->as_inline_klass()->is_empty() &&
1905 (!method()->is_class_initializer() || field->is_flat())) {
1906 // Storing to a field of an empty, null-free inline type that is already initialized. Ignore.
1907 break;
1908 }
1909 }
1910 append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
1911 break;
1912 }
1913 case Bytecodes::_getfield: {
1914 // Check for compile-time constants, i.e., trusted final non-static fields.
1915 Value constant = nullptr;
1916 if (state_before == nullptr && field->is_flat()) {
1917 // Save the entire state and re-execute on deopt when accessing flat fields
1918 assert(Interpreter::bytecode_should_reexecute(code), "should reexecute");
1919 state_before = copy_state_before();
1920 }
1921 if (!has_pending_field_access() && !has_pending_load_indexed()) {
1922 obj = apop();
1923 ObjectType* obj_type = obj->type()->as_ObjectType();
1924 if (field->is_constant() && !field->is_flat() && obj_type->is_constant() && !PatchALot) {
1925 ciObject* const_oop = obj_type->constant_value();
1926 if (!const_oop->is_null_object() && const_oop->is_loaded()) {
1927 ciConstant field_value = field->constant_value_of(const_oop);
1928 if (field_value.is_valid()) {
1929 constant = make_constant(field_value, field);
1930 // For CallSite objects add a dependency for invalidation of the optimization.
1931 if (field->is_call_site_target()) {
1932 ciCallSite* call_site = const_oop->as_call_site();
1933 if (!call_site->is_fully_initialized_constant_call_site()) {
1934 ciMethodHandle* target = field_value.as_object()->as_method_handle();
1935 dependency_recorder()->assert_call_site_target_value(call_site, target);
1936 }
1937 }
1938 }
1939 }
1940 }
1941 }
1942 if (constant != nullptr) {
1943 push(type, append(constant));
1944 } else {
1945 if (state_before == nullptr) {
1946 state_before = copy_state_for_exception();
1947 }
1948 if (!field->is_flat()) {
1949 if (has_pending_field_access()) {
1950 assert(!needs_patching, "Can't patch delayed field access");
1951 obj = pending_field_access()->obj();
1952 offset += pending_field_access()->offset() - field->holder()->as_inline_klass()->payload_offset();
1953 field = pending_field_access()->holder()->get_field_by_offset(offset, false);
1954 assert(field != nullptr, "field not found");
1955 set_pending_field_access(nullptr);
1956 } else if (has_pending_load_indexed()) {
1957 assert(!needs_patching, "Can't patch delayed field access");
1958 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
1959 LoadIndexed* li = pending_load_indexed()->load_instr();
1960 li->set_type(type);
1961 push(type, append(li));
1962 set_pending_load_indexed(nullptr);
1963 break;
1964 }
1965 LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1966 Value replacement = !needs_patching ? _memory->load(load) : load;
1967 if (replacement != load) {
            assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
1969 // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
1970 // conversion. Emit an explicit conversion here to get the correct field value after the write.
1971 switch (field_basic_type) {
1972 case T_BOOLEAN:
1973 case T_BYTE:
1974 replacement = append(new Convert(Bytecodes::_i2b, replacement, type));
1975 break;
1976 case T_CHAR:
1977 replacement = append(new Convert(Bytecodes::_i2c, replacement, type));
1978 break;
1979 case T_SHORT:
1980 replacement = append(new Convert(Bytecodes::_i2s, replacement, type));
1981 break;
1982 default:
1983 break;
1984 }
1985 push(type, replacement);
1986 } else {
1987 push(type, append(load));
1988 }
1989 } else {
1990 // Flat field
1991 assert(!needs_patching, "Can't patch flat inline type field access");
1992 ciInlineKlass* inline_klass = field->type()->as_inline_klass();
1993 if (field->is_atomic()) {
1994 assert(!has_pending_field_access(), "Pending field accesses are not supported");
1995 LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1996 push(type, append(load));
1997 } else {
1998 // Look at the next bytecode to check if we can delay the field access
1999 bool can_delay_access = false;
2000 if (field->is_null_free()) {
2001 ciBytecodeStream s(method());
2002 s.force_bci(bci());
2003 s.next();
2004 if (s.cur_bc() == Bytecodes::_getfield && !needs_patching) {
2005 ciField* next_field = s.get_field(will_link);
2006 bool next_needs_patching = !next_field->holder()->is_loaded() ||
2007 !next_field->will_link(method(), Bytecodes::_getfield) ||
2008 PatchALot;
2009 // We can't update the offset for atomic accesses
2010 bool next_needs_atomic_access = next_field->is_flat() && next_field->is_atomic();
2011 can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching && !next_needs_atomic_access && next_field->is_null_free();
2012 }
2013 }
2014
2015 if (can_delay_access) {
2016 if (has_pending_load_indexed()) {
2017 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
2018 } else if (has_pending_field_access()) {
2019 pending_field_access()->inc_offset(offset - field->holder()->as_inline_klass()->payload_offset());
2020 } else {
2021 null_check(obj);
2022 DelayedFieldAccess* dfa = new DelayedFieldAccess(obj, field->holder(), field->offset_in_bytes(), state_before);
2023 set_pending_field_access(dfa);
2024 }
2025 } else {
2026 if (!field->is_strict()) {
2027 scope()->set_wrote_non_strict_final();
2028 }
2029 scope()->set_wrote_fields();
2030 if (has_pending_load_indexed()) {
2031 assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
2032 assert(!needs_patching, "Can't patch delayed field access");
2033 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
2034 NewInstance* buffer = new NewInstance(inline_klass, pending_load_indexed()->state_before(), false, true);
2035 buffer->set_null_free(true);
2036 _memory->new_instance(buffer);
2037 pending_load_indexed()->load_instr()->set_buffer(buffer);
2038 apush(append_split(buffer));
2039 append(pending_load_indexed()->load_instr());
2040 set_pending_load_indexed(nullptr);
2041 } else if (has_pending_field_access()) {
2042 assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
2043 state_before = pending_field_access()->state_before();
2044 NewInstance* buffer = new NewInstance(inline_klass, state_before, false, true);
2045 _memory->new_instance(buffer);
2046 apush(append_split(buffer));
2047 copy_inline_content(inline_klass, pending_field_access()->obj(),
2048 pending_field_access()->offset() + field->offset_in_bytes() - field->holder()->as_inline_klass()->payload_offset(),
2049 buffer, inline_klass->payload_offset(), state_before);
2050 set_pending_field_access(nullptr);
2051 } else {
2052 if (!field->is_null_free() && !inline_klass->is_initialized()) {
              // Cannot allocate an instance of inline_klass because it may not have
              // been initialized; bail out for now.
2055 bailout("load from an uninitialized nullable non-atomic flat field");
2056 return;
2057 }
2058
2059 NewInstance* buffer = new NewInstance(inline_klass, state_before, false, true);
2060 _memory->new_instance(buffer);
2061 append_split(buffer);
2062
2063 if (inline_klass->is_initialized() && inline_klass->is_empty()) {
              // Needs an explicit null check because the code below performs no actual load if there are no fields.
2065 null_check(obj);
2066 }
2067 copy_inline_content(inline_klass, obj, field->offset_in_bytes(), buffer, inline_klass->payload_offset(), state_before);
2068
2069 Instruction* result = buffer;
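            // For a nullable flat field, read the null marker from the holder
            // and select between the copied buffer and null, so that a
            // logically null field reads as null.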
2070 if (!field->is_null_free()) {
2071 Value int_zero = append(new Constant(intZero));
2072 Value object_null = append(new Constant(objectNull));
2073 Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2074 Value nm = append(new UnsafeGet(T_BOOLEAN, obj, nm_offset, false));
2075 result = append(new IfOp(nm, Instruction::neq, int_zero, buffer, object_null, state_before, false));
2076 }
2077 apush(result);
2078 }
2079
2080 // If we allocated a new instance ensure the stores to copy the
2081 // field contents are visible before any subsequent store that
2082 // publishes this reference.
2083 append(new MemBar(lir_membar_storestore));
2084 }
2085 }
2086 }
2087 }
2088 break;
2089 }
2090 case Bytecodes::_putfield: {
2091 Value val = pop(type);
2092 obj = apop();
2093 if (state_before == nullptr) {
2094 state_before = copy_state_for_exception();
2095 }
2096 if (field_basic_type == T_BOOLEAN) {
2097 Value mask = append(new Constant(new IntConstant(1)));
2098 val = append(new LogicOp(Bytecodes::_iand, val, mask));
2099 }
2100
2101 ciType* field_type = field->type();
2102 if (field->is_null_free() && field_type->is_loaded() && field_type->is_inlinetype() &&
2103 field_type->as_inline_klass()->is_empty() && (!method()->is_object_constructor() || field->is_flat())) {
2104 // Storing to a field of an empty, null-free inline type that is already initialized. Ignore.
2105 null_check(obj);
2106 null_check(val);
2107 } else if (!field->is_flat()) {
2108 if (field->is_null_free()) {
2109 null_check(val);
2110 }
2111 StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
2112 if (!needs_patching) store = _memory->store(store);
2113 if (store != nullptr) {
2114 append(store);
2115 }
2116 } else {
2117 // Flat field
2118 assert(!needs_patching, "Can't patch flat inline type field access");
2119 ciInlineKlass* inline_klass = field_type->as_inline_klass();
2120 if (field->is_atomic()) {
2121 if (field->is_null_free()) {
2122 null_check(val);
2123 }
2124 append(new StoreField(obj, offset, field, val, false, state_before, needs_patching));
2125 } else if (field->is_null_free()) {
2126 assert(!inline_klass->is_empty(), "should have been handled");
2127 copy_inline_content(inline_klass, val, inline_klass->payload_offset(), obj, offset, state_before, field);
2128 } else {
2129 if (!inline_klass->is_initialized()) {
          // null_reset_value is not available; bail out for now.
2131 bailout("store to an uninitialized nullable non-atomic flat field");
2132 return;
2133 }
2134
        // Store the subfields since this is a nullable, non-atomic flat field.
2136 Value object_null = append(new Constant(objectNull));
2137 Value null_reset_value = append(new Constant(new ObjectConstant(inline_klass->get_null_reset_value().as_object())));
2138 Value src = append(new IfOp(val, Instruction::neq, object_null, val, null_reset_value, state_before, false));
2139 copy_inline_content(inline_klass, src, inline_klass->payload_offset(), obj, offset, state_before);
2140
2141 // Store the null marker
2142 Value int_one = append(new Constant(new IntConstant(1)));
2143 Value int_zero = append(new Constant(intZero));
2144 Value nm = append(new IfOp(val, Instruction::neq, object_null, int_one, int_zero, state_before, false));
2145 Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2146 append(new UnsafePut(T_BOOLEAN, obj, nm_offset, nm, false));
2147 }
2148 }
2149 break;
2150 }
2151 default:
2152 ShouldNotReachHere();
2153 break;
2154 }
2155 }
2156
2157 Dependencies* GraphBuilder::dependency_recorder() const {
2158 return compilation()->dependency_recorder();
2159 }
2160
2161 // How many arguments do we want to profile?
2162 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
2163 int n = 0;
2164 bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
2165 start = has_receiver ? 1 : 0;
2166 if (profile_arguments()) {
2167 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2168 if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
2169 n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
2170 }
2171 }
  // If we are inlining, we also need to collect the arguments in order to profile parameters for the target.
2173 if (profile_parameters() && target != nullptr) {
2174 if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) {
2175 // The receiver is profiled on method entry so it's included in
2176 // the number of parameters but here we're only interested in
2177 // actual arguments.
2178 n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start);
2179 }
2180 }
2181 if (n > 0) {
2182 return new Values(n);
2183 }
2184 return nullptr;
2185 }
2186
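// Debug-only consistency check: the number of argument values collected
// for profiling must match the expected count, except for method handle
// intrinsics where some arguments may have been popped.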
2187 void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) {
2188 #ifdef ASSERT
2189 bool ignored_will_link;
2190 ciSignature* declared_signature = nullptr;
2191 ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
2192 assert(expected == obj_args->capacity() || real_target->is_method_handle_intrinsic(), "missed on arg?");
2193 #endif
2194 }
2195
2196 // Collect arguments that we want to profile in a list
2197 Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) {
2198 int start = 0;
2199 Values* obj_args = args_list_for_profiling(target, start, may_have_receiver);
2200 if (obj_args == nullptr) {
2201 return nullptr;
2202 }
2203 int s = obj_args->capacity();
2204 // if called through method handle invoke, some arguments may have been popped
2205 for (int i = start, j = 0; j < s && i < args->length(); i++) {
2206 if (args->at(i)->type()->is_object_kind()) {
2207 obj_args->push(args->at(i));
2208 j++;
2209 }
2210 }
2211 check_args_for_profiling(obj_args, s);
2212 return obj_args;
2213 }
2214
2215 void GraphBuilder::invoke(Bytecodes::Code code) {
2216 bool will_link;
2217 ciSignature* declared_signature = nullptr;
2218 ciMethod* target = stream()->get_method(will_link, &declared_signature);
2219 ciKlass* holder = stream()->get_declared_method_holder();
2220 const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
2221 assert(declared_signature != nullptr, "cannot be null");
2222 assert(will_link == target->is_loaded(), "");
2223 JFR_ONLY(Jfr::on_resolution(this, holder, target); CHECK_BAILOUT();)
2224
2225 ciInstanceKlass* klass = target->holder();
2226 assert(!target->is_loaded() || klass->is_loaded(), "loaded target must imply loaded klass");
2227
2228 // check if CHA possible: if so, change the code to invoke_special
2229 ciInstanceKlass* calling_klass = method()->holder();
2230 ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
2231 ciInstanceKlass* actual_recv = callee_holder;
2232
2233 CompileLog* log = compilation()->log();
2234 if (log != nullptr)
2235 log->elem("call method='%d' instr='%s'",
2236 log->identify(target),
2237 Bytecodes::name(code));
2238
2239 // Some methods are obviously bindable without any type checks so
2240 // convert them directly to an invokespecial or invokestatic.
2241 if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
2242 switch (bc_raw) {
2243 case Bytecodes::_invokeinterface:
      // convert to invokespecial if the target is a private interface method.
2245 if (target->is_private()) {
2246 assert(holder->is_interface(), "How did we get a non-interface method here!");
2247 code = Bytecodes::_invokespecial;
2248 }
2249 break;
2250 case Bytecodes::_invokevirtual:
2251 code = Bytecodes::_invokespecial;
2252 break;
2253 case Bytecodes::_invokehandle:
2254 code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
2255 break;
2256 default:
2257 break;
2258 }
2259 } else {
2260 if (bc_raw == Bytecodes::_invokehandle) {
2261 assert(!will_link, "should come here only for unlinked call");
2262 code = Bytecodes::_invokespecial;
2263 }
2264 }
2265
2266 if (code == Bytecodes::_invokespecial) {
2267 // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
2268 ciKlass* receiver_constraint = nullptr;
2269
2270 if (bc_raw == Bytecodes::_invokeinterface) {
2271 receiver_constraint = holder;
2272 } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_constructor() && calling_klass->is_interface()) {
2273 receiver_constraint = calling_klass;
2274 }
2275
2276 if (receiver_constraint != nullptr) {
2277 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
2278 Value receiver = state()->stack_at(index);
2279 CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before());
2280 // go to uncommon_trap when checkcast fails
2281 c->set_invokespecial_receiver_check();
2282 state()->stack_at_put(index, append_split(c));
2283 }
2284 }
2285
  // Push the appendix argument (MethodType, CallSite, etc.), if there is one.
2287 bool patch_for_appendix = false;
2288 int patching_appendix_arg = 0;
2289 if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) {
2290 Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
2291 apush(arg);
2292 patch_for_appendix = true;
2293 patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
2294 } else if (stream()->has_appendix()) {
2295 ciObject* appendix = stream()->get_appendix();
2296 Value arg = append(new Constant(new ObjectConstant(appendix)));
2297 apush(arg);
2298 }
2299
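  // Try to devirtualize the call: if an exact receiver type is known, bind
  // directly to the resolved method; otherwise use CHA to look for a single
  // concrete target that the call site must dispatch to.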
2300 ciMethod* cha_monomorphic_target = nullptr;
2301 ciMethod* exact_target = nullptr;
2302 Value better_receiver = nullptr;
2303 if (UseCHA && target->is_loaded() &&
2304 !(// %%% FIXME: Are both of these relevant?
2305 target->is_method_handle_intrinsic() ||
2306 target->is_compiled_lambda_form()) &&
2307 !patch_for_appendix) {
2308 Value receiver = nullptr;
2309 ciInstanceKlass* receiver_klass = nullptr;
2310 bool type_is_exact = false;
2311 // try to find a precise receiver type
2312 if (will_link && !target->is_static()) {
2313 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
2314 receiver = state()->stack_at(index);
2315 ciType* type = receiver->exact_type();
2316 if (type != nullptr && type->is_loaded()) {
2317 assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface");
2318 // Detects non-interface instances, primitive arrays, and some object arrays.
2319 // Array receivers can only call Object methods, so we should be able to allow
2320 // all object arrays here too, even those with unloaded types.
2321 receiver_klass = (ciInstanceKlass*) type;
2322 type_is_exact = true;
2323 }
2324 if (type == nullptr) {
2325 type = receiver->declared_type();
2326 if (type != nullptr && type->is_loaded() &&
2327 type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
2328 receiver_klass = (ciInstanceKlass*) type;
2329 if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
2330 // Insert a dependency on this type since
2331 // find_monomorphic_target may assume it's already done.
2332 dependency_recorder()->assert_leaf_type(receiver_klass);
2333 type_is_exact = true;
2334 }
2335 }
2336 }
2337 }
2338 if (receiver_klass != nullptr && type_is_exact &&
2339 receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
2340 // If we have the exact receiver type we can bind directly to
2341 // the method to call.
2342 exact_target = target->resolve_invoke(calling_klass, receiver_klass);
2343 if (exact_target != nullptr) {
2344 target = exact_target;
2345 code = Bytecodes::_invokespecial;
2346 }
2347 }
2348 if (receiver_klass != nullptr &&
2349 receiver_klass->is_subtype_of(actual_recv) &&
2350 actual_recv->is_initialized()) {
2351 actual_recv = receiver_klass;
2352 }
2353
2354 if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
2355 (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
2356 // Use CHA on the receiver to select a more precise method.
2357 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
2358 } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != nullptr) {
2359 assert(callee_holder->is_interface(), "invokeinterface to non interface?");
      // If there is only one implementor of this interface then we
      // may be able to bind this invoke directly to the implementing
      // klass, but we need both a dependence on the single interface
      // and on the method we bind to. Additionally, since all we know
      // about the receiver type is that it's supposed to implement the
      // interface, we have to insert a check that it's the class we
      // expect. Interface types are not checked by the verifier, so
      // they are roughly equivalent to Object.
      // The number of implementors for declared_interface is less than
      // or equal to the number of implementors for target->holder(), so
      // if the number of implementors of target->holder() == 1, then the
      // number of implementors for declared_interface is 0 or 1. If
      // it's 0, then no class implements declared_interface and there's
      // no point in inlining.
2374 ciInstanceKlass* declared_interface = callee_holder;
2375 ciInstanceKlass* singleton = declared_interface->unique_implementor();
2376 if (singleton != nullptr) {
2377 assert(singleton != declared_interface, "not a unique implementor");
2378 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, declared_interface, singleton);
2379 if (cha_monomorphic_target != nullptr) {
2380 ciInstanceKlass* holder = cha_monomorphic_target->holder();
2381 ciInstanceKlass* constraint = (holder->is_subtype_of(singleton) ? holder : singleton); // avoid upcasts
2382 if (holder != compilation()->env()->Object_klass() &&
2383 (!type_is_exact || receiver_klass->is_subtype_of(constraint))) {
2384 actual_recv = declared_interface;
2385
          // insert a check that it's really the expected class.
2387 CheckCast* c = new CheckCast(constraint, receiver, copy_state_for_exception());
2388 c->set_incompatible_class_change_check();
2389 c->set_direct_compare(constraint->is_final());
2390 // pass the result of the checkcast so that the compiler has
2391 // more accurate type info in the inlinee
2392 better_receiver = append_split(c);
2393
2394 dependency_recorder()->assert_unique_implementor(declared_interface, singleton);
2395 } else {
2396 cha_monomorphic_target = nullptr;
2397 }
2398 }
2399 }
2400 }
2401 }
2402
2403 if (cha_monomorphic_target != nullptr) {
2404 assert(!target->can_be_statically_bound() || target == cha_monomorphic_target, "");
2405 assert(!cha_monomorphic_target->is_abstract(), "");
2406 if (!cha_monomorphic_target->can_be_statically_bound(actual_recv)) {
2407 // If we inlined because CHA revealed only a single target method,
2408 // then we are dependent on that target method not getting overridden
2409 // by dynamic class loading. Be sure to test the "static" receiver
2410 // dest_method here, as opposed to the actual receiver, which may
2411 // falsely lead us to believe that the receiver is final or private.
2412 dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target, callee_holder, target);
2413 }
2414 code = Bytecodes::_invokespecial;
2415 }
2416
2417 // check if we could do inlining
2418 if (!PatchALot && Inline && target->is_loaded() && !patch_for_appendix &&
2419 callee_holder->is_loaded()) { // the effect of symbolic reference resolution
2420
2421 // callee is known => check if we have static binding
2422 if ((code == Bytecodes::_invokestatic && klass->is_initialized()) || // invokestatic involves an initialization barrier on declaring class
2423 code == Bytecodes::_invokespecial ||
2424 (code == Bytecodes::_invokevirtual && target->is_final_method()) ||
2425 code == Bytecodes::_invokedynamic) {
2426 // static binding => check if callee is ok
2427 ciMethod* inline_target = (cha_monomorphic_target != nullptr) ? cha_monomorphic_target : target;
2428 bool holder_known = (cha_monomorphic_target != nullptr) || (exact_target != nullptr);
2429 bool success = try_inline(inline_target, holder_known, false /* ignore_return */, code, better_receiver);
2430
2431 CHECK_BAILOUT();
2432 clear_inline_bailout();
2433
2434 if (success) {
2435 // Register dependence if JVMTI has either breakpoint
2436 // setting or hotswapping of methods capabilities since they may
2437 // cause deoptimization.
2438 if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) {
2439 dependency_recorder()->assert_evol_method(inline_target);
2440 }
2441 return;
2442 }
2443 } else {
2444 print_inlining(target, "no static binding", /*success*/ false);
2445 }
2446 } else {
2447 print_inlining(target, "not inlineable", /*success*/ false);
2448 }
2449
2450 // If we attempted an inline which did not succeed because of a
2451 // bailout during construction of the callee graph, the entire
2452 // compilation has to be aborted. This is fairly rare and currently
2453 // seems to only occur for jasm-generated classes which contain
2454 // jsr/ret pairs which are not associated with finally clauses and
2455 // do not have exception handlers in the containing method, and are
2456 // therefore not caught early enough to abort the inlining without
2457 // corrupting the graph. (We currently bail out with a non-empty
2458 // stack at a ret in these situations.)
2459 CHECK_BAILOUT();
2460
2461 // inlining not successful => standard invoke
2462 ciType* return_type = declared_signature->return_type();
2463 ValueStack* state_before = copy_state_exhandling();
2464
2465 // The bytecode (code) might change in this method so we are checking this very late.
2466 const bool has_receiver =
2467 code == Bytecodes::_invokespecial ||
2468 code == Bytecodes::_invokevirtual ||
2469 code == Bytecodes::_invokeinterface;
2470 Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
2471 Value recv = has_receiver ? apop() : nullptr;
2472
2473 // A null check is required here (when there is a receiver) for any of the following cases
  // - invokespecial, which always needs a null check.
  // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized
  //   and require null checking. If the target is loaded, a null check is emitted here.
  //   If the target isn't loaded, the null check must happen after call resolution. We achieve that
  //   by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
  //   (The JVM specification requires that a LinkageError be thrown before an NPE. An unloaded target
  //   may potentially fail to resolve, and so can't have the null check before the resolution.)
  // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the same
  //   reason as above, so calls with a receiver to unloaded targets can't be profiled.)
2483 //
2484 // Normal invokevirtual will perform the null check during lookup
2485
2486 bool need_null_check = (code == Bytecodes::_invokespecial) ||
2487 (target->is_loaded() && (target->is_final_method() || (is_profiling() && profile_calls())));
2488
2489 if (need_null_check) {
2490 if (recv != nullptr) {
2491 null_check(recv);
2492 }
2493
2494 if (is_profiling()) {
2495 // Note that we'd collect profile data in this method if we wanted it.
2496 compilation()->set_would_profile(true);
2497
2498 if (profile_calls()) {
2499 assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both can not be set");
2500 ciKlass* target_klass = nullptr;
2501 if (cha_monomorphic_target != nullptr) {
2502 target_klass = cha_monomorphic_target->holder();
2503 } else if (exact_target != nullptr) {
2504 target_klass = exact_target->holder();
2505 }
2506 profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false);
2507 }
2508 }
2509 }
2510
2511 Invoke* result = new Invoke(code, return_type, recv, args, target, state_before);
2512 // push result
2513 append_split(result);
2514
2515 if (!return_type->is_void()) {
2516 push(as_ValueType(return_type), result);
2517 }
2518
2519 if (profile_return() && return_type->is_object()) {
2520 profile_return_type(result, target);
2521 }
2522 }
2523
2524
2525 void GraphBuilder::new_instance(int klass_index) {
2526 ValueStack* state_before = copy_state_exhandling();
2527 ciKlass* klass = stream()->get_klass();
2528 assert(klass->is_instance_klass(), "must be an instance klass");
2529 NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass(), false);
2530 _memory->new_instance(new_instance);
2531 apush(append_split(new_instance));
2532 }
2533
2534 void GraphBuilder::new_type_array() {
2535 ValueStack* state_before = copy_state_exhandling();
2536 apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
2537 }
2538
2539
2540 void GraphBuilder::new_object_array() {
2541 ciKlass* klass = stream()->get_klass();
2542 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2543 NewArray* n = new NewObjectArray(klass, ipop(), state_before);
2544 apush(append_split(n));
2545 }
2546
2547
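// Returns true if a type check against k can be implemented as a direct
// klass comparison, i.e., k is a final class or (under CHA) a leaf type.
// In the leaf-type case a dependency is recorded so that loading a new
// subclass later invalidates the compiled code.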
2548 bool GraphBuilder::direct_compare(ciKlass* k) {
2549 if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
2550 ciInstanceKlass* ik = k->as_instance_klass();
2551 if (ik->is_final()) {
2552 return true;
2553 } else {
2554 if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
2555 // test class is leaf class
2556 dependency_recorder()->assert_leaf_type(ik);
2557 return true;
2558 }
2559 }
2560 }
2561 return false;
2562 }
2563
2564
2565 void GraphBuilder::check_cast(int klass_index) {
2566 ciKlass* klass = stream()->get_klass();
2567 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
2568 CheckCast* c = new CheckCast(klass, apop(), state_before);
2569 apush(append_split(c));
2570 c->set_direct_compare(direct_compare(klass));
2571
2572 if (is_profiling()) {
2573 // Note that we'd collect profile data in this method if we wanted it.
2574 compilation()->set_would_profile(true);
2575
2576 if (profile_checkcasts()) {
2577 c->set_profiled_method(method());
2578 c->set_profiled_bci(bci());
2579 c->set_should_profile(true);
2580 }
2581 }
2582 }
2583
2584
2585 void GraphBuilder::instance_of(int klass_index) {
2586 ciKlass* klass = stream()->get_klass();
2587 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2588 InstanceOf* i = new InstanceOf(klass, apop(), state_before);
2589 ipush(append_split(i));
2590 i->set_direct_compare(direct_compare(klass));
2591
2592 if (is_profiling()) {
2593 // Note that we'd collect profile data in this method if we wanted it.
2594 compilation()->set_would_profile(true);
2595
2596 if (profile_checkcasts()) {
2597 i->set_profiled_method(method());
2598 i->set_profiled_bci(bci());
2599 i->set_should_profile(true);
2600 }
2601 }
2602 }
2603
2604
2605 void GraphBuilder::monitorenter(Value x, int bci) {
2606 bool maybe_inlinetype = false;
2607 if (bci == InvocationEntryBci) {
2608 // Called by GraphBuilder::inline_sync_entry.
2609 #ifdef ASSERT
2610 ciType* obj_type = x->declared_type();
2611 assert(obj_type == nullptr || !obj_type->is_inlinetype(), "inline types cannot have synchronized methods");
2612 #endif
2613 } else {
2614 // We are compiling a monitorenter bytecode
2615 if (Arguments::is_valhalla_enabled()) {
2616 ciType* obj_type = x->declared_type();
2617 if (obj_type == nullptr || obj_type->can_be_inline_klass()) {
2618 // If we're (possibly) locking on an inline type, check for markWord::always_locked_pattern
2619 // and throw IMSE. (obj_type is null for Phi nodes, so let's just be conservative).
2620 maybe_inlinetype = true;
2621 }
2622 }
2623 }
2624
2625 // save state before locking in case of deoptimization after a NullPointerException
2626 ValueStack* state_before = copy_state_for_exception_with_bci(bci);
2627 append_with_bci(new MonitorEnter(x, state()->lock(x), state_before, maybe_inlinetype), bci);
2628 kill_all();
2629 }
2630
2631
2632 void GraphBuilder::monitorexit(Value x, int bci) {
2633 append_with_bci(new MonitorExit(x, state()->unlock()), bci);
2634 kill_all();
2635 }
2636
2637
2638 void GraphBuilder::new_multi_array(int dimensions) {
2639 ciKlass* klass = stream()->get_klass();
2640 ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2641
2642 Values* dims = new Values(dimensions, dimensions, nullptr);
2643 // fill in all dimensions
2644 int i = dimensions;
2645 while (i-- > 0) dims->at_put(i, ipop());
2646 // create array
2647 NewArray* n = new NewMultiArray(klass, dims, state_before);
2648 apush(append_split(n));
2649 }
2650
2651
2652 void GraphBuilder::throw_op(int bci) {
2653 // We require that the debug info for a Throw be the "state before"
2654 // the Throw (i.e., exception oop is still on TOS)
2655 ValueStack* state_before = copy_state_before_with_bci(bci);
2656 Throw* t = new Throw(apop(), state_before);
2657 // operand stack not needed after a throw
2658 state()->truncate_stack(0);
2659 append_with_bci(t, bci);
2660 }
2661
2662
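// Canonicalize instr and append it to the current block, unless
// canonicalization or local value numbering yields an equivalent
// instruction that is already linked, in which case that one is
// returned instead.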
2663 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
2664 Canonicalizer canon(compilation(), instr, bci);
2665 Instruction* i1 = canon.canonical();
2666 if (i1->is_linked() || !i1->can_be_linked()) {
2667 // Canonicalizer returned an instruction which was already
2668 // appended so simply return it.
2669 return i1;
2670 }
2671
2672 if (UseLocalValueNumbering) {
2673 // Lookup the instruction in the ValueMap and add it to the map if
2674 // it's not found.
2675 Instruction* i2 = vmap()->find_insert(i1);
2676 if (i2 != i1) {
2677 // found an entry in the value map, so just return it.
2678 assert(i2->is_linked(), "should already be linked");
2679 return i2;
2680 }
2681 ValueNumberingEffects vne(vmap());
2682 i1->visit(&vne);
2683 }
2684
2685 // i1 was not eliminated => append it
2686 assert(i1->next() == nullptr, "shouldn't already be linked");
2687 _last = _last->set_next(i1, canon.bci());
2688
2689 if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) {
2690 // set the bailout state but complete normal processing. We
2691 // might do a little more work before noticing the bailout so we
2692 // want processing to continue normally until it's noticed.
2693 bailout("Method and/or inlining is too large");
2694 }
2695
2696 #ifndef PRODUCT
2697 if (PrintIRDuringConstruction) {
2698 InstructionPrinter ip;
2699 ip.print_line(i1);
2700 if (Verbose) {
2701 state()->print();
2702 }
2703 }
2704 #endif
2705
2706 // save state after modification of operand stack for StateSplit instructions
2707 StateSplit* s = i1->as_StateSplit();
2708 if (s != nullptr) {
2709 if (EliminateFieldAccess) {
2710 Intrinsic* intrinsic = s->as_Intrinsic();
2711 if (s->as_Invoke() != nullptr || (intrinsic && !intrinsic->preserves_state())) {
2712 _memory->kill();
2713 }
2714 }
2715 s->set_state(state()->copy(ValueStack::StateAfter, canon.bci()));
2716 }
2717
2718 // set up exception handlers for this instruction if necessary
2719 if (i1->can_trap()) {
2720 i1->set_exception_handlers(handle_exception(i1));
2721 assert(i1->exception_state() != nullptr || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
2722 }
2723 return i1;
2724 }
2725
2726
2727 Instruction* GraphBuilder::append(Instruction* instr) {
2728 assert(instr->as_StateSplit() == nullptr || instr->as_BlockEnd() != nullptr, "wrong append used");
2729 return append_with_bci(instr, bci());
2730 }
2731
2732
2733 Instruction* GraphBuilder::append_split(StateSplit* instr) {
2734 return append_with_bci(instr, bci());
2735 }
2736
2737
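// Append an explicit NullCheck unless the value is provably non-null:
// a freshly allocated instance or array, a loaded non-null constant,
// or a value that is statically known to be null-free.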
2738 void GraphBuilder::null_check(Value value) {
2739 if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) {
2740 return;
2741 } else {
2742 Constant* con = value->as_Constant();
2743 if (con) {
2744 ObjectType* c = con->type()->as_ObjectType();
2745 if (c && c->is_loaded()) {
2746 ObjectConstant* oc = c->as_ObjectConstant();
2747 if (!oc || !oc->value()->is_null_object()) {
2748 return;
2749 }
2750 }
2751 }
2752 if (value->is_null_free()) return;
2753 }
2754 append(new NullCheck(value, copy_state_for_exception()));
2755 }
2756
2757
2758
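// Compute the exception handlers covering the given instruction: walk the
// scope chain outward from the instruction's state_before and join the
// current state into the entry block of every handler whose range covers
// the current bci.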
2759 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
2760 if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) {
2761 assert(instruction->exception_state() == nullptr
2762 || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
2763 || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()),
2764 "exception_state should be of exception kind");
2765 return new XHandlers();
2766 }
2767
2768 XHandlers* exception_handlers = new XHandlers();
2769 ScopeData* cur_scope_data = scope_data();
2770 ValueStack* cur_state = instruction->state_before();
2771 ValueStack* prev_state = nullptr;
2772 int scope_count = 0;
2773
2774 assert(cur_state != nullptr, "state_before must be set");
2775 do {
2776 int cur_bci = cur_state->bci();
2777 assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2778 assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci()
2779 || has_pending_field_access() || has_pending_load_indexed(), "invalid bci");
2780
2781
2782 // join with all potential exception handlers
2783 XHandlers* list = cur_scope_data->xhandlers();
2784 const int n = list->length();
2785 for (int i = 0; i < n; i++) {
2786 XHandler* h = list->handler_at(i);
2787 if (h->covers(cur_bci)) {
2788 // h is a potential exception handler => join it
2789 compilation()->set_has_exception_handlers(true);
2790
2791 BlockBegin* entry = h->entry_block();
2792 if (entry == block()) {
          // It's acceptable for an exception handler to cover itself
          // but we don't handle that in the parser currently. It's
          // very rare, so we bail out instead of trying to handle it.
2796 BAILOUT_("exception handler covers itself", exception_handlers);
2797 }
2798 assert(entry->bci() == h->handler_bci(), "must match");
2799 assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
2800
2801 // previously this was a BAILOUT, but this is not necessary
2802 // now because asynchronous exceptions are not handled this way.
2803 assert(entry->state() == nullptr || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");
2804
        // exception handlers start with an empty expression stack
2806 if (cur_state->stack_size() != 0) {
2807 // locals are preserved
2808 // stack will be truncated
2809 cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
2810 }
2811 if (instruction->exception_state() == nullptr) {
2812 instruction->set_exception_state(cur_state);
2813 }
2814
2815 // Note: Usually this join must work. However, very
2816 // complicated jsr-ret structures where we don't ret from
2817 // the subroutine can cause the objects on the monitor
2818 // stacks to not match because blocks can be parsed twice.
2819 // The only test case we've seen so far which exhibits this
2820 // problem is caught by the infinite recursion test in
2821 // GraphBuilder::jsr() if the join doesn't work.
2822 if (!entry->try_merge(cur_state, compilation()->has_irreducible_loops())) {
2823 BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
2824 }
2825
2826 // add current state for correct handling of phi functions at begin of xhandler
2827 int phi_operand = entry->add_exception_state(cur_state);
2828
2829 // add entry to the list of xhandlers of this block
2830 _block->add_exception_handler(entry);
2831
2832 // add back-edge from xhandler entry to this block
2833 if (!entry->is_predecessor(_block)) {
2834 entry->add_predecessor(_block);
2835 }
2836
        // clone the XHandler because phi_operand and scope_count cannot be shared
        XHandler* new_xhandler = new XHandler(h);
        new_xhandler->set_phi_operand(phi_operand);
        new_xhandler->set_scope_count(scope_count);
        exception_handlers->append(new_xhandler);

        // fill in exception handler subgraph lazily
        assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
        cur_scope_data->add_to_work_list(entry);

        // stop when reaching catchall
        if (h->catch_type() == 0) {
          return exception_handlers;
        }
      }
    }

    if (exception_handlers->length() == 0) {
      // This scope and all callees do not handle exceptions, so the local
      // variables of this scope are not needed. However, the scope itself is
      // required for a correct exception stack trace -> clear out the locals.
      // Stack and locals are invalidated but not truncated in caller state.
      if (prev_state != nullptr) {
        assert(instruction->exception_state() != nullptr, "missed set?");
        ValueStack::Kind exc_kind = ValueStack::empty_exception_kind(true /* caller */);
        cur_state = cur_state->copy(exc_kind, cur_state->bci());
        // reset caller exception state
        prev_state->set_caller_state(cur_state);
      } else {
        assert(instruction->exception_state() == nullptr, "already set");
        // set instruction exception state
        // truncate stack
        ValueStack::Kind exc_kind = ValueStack::empty_exception_kind();
        cur_state = cur_state->copy(exc_kind, cur_state->bci());
        instruction->set_exception_state(cur_state);
      }
    }

    // Set up iteration for next time.
    // If parsing a jsr, do not grab exception handlers from the
    // parent scopes for this method (already got them, and they
    // needed to be cloned)

    while (cur_scope_data->parsing_jsr()) {
      cur_scope_data = cur_scope_data->parent();
    }

    assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
    assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");

    prev_state = cur_state;
    cur_state = cur_state->caller_state();
    cur_scope_data = cur_scope_data->parent();
    scope_count++;
  } while (cur_scope_data != nullptr);

  return exception_handlers;
}


// Helper class for simplifying Phis.
class PhiSimplifier : public BlockClosure {
 private:
  bool _has_substitutions;
  Value simplify(Value v);

 public:
  PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
    start->iterate_preorder(this);
    if (_has_substitutions) {
      SubstitutionResolver sr(start);
    }
  }
  void block_do(BlockBegin* b);
  bool has_substitutions() const { return _has_substitutions; }
};


Value PhiSimplifier::simplify(Value v) {
  Phi* phi = v->as_Phi();

  if (phi == nullptr) {
    // no phi function
    return v;
  } else if (v->has_subst()) {
    // already substituted; subst can be phi itself -> simplify
    return simplify(v->subst());
  } else if (phi->is_set(Phi::cannot_simplify)) {
    // already tried to simplify phi before
    return phi;
  } else if (phi->is_set(Phi::visited)) {
    // break cycles in phi functions
    return phi;
  } else if (phi->type()->is_illegal()) {
    // illegal phi functions are ignored anyway
    return phi;

  } else {
    // mark phi function as processed to break cycles in phi functions
    phi->set(Phi::visited);

    // simplify x = [y, x] and x = [y, y] to y
    Value subst = nullptr;
    int opd_count = phi->operand_count();
    for (int i = 0; i < opd_count; i++) {
      Value opd = phi->operand_at(i);
      assert(opd != nullptr, "Operand must exist!");

      if (opd->type()->is_illegal()) {
        // if one operand is illegal, the entire phi function is illegal
        phi->make_illegal();
        phi->clear(Phi::visited);
        return phi;
      }

      Value new_opd = simplify(opd);
      assert(new_opd != nullptr, "Simplified operand must exist!");

      if (new_opd != phi && new_opd != subst) {
        if (subst == nullptr) {
          subst = new_opd;
        } else {
          // no simplification possible
          phi->set(Phi::cannot_simplify);
          phi->clear(Phi::visited);
          return phi;
        }
      }
    }

    // successfully simplified phi function
    assert(subst != nullptr, "illegal phi function");
    _has_substitutions = true;
    phi->clear(Phi::visited);
    phi->set_subst(subst);

#ifndef PRODUCT
    if (PrintPhiFunctions) {
      tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
    }
#endif

    return subst;
  }
}


void PhiSimplifier::block_do(BlockBegin* b) {
  for_each_phi_fun(b, phi,
    simplify(phi);
  );

#ifdef ASSERT
  for_each_phi_fun(b, phi,
    assert(phi->operand_count() != 1 || phi->subst() != phi || phi->is_illegal(), "missed trivial simplification");
  );

  ValueStack* state = b->state()->caller_state();
  for_each_state_value(state, value,
    Phi* phi = value->as_Phi();
    assert(phi == nullptr || phi->block() != b, "must not have phi function to simplify in caller state");
  );
#endif
}

// This method is called after all blocks are filled with HIR instructions.
// It eliminates all Phi functions of the form x = [y, y] and x = [y, x].
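//
// Illustrative sketch (hypothetical locals, not taken from this file):
// consider a loop that never reassigns a local x after its definition:
//
//   B1 (pre-header):  x0 = 17
//   B2 (loop header): x1 = [x0, x1]   // phi merging entry and back edge
//
// x1's operands are only x0 and x1 itself, so x1 simplifies to x0; in the
// same way x = [y, y] simplifies to y. PhiSimplifier only records the
// substitution; SubstitutionResolver then rewrites all uses of x1.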
void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) {
  PhiSimplifier simplifier(start);
}


void GraphBuilder::connect_to_end(BlockBegin* beg) {
  // setup iteration
  kill_all();
  _block = beg;
  _state = beg->state()->copy_for_parsing();
  _last = beg;
  iterate_bytecodes_for_block(beg->bci());
}


BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
#ifndef PRODUCT
  if (PrintIRDuringConstruction) {
    tty->cr();
    InstructionPrinter ip;
    ip.print_instr(_block); tty->cr();
    ip.print_stack(_block->state()); tty->cr();
    ip.print_inline_level(_block);
    ip.print_head();
    tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size());
  }
#endif
  _skip_block = false;
  assert(state() != nullptr, "ValueStack missing!");
  CompileLog* log = compilation()->log();
  ciBytecodeStream s(method());
  s.reset_to_bci(bci);
  int prev_bci = bci;
  scope_data()->set_stream(&s);
  // iterate
  Bytecodes::Code code = Bytecodes::_illegal;
  bool push_exception = false;

  if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == nullptr) {
    // first thing in the exception entry block should be the exception object.
    push_exception = true;
  }

  bool ignore_return = scope_data()->ignore_return();

  while (!bailed_out() && last()->as_BlockEnd() == nullptr &&
         (code = stream()->next()) != ciBytecodeStream::EOBC() &&
         (block_at(s.cur_bci()) == nullptr || block_at(s.cur_bci()) == block())) {
    assert(state()->kind() == ValueStack::Parsing, "invalid state kind");

    if (log != nullptr)
      log->set_context("bc code='%d' bci='%d'", (int)code, s.cur_bci());

    // Check for active jsr during OSR compilation
    if (compilation()->is_osr_compile()
        && scope()->is_top_scope()
        && parsing_jsr()
        && s.cur_bci() == compilation()->osr_bci()) {
      bailout("OSR not supported while a jsr is active");
    }

    if (push_exception) {
      apush(append(new ExceptionObject()));
      push_exception = false;
    }

    // handle bytecode
    switch (code) {
      case Bytecodes::_nop            : /* nothing to do */ break;
      case Bytecodes::_aconst_null    : apush(append(new Constant(objectNull            ))); break;
      case Bytecodes::_iconst_m1      : ipush(append(new Constant(new IntConstant   (-1)))); break;
      case Bytecodes::_iconst_0       : ipush(append(new Constant(intZero               ))); break;
      case Bytecodes::_iconst_1       : ipush(append(new Constant(intOne                ))); break;
      case Bytecodes::_iconst_2       : ipush(append(new Constant(new IntConstant   ( 2)))); break;
      case Bytecodes::_iconst_3       : ipush(append(new Constant(new IntConstant   ( 3)))); break;
      case Bytecodes::_iconst_4       : ipush(append(new Constant(new IntConstant   ( 4)))); break;
      case Bytecodes::_iconst_5       : ipush(append(new Constant(new IntConstant   ( 5)))); break;
      case Bytecodes::_lconst_0       : lpush(append(new Constant(new LongConstant  ( 0)))); break;
      case Bytecodes::_lconst_1       : lpush(append(new Constant(new LongConstant  ( 1)))); break;
      case Bytecodes::_fconst_0       : fpush(append(new Constant(new FloatConstant ( 0)))); break;
      case Bytecodes::_fconst_1       : fpush(append(new Constant(new FloatConstant ( 1)))); break;
      case Bytecodes::_fconst_2      : fpush(append(new Constant(new FloatConstant ( 2)))); break;
      case Bytecodes::_dconst_0       : dpush(append(new Constant(new DoubleConstant( 0)))); break;
      case Bytecodes::_dconst_1       : dpush(append(new Constant(new DoubleConstant( 1)))); break;
      case Bytecodes::_bipush         : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
      case Bytecodes::_sipush         : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
      case Bytecodes::_ldc            : // fall through
      case Bytecodes::_ldc_w          : // fall through
      case Bytecodes::_ldc2_w         : load_constant(); break;
      case Bytecodes::_iload          : load_local(intType     , s.get_index()); break;
      case Bytecodes::_lload          : load_local(longType    , s.get_index()); break;
      case Bytecodes::_fload          : load_local(floatType   , s.get_index()); break;
      case Bytecodes::_dload          : load_local(doubleType  , s.get_index()); break;
      case Bytecodes::_aload          : load_local(instanceType, s.get_index()); break;
      case Bytecodes::_iload_0        : load_local(intType   , 0); break;
      case Bytecodes::_iload_1        : load_local(intType   , 1); break;
      case Bytecodes::_iload_2        : load_local(intType   , 2); break;
      case Bytecodes::_iload_3        : load_local(intType   , 3); break;
      case Bytecodes::_lload_0        : load_local(longType  , 0); break;
      case Bytecodes::_lload_1        : load_local(longType  , 1); break;
      case Bytecodes::_lload_2        : load_local(longType  , 2); break;
      case Bytecodes::_lload_3        : load_local(longType  , 3); break;
      case Bytecodes::_fload_0        : load_local(floatType , 0); break;
      case Bytecodes::_fload_1        : load_local(floatType , 1); break;
      case Bytecodes::_fload_2        : load_local(floatType , 2); break;
      case Bytecodes::_fload_3        : load_local(floatType , 3); break;
      case Bytecodes::_dload_0        : load_local(doubleType, 0); break;
      case Bytecodes::_dload_1        : load_local(doubleType, 1); break;
      case Bytecodes::_dload_2        : load_local(doubleType, 2); break;
      case Bytecodes::_dload_3        : load_local(doubleType, 3); break;
      case Bytecodes::_aload_0        : load_local(objectType, 0); break;
      case Bytecodes::_aload_1        : load_local(objectType, 1); break;
      case Bytecodes::_aload_2        : load_local(objectType, 2); break;
      case Bytecodes::_aload_3        : load_local(objectType, 3); break;
      case Bytecodes::_iaload         : load_indexed(T_INT   ); break;
      case Bytecodes::_laload         : load_indexed(T_LONG  ); break;
      case Bytecodes::_faload         : load_indexed(T_FLOAT ); break;
      case Bytecodes::_daload         : load_indexed(T_DOUBLE); break;
      case Bytecodes::_aaload         : load_indexed(T_OBJECT); break;
      case Bytecodes::_baload         : load_indexed(T_BYTE  ); break;
      case Bytecodes::_caload         : load_indexed(T_CHAR  ); break;
      case Bytecodes::_saload         : load_indexed(T_SHORT ); break;
      case Bytecodes::_istore         : store_local(intType   , s.get_index()); break;
      case Bytecodes::_lstore         : store_local(longType  , s.get_index()); break;
      case Bytecodes::_fstore         : store_local(floatType , s.get_index()); break;
      case Bytecodes::_dstore         : store_local(doubleType, s.get_index()); break;
      case Bytecodes::_astore         : store_local(objectType, s.get_index()); break;
      case Bytecodes::_istore_0       : store_local(intType   , 0); break;
      case Bytecodes::_istore_1       : store_local(intType   , 1); break;
      case Bytecodes::_istore_2       : store_local(intType   , 2); break;
      case Bytecodes::_istore_3       : store_local(intType   , 3); break;
      case Bytecodes::_lstore_0       : store_local(longType  , 0); break;
      case Bytecodes::_lstore_1       : store_local(longType  , 1); break;
      case Bytecodes::_lstore_2       : store_local(longType  , 2); break;
      case Bytecodes::_lstore_3       : store_local(longType  , 3); break;
      case Bytecodes::_fstore_0       : store_local(floatType , 0); break;
      case Bytecodes::_fstore_1       : store_local(floatType , 1); break;
      case Bytecodes::_fstore_2       : store_local(floatType , 2); break;
      case Bytecodes::_fstore_3       : store_local(floatType , 3); break;
      case Bytecodes::_dstore_0       : store_local(doubleType, 0); break;
      case Bytecodes::_dstore_1       : store_local(doubleType, 1); break;
      case Bytecodes::_dstore_2       : store_local(doubleType, 2); break;
      case Bytecodes::_dstore_3       : store_local(doubleType, 3); break;
      case Bytecodes::_astore_0       : store_local(objectType, 0); break;
      case Bytecodes::_astore_1       : store_local(objectType, 1); break;
      case Bytecodes::_astore_2       : store_local(objectType, 2); break;
      case Bytecodes::_astore_3       : store_local(objectType, 3); break;
      case Bytecodes::_iastore        : store_indexed(T_INT   ); break;
      case Bytecodes::_lastore        : store_indexed(T_LONG  ); break;
      case Bytecodes::_fastore        : store_indexed(T_FLOAT ); break;
      case Bytecodes::_dastore        : store_indexed(T_DOUBLE); break;
      case Bytecodes::_aastore        : store_indexed(T_OBJECT); break;
      case Bytecodes::_bastore        : store_indexed(T_BYTE  ); break;
      case Bytecodes::_castore        : store_indexed(T_CHAR  ); break;
      case Bytecodes::_sastore        : store_indexed(T_SHORT ); break;
      case Bytecodes::_pop            : // fall through
      case Bytecodes::_pop2           : // fall through
      case Bytecodes::_dup            : // fall through
      case Bytecodes::_dup_x1         : // fall through
      case Bytecodes::_dup_x2         : // fall through
      case Bytecodes::_dup2           : // fall through
      case Bytecodes::_dup2_x1        : // fall through
      case Bytecodes::_dup2_x2        : // fall through
      case Bytecodes::_swap           : stack_op(code); break;
      case Bytecodes::_iadd           : arithmetic_op(intType   , code); break;
      case Bytecodes::_ladd           : arithmetic_op(longType  , code); break;
      case Bytecodes::_fadd           : arithmetic_op(floatType , code); break;
      case Bytecodes::_dadd           : arithmetic_op(doubleType, code); break;
      case Bytecodes::_isub           : arithmetic_op(intType   , code); break;
      case Bytecodes::_lsub           : arithmetic_op(longType  , code); break;
      case Bytecodes::_fsub           : arithmetic_op(floatType , code); break;
      case Bytecodes::_dsub           : arithmetic_op(doubleType, code); break;
      case Bytecodes::_imul           : arithmetic_op(intType   , code); break;
      case Bytecodes::_lmul           : arithmetic_op(longType  , code); break;
      case Bytecodes::_fmul           : arithmetic_op(floatType , code); break;
      case Bytecodes::_dmul           : arithmetic_op(doubleType, code); break;
      case Bytecodes::_idiv           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
      case Bytecodes::_ldiv           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
      case Bytecodes::_fdiv           : arithmetic_op(floatType , code); break;
      case Bytecodes::_ddiv           : arithmetic_op(doubleType, code); break;
      case Bytecodes::_irem           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
      case Bytecodes::_lrem           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
      case Bytecodes::_frem           : arithmetic_op(floatType , code); break;
      case Bytecodes::_drem           : arithmetic_op(doubleType, code); break;
      case Bytecodes::_ineg           : negate_op(intType   ); break;
      case Bytecodes::_lneg           : negate_op(longType  ); break;
      case Bytecodes::_fneg           : negate_op(floatType ); break;
      case Bytecodes::_dneg           : negate_op(doubleType); break;
      case Bytecodes::_ishl           : shift_op(intType , code); break;
      case Bytecodes::_lshl           : shift_op(longType, code); break;
      case Bytecodes::_ishr           : shift_op(intType , code); break;
      case Bytecodes::_lshr           : shift_op(longType, code); break;
      case Bytecodes::_iushr          : shift_op(intType , code); break;
      case Bytecodes::_lushr          : shift_op(longType, code); break;
      case Bytecodes::_iand           : logic_op(intType , code); break;
      case Bytecodes::_land           : logic_op(longType, code); break;
      case Bytecodes::_ior            : logic_op(intType , code); break;
      case Bytecodes::_lor            : logic_op(longType, code); break;
      case Bytecodes::_ixor           : logic_op(intType , code); break;
      case Bytecodes::_lxor           : logic_op(longType, code); break;
      case Bytecodes::_iinc           : increment(); break;
      case Bytecodes::_i2l            : convert(code, T_INT   , T_LONG  ); break;
      case Bytecodes::_i2f            : convert(code, T_INT   , T_FLOAT ); break;
      case Bytecodes::_i2d            : convert(code, T_INT   , T_DOUBLE); break;
      case Bytecodes::_l2i            : convert(code, T_LONG  , T_INT   ); break;
      case Bytecodes::_l2f            : convert(code, T_LONG  , T_FLOAT ); break;
      case Bytecodes::_l2d            : convert(code, T_LONG  , T_DOUBLE); break;
      case Bytecodes::_f2i            : convert(code, T_FLOAT , T_INT   ); break;
      case Bytecodes::_f2l            : convert(code, T_FLOAT , T_LONG  ); break;
      case Bytecodes::_f2d            : convert(code, T_FLOAT , T_DOUBLE); break;
      case Bytecodes::_d2i            : convert(code, T_DOUBLE, T_INT   ); break;
      case Bytecodes::_d2l            : convert(code, T_DOUBLE, T_LONG  ); break;
      case Bytecodes::_d2f            : convert(code, T_DOUBLE, T_FLOAT ); break;
      case Bytecodes::_i2b            : convert(code, T_INT   , T_BYTE  ); break;
      case Bytecodes::_i2c            : convert(code, T_INT   , T_CHAR  ); break;
      case Bytecodes::_i2s            : convert(code, T_INT   , T_SHORT ); break;
      case Bytecodes::_lcmp           : compare_op(longType  , code); break;
      case Bytecodes::_fcmpl          : compare_op(floatType , code); break;
      case Bytecodes::_fcmpg          : compare_op(floatType , code); break;
      case Bytecodes::_dcmpl          : compare_op(doubleType, code); break;
      case Bytecodes::_dcmpg          : compare_op(doubleType, code); break;
      case Bytecodes::_ifeq           : if_zero(intType   , If::eql); break;
      case Bytecodes::_ifne           : if_zero(intType   , If::neq); break;
      case Bytecodes::_iflt           : if_zero(intType   , If::lss); break;
      case Bytecodes::_ifge           : if_zero(intType   , If::geq); break;
      case Bytecodes::_ifgt           : if_zero(intType   , If::gtr); break;
      case Bytecodes::_ifle           : if_zero(intType   , If::leq); break;
      case Bytecodes::_if_icmpeq      : if_same(intType   , If::eql); break;
      case Bytecodes::_if_icmpne      : if_same(intType   , If::neq); break;
      case Bytecodes::_if_icmplt      : if_same(intType   , If::lss); break;
      case Bytecodes::_if_icmpge      : if_same(intType   , If::geq); break;
      case Bytecodes::_if_icmpgt      : if_same(intType   , If::gtr); break;
      case Bytecodes::_if_icmple      : if_same(intType   , If::leq); break;
      case Bytecodes::_if_acmpeq      : if_same(objectType, If::eql); break;
      case Bytecodes::_if_acmpne      : if_same(objectType, If::neq); break;
      case Bytecodes::_goto           : _goto(s.cur_bci(), s.get_dest()); break;
      case Bytecodes::_jsr            : jsr(s.get_dest()); break;
      case Bytecodes::_ret            : ret(s.get_index()); break;
      case Bytecodes::_tableswitch    : table_switch(); break;
      case Bytecodes::_lookupswitch   : lookup_switch(); break;
      case Bytecodes::_ireturn        : method_return(ipop(), ignore_return); break;
      case Bytecodes::_lreturn        : method_return(lpop(), ignore_return); break;
      case Bytecodes::_freturn        : method_return(fpop(), ignore_return); break;
      case Bytecodes::_dreturn        : method_return(dpop(), ignore_return); break;
      case Bytecodes::_areturn        : method_return(apop(), ignore_return); break;
      case Bytecodes::_return         : method_return(nullptr, ignore_return); break;
      case Bytecodes::_getstatic      : // fall through
      case Bytecodes::_putstatic      : // fall through
      case Bytecodes::_getfield       : // fall through
      case Bytecodes::_putfield       : access_field(code); break;
      case Bytecodes::_invokevirtual  : // fall through
      case Bytecodes::_invokespecial  : // fall through
      case Bytecodes::_invokestatic   : // fall through
      case Bytecodes::_invokedynamic  : // fall through
      case Bytecodes::_invokeinterface: invoke(code); break;
      case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
      case Bytecodes::_newarray       : new_type_array(); break;
      case Bytecodes::_anewarray      : new_object_array(); break;
      case Bytecodes::_arraylength    : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; }
      case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
      case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
      case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
      case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
      case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
      case Bytecodes::_wide           : ShouldNotReachHere(); break;
      case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
      case Bytecodes::_ifnull         : if_null(objectType, If::eql); break;
      case Bytecodes::_ifnonnull      : if_null(objectType, If::neq); break;
      case Bytecodes::_goto_w         : _goto(s.cur_bci(), s.get_far_dest()); break;
      case Bytecodes::_jsr_w          : jsr(s.get_far_dest()); break;
      case Bytecodes::_breakpoint     : BAILOUT_("concurrent setting of breakpoint", nullptr);
      default                         : ShouldNotReachHere(); break;
    }

    if (log != nullptr)
      log->clear_context(); // skip marker if nothing was printed

    // save current bci to set up Goto at the end
    prev_bci = s.cur_bci();

  }
  CHECK_BAILOUT_(nullptr);
  // stop processing of this block (see try_inline_full)
  if (_skip_block) {
    _skip_block = false;
    assert(_last && _last->as_BlockEnd(), "");
    return _last->as_BlockEnd();
  }
  // check whether the last instruction is already a BlockEnd instruction
  BlockEnd* end = last()->as_BlockEnd();
  if (end == nullptr) {
    // all blocks must end with a BlockEnd instruction => add a Goto
    end = new Goto(block_at(s.cur_bci()), false);
    append(end);
  }
  assert(end == last()->as_BlockEnd(), "inconsistency");

  assert(end->state() != nullptr, "state must already be present");
  assert(end->as_Return() == nullptr || end->as_Throw() == nullptr || end->state()->stack_size() == 0, "stack not needed for return and throw");

  // connect to begin & set state
  // NOTE that inlining may have changed the block we are parsing
  block()->set_end(end);
  // propagate state
  for (int i = end->number_of_sux() - 1; i >= 0; i--) {
    BlockBegin* sux = end->sux_at(i);
    assert(sux->is_predecessor(block()), "predecessor missing");
    // be careful, bailout if bytecodes are strange
    if (!sux->try_merge(end->state(), compilation()->has_irreducible_loops())) BAILOUT_("block join failed", nullptr);
    scope_data()->add_to_work_list(end->sux_at(i));
  }

  scope_data()->set_stream(nullptr);

  // done
  return end;
}
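
// Illustrative sketch (hypothetical bcis, not from a real method): a block
// can end without an explicit branch when the next bci starts another
// basic block, which is when the synthetic Goto above is appended:
//
//   10: iload_1
//   11: ifeq 16      // ends B1; successors are B2 (bci 14) and B3 (bci 16)
//   14: iconst_0
//   15: istore_2     // not a BlockEnd, but bci 16 starts block B3 ...
//   16: ...          // ... so parsing stops and Goto(B3) is appended to B2.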


void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
  do {
    if (start_in_current_block_for_inlining && !bailed_out()) {
      iterate_bytecodes_for_block(0);
      start_in_current_block_for_inlining = false;
    } else {
      BlockBegin* b;
      while ((b = scope_data()->remove_from_work_list()) != nullptr) {
        if (!b->is_set(BlockBegin::was_visited_flag)) {
          if (b->is_set(BlockBegin::osr_entry_flag)) {
            // we're about to parse the osr entry block, so make sure
            // we set up the OSR edge leading into this block so that
            // Phis get set up correctly.
            setup_osr_entry_block();
            // this is no longer the osr entry block, so clear it.
            b->clear(BlockBegin::osr_entry_flag);
          }
          b->set(BlockBegin::was_visited_flag);
          connect_to_end(b);
        }
      }
    }
  } while (!bailed_out() && !scope_data()->is_work_list_empty());
}


bool GraphBuilder::_can_trap[Bytecodes::number_of_java_codes];

void GraphBuilder::initialize() {
  // the following bytecodes are assumed to potentially
  // throw exceptions in compiled code - note that, e.g.,
  // monitorexit & the return bytecodes do not throw
  // exceptions, since successful monitor pairing proves
  // that they will succeed
  Bytecodes::Code can_trap_list[] =
    { Bytecodes::_ldc
    , Bytecodes::_ldc_w
    , Bytecodes::_ldc2_w
    , Bytecodes::_iaload
    , Bytecodes::_laload
    , Bytecodes::_faload
    , Bytecodes::_daload
    , Bytecodes::_aaload
    , Bytecodes::_baload
    , Bytecodes::_caload
    , Bytecodes::_saload
    , Bytecodes::_iastore
    , Bytecodes::_lastore
    , Bytecodes::_fastore
    , Bytecodes::_dastore
    , Bytecodes::_aastore
    , Bytecodes::_bastore
    , Bytecodes::_castore
    , Bytecodes::_sastore
    , Bytecodes::_idiv
    , Bytecodes::_ldiv
    , Bytecodes::_irem
    , Bytecodes::_lrem
    , Bytecodes::_getstatic
    , Bytecodes::_putstatic
    , Bytecodes::_getfield
    , Bytecodes::_putfield
    , Bytecodes::_invokevirtual
    , Bytecodes::_invokespecial
    , Bytecodes::_invokestatic
    , Bytecodes::_invokedynamic
    , Bytecodes::_invokeinterface
    , Bytecodes::_new
    , Bytecodes::_newarray
    , Bytecodes::_anewarray
    , Bytecodes::_arraylength
    , Bytecodes::_athrow
    , Bytecodes::_checkcast
    , Bytecodes::_instanceof
    , Bytecodes::_monitorenter
    , Bytecodes::_multianewarray
    };

  // initialize trap tables
  for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
    _can_trap[i] = false;
  }
  // set standard trap info
  for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
    _can_trap[can_trap_list[j]] = true;
  }
}
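
// Minimal usage sketch (illustrative only, not code that exists elsewhere
// in this file): the table answers "may this bytecode throw in compiled
// code?", which is why e.g. the _idiv/_irem cases in
// iterate_bytecodes_for_block() pass an exception state while _iadd does
// not:
//
//   assert( GraphBuilder::_can_trap[Bytecodes::_idiv], "idiv can throw");
//   assert(!GraphBuilder::_can_trap[Bytecodes::_iadd], "iadd cannot throw");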


BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
  assert(entry->is_set(f), "entry/flag mismatch");
  // create header block
  BlockBegin* h = new BlockBegin(entry->bci());
  h->set_depth_first_number(0);

  Value l = h;
  BlockEnd* g = new Goto(entry, false);
  l->set_next(g, entry->bci());
  h->set_end(g);
  h->set(f);
  // setup header block end state
  ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
  assert(s->stack_is_empty(), "must have empty stack at entry point");
  g->set_state(s);
  return h;
}
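
// Illustrative CFG sketch (not actual code) of what header_block() builds:
//
//   before:  preds --> entry        after:  preds --> entry <-- h (Goto)
//
// h is an otherwise empty block at entry's bci, ending in Goto(entry),
// carrying the entry flag f and a copy of the (empty-stack) state. It
// gives later phases (profiling counters, range check predicates) a
// dedicated block sitting in front of entry.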


BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
  BlockBegin* start = new BlockBegin(0);

  // This code eliminates the empty start block at the beginning of
  // each method. Previously, each method started with the
  // start-block created below, and this block was followed by the
  // header block that was always empty. This header block is only
  // necessary if std_entry is also a backward branch target because
  // then phi functions may be necessary in the header block. It's
  // also necessary when profiling so that there's a single block that
  // can increment the counters.
  // In addition, with range check elimination, we may need a valid block
  // that dominates all the rest to insert range predicates.
  BlockBegin* new_header_block;
  if (std_entry->number_of_preds() > 0 || is_profiling() || RangeCheckElimination) {
    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  } else {
    new_header_block = std_entry;
  }

  // setup start block (root for the IR graph)
  Base* base =
    new Base(
      new_header_block,
      osr_entry
    );
  start->set_next(base, 0);
  start->set_end(base);
  // create & setup state for start block
  start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
  base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));

  if (base->std_entry()->state() == nullptr) {
    // setup states for header blocks
    base->std_entry()->merge(state, compilation()->has_irreducible_loops());
  }

  assert(base->std_entry()->state() != nullptr, "");
  return start;
}


void GraphBuilder::setup_osr_entry_block() {
  assert(compilation()->is_osr_compile(), "only for osrs");

  int osr_bci = compilation()->osr_bci();
  ciBytecodeStream s(method());
  s.reset_to_bci(osr_bci);
  s.next();
  scope_data()->set_stream(&s);

  // create a new block to be the osr setup code
  _osr_entry = new BlockBegin(osr_bci);
  _osr_entry->set(BlockBegin::osr_entry_flag);
  _osr_entry->set_depth_first_number(0);
  BlockBegin* target = bci2block()->at(osr_bci);
  assert(target != nullptr && target->is_set(BlockBegin::osr_entry_flag), "must be there");
  // the osr entry has no values for locals
  ValueStack* state = target->state()->copy();
  _osr_entry->set_state(state);

  kill_all();
  _block = _osr_entry;
  _state = _osr_entry->state()->copy();
  assert(_state->bci() == osr_bci, "mismatch");
  _last = _osr_entry;
  Value e = append(new OsrEntry());
  e->set_needs_null_check(false);

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock in the interpreter frame (the method lock, if a sync method).
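  //
  // Worked example (illustrative numbers only): with max_locals() == 3 and
  // BytesPerWord == 8, locals_offset computed below is 8 * (3 - 1) == 16, so
  //
  //   local 0 (e.g. the receiver, 1 slot) -> offset 16 - (0 + 1 - 1) * 8 == 16
  //   local 1 (e.g. a long, 2 slots)      -> offset 16 - (1 + 2 - 1) * 8 == 0
  //
  // i.e. local[0] lives at the highest offset in the buffer, matching the
  // reversed interpreter-frame layout described above.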

  // Initialize monitors in the compiled activation.

  int index;
  Value local;

  // find all the locals that the interpreter thinks contain live oops
  const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci);

  // compute the offset into the locals so that we can treat the buffer
  // as if the locals were still in the interpreter frame
  int locals_offset = BytesPerWord * (method()->max_locals() - 1);
  for_each_local_value(state, index, local) {
    int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
    Value get;
    if (local->type()->is_object_kind() && !live_oops.at(index)) {
      // The interpreter thinks this local is dead, but the compiler
      // doesn't, so pretend that the interpreter passed in null.
      get = append(new Constant(objectNull));
    } else {
      Value off_val = append(new Constant(new IntConstant(offset)));
      get = append(new UnsafeGet(as_BasicType(local->type()), e,
                                 off_val,
                                 false/*is_volatile*/,
                                 true/*is_raw*/));
    }
    _state->store_local(index, get);
  }

  // the storage for the OSR buffer is freed manually in the LIRGenerator.

  assert(state->caller_state() == nullptr, "should be top scope");
  state->clear_locals();
  Goto* g = new Goto(target, false);
  append(g);
  _osr_entry->set_end(g);
  target->merge(_osr_entry->end()->state(), compilation()->has_irreducible_loops());

  scope_data()->set_stream(nullptr);
}


ValueStack* GraphBuilder::state_at_entry() {
  ValueStack* state = new ValueStack(scope(), nullptr);

  // Set up locals for receiver
  int idx = 0;
  if (!method()->is_static()) {
    // we should always see the receiver
    state->store_local(idx, new Local(method()->holder(), objectType, idx, true));
    idx = 1;
  }

  // Set up locals for incoming arguments
  ciSignature* sig = method()->signature();
  for (int i = 0; i < sig->count(); i++) {
    ciType* type = sig->type_at(i);
    BasicType basic_type = type->basic_type();
    // don't allow T_ARRAY to propagate into locals types
    if (is_reference_type(basic_type)) basic_type = T_OBJECT;
    ValueType* vt = as_ValueType(basic_type);
    state->store_local(idx, new Local(type, vt, idx, false));
    idx += type->size();
  }

  // lock synchronized method
  if (method()->is_synchronized()) {
    state->lock(nullptr);
  }

  return state;
}
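
// Illustrative sketch (hypothetical signature): for a non-static method
//
//   int foo(long l, Object o)
//
// state_at_entry() produces the locals
//
//   idx 0: Local for the receiver (marked as such)
//   idx 1: Local for l (longType, occupying slots 1 and 2)
//   idx 3: Local for o (objectType)
//
// and, if foo is synchronized, one lock slot whose owner is not known yet.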


GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
  : _scope_data(nullptr)
  , _compilation(compilation)
  , _memory(new MemoryBuffer())
  , _inline_bailout_msg(nullptr)
  , _instruction_count(0)
  , _osr_entry(nullptr)
  , _pending_field_access(nullptr)
  , _pending_load_indexed(nullptr)
{
  int osr_bci = compilation->osr_bci();

  // determine entry points and bci2block mapping
  BlockListBuilder blm(compilation, scope, osr_bci);
  CHECK_BAILOUT();

  BlockList* bci2block = blm.bci2block();
  BlockBegin* start_block = bci2block->at(0);

  push_root_scope(scope, bci2block, start_block);

  // setup state for std entry
  _initial_state = state_at_entry();
  start_block->merge(_initial_state, compilation->has_irreducible_loops());

  // End nulls still exist here

  // complete graph
  _vmap = new ValueMap();
  switch (scope->method()->intrinsic_id()) {
  case vmIntrinsics::_dabs          : // fall through
  case vmIntrinsics::_dsqrt         : // fall through
  case vmIntrinsics::_dsqrt_strict  : // fall through
  case vmIntrinsics::_dsin          : // fall through
  case vmIntrinsics::_dcos          : // fall through
  case vmIntrinsics::_dtan          : // fall through
  case vmIntrinsics::_dsinh         : // fall through
  case vmIntrinsics::_dtanh         : // fall through
  case vmIntrinsics::_dcbrt         : // fall through
  case vmIntrinsics::_dlog          : // fall through
  case vmIntrinsics::_dlog10        : // fall through
  case vmIntrinsics::_dexp          : // fall through
  case vmIntrinsics::_dpow          : // fall through
    {
      // Compiles where the root method is an intrinsic need a special
      // compilation environment because the bytecodes for the method
      // shouldn't be parsed during the compilation, only the special
      // Intrinsic node should be emitted. If this isn't done, the
      // code for the inlined version will be different from the root
      // compiled version, which could lead to monotonicity problems on
      // Intel.
      if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) {
        BAILOUT("failed to inline intrinsic, method not annotated");
      }

      // Set up a stream so that appending instructions works properly.
      ciBytecodeStream s(scope->method());
      s.reset_to_bci(0);
      scope_data()->set_stream(&s);
      s.next();

      // setup the initial block state
      _block = start_block;
      _state = start_block->state()->copy_for_parsing();
      _last = start_block;
      load_local(doubleType, 0);
      if (scope->method()->intrinsic_id() == vmIntrinsics::_dpow) {
        load_local(doubleType, 2);
      }

      // Emit the intrinsic node.
      bool result = try_inline_intrinsics(scope->method());
      if (!result) BAILOUT("failed to inline intrinsic");
      method_return(dpop());

      // connect the begin and end blocks and we're all done.
      BlockEnd* end = last()->as_BlockEnd();
      block()->set_end(end);
      break;
    }

  case vmIntrinsics::_Reference_get0:
    {
      {
        // With java.lang.ref.Reference.get() we must go through the
        // intrinsic - when G1 is enabled - even when get() is the root
        // method of the compile so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        // Specifically, if G1 is enabled, the value in the referent
        // field is recorded by the G1 SATB pre barrier. This will
        // result in the referent being marked live and the reference
        // object removed from the list of discovered references during
        // reference processing.
        if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) {
          BAILOUT("failed to inline intrinsic, method not annotated");
        }

        // Also we need intrinsic to prevent commoning reads from this field
        // across safepoint since GC can change its value.

        // Set up a stream so that appending instructions works properly.
        ciBytecodeStream s(scope->method());
        s.reset_to_bci(0);
        scope_data()->set_stream(&s);
        s.next();

        // setup the initial block state
        _block = start_block;
        _state = start_block->state()->copy_for_parsing();
        _last = start_block;
        load_local(objectType, 0);

        // Emit the intrinsic node.
        bool result = try_inline_intrinsics(scope->method());
        if (!result) BAILOUT("failed to inline intrinsic");
        method_return(apop());

        // connect the begin and end blocks and we're all done.
        BlockEnd* end = last()->as_BlockEnd();
        block()->set_end(end);
        break;
      }
      // Otherwise, fall thru
    }

  default:
    scope_data()->add_to_work_list(start_block);
    iterate_all_blocks();
    break;
  }
  CHECK_BAILOUT();

# ifdef ASSERT
  // For all blocks reachable from start_block: _end must be non-null
  {
    BlockList processed;
    BlockList to_go;
    to_go.append(start_block);
    while (to_go.length() > 0) {
      BlockBegin* current = to_go.pop();
      assert(current != nullptr, "Should not happen.");
      assert(current->end() != nullptr, "All blocks reachable from start_block should have end() != nullptr.");
      processed.append(current);
      for (int i = 0; i < current->number_of_sux(); i++) {
        BlockBegin* s = current->sux_at(i);
        if (!processed.contains(s)) {
          to_go.append(s);
        }
      }
    }
  }
#endif // ASSERT

  _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);

  eliminate_redundant_phis(_start);

  NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
  // for osr compile, bailout if some requirements are not fulfilled
  if (osr_bci != -1) {
    BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
    if (!osr_block->is_set(BlockBegin::was_visited_flag)) {
      BAILOUT("osr entry must have been visited for osr compile");
    }

    // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
    if (!osr_block->state()->stack_is_empty()) {
      BAILOUT("stack not empty at OSR entry point");
    }
  }
#ifndef PRODUCT
  if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
#endif
}


ValueStack* GraphBuilder::copy_state_before() {
  return copy_state_before_with_bci(bci());
}

ValueStack* GraphBuilder::copy_state_exhandling() {
  return copy_state_exhandling_with_bci(bci());
}

ValueStack* GraphBuilder::copy_state_for_exception() {
  return copy_state_for_exception_with_bci(bci());
}

ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) {
  return state()->copy(ValueStack::StateBefore, bci);
}

ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) {
  if (!has_handler()) return nullptr;
  return state()->copy(ValueStack::StateBefore, bci);
}

ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) {
  ValueStack* s = copy_state_exhandling_with_bci(bci);
  if (s == nullptr) {
    // no handler, no need to retain locals
    ValueStack::Kind exc_kind = ValueStack::empty_exception_kind();
    s = state()->copy(exc_kind, bci);
  }
  return s;
}
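
// Minimal usage sketch (mirroring the _arraylength case in
// iterate_bytecodes_for_block() above, shown here only to contrast the
// copy_state_* flavors):
//
//   ValueStack* state_before = copy_state_for_exception();
//   ipush(append(new ArrayLength(apop(), state_before)));
//
// copy_state_before() always keeps stack and locals for deoptimization;
// copy_state_exhandling() returns nullptr when no handler is in scope;
// copy_state_for_exception() additionally falls back to an empty exception
// state so stack traces stay correct even without a handler.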

int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
  int recur_level = 0;
  for (IRScope* s = scope(); s != nullptr; s = s->caller()) {
    if (s->method() == cur_callee) {
      ++recur_level;
    }
  }
  return recur_level;
}
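
// Illustrative example (hypothetical scope chain): if the current chain of
// inlined scopes, innermost first, is b() <- a() <- b() <- a() (the root),
// then recursive_inline_level(b) counts two scopes running b and returns 2,
// which try_inline_full() below compares against C1MaxRecursiveInlineLevel.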

static void set_flags_for_inlined_callee(Compilation* compilation, ciMethod* callee) {
  if (callee->has_reserved_stack_access()) {
    compilation->set_has_reserved_stack_access(true);
  }
  if (callee->is_synchronized() || callee->has_monitor_bytecodes()) {
    compilation->set_has_monitors(true);
  }
  if (callee->is_scoped()) {
    compilation->set_has_scoped_access(true);
  }
}

bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) {
  const char* msg = nullptr;

  // clear out any existing inline bailout condition
  clear_inline_bailout();

  // exclude methods we don't want to inline
  msg = should_not_inline(callee);
  if (msg != nullptr) {
    print_inlining(callee, msg, /*success*/ false);
    return false;
  }

  // method handle invokes
  if (callee->is_method_handle_intrinsic()) {
    if (try_method_handle_inline(callee, ignore_return)) {
      set_flags_for_inlined_callee(compilation(), callee);
      return true;
    }
    return false;
  }

  // handle intrinsics
  if (callee->intrinsic_id() != vmIntrinsics::_none &&
      callee->check_intrinsic_candidate()) {
    if (try_inline_intrinsics(callee, ignore_return)) {
      print_inlining(callee, "intrinsic");
      set_flags_for_inlined_callee(compilation(), callee);
      return true;
    }
    // try normal inlining
  }

  // certain methods cannot be parsed at all
  msg = check_can_parse(callee);
  if (msg != nullptr) {
    print_inlining(callee, msg, /*success*/ false);
    return false;
  }

  // If the bytecode is not set, use the current one.
  if (bc == Bytecodes::_illegal) {
    bc = code();
  }
  if (try_inline_full(callee, holder_known, ignore_return, bc, receiver)) {
    set_flags_for_inlined_callee(compilation(), callee);
    return true;
  }

  // Entire compilation could fail during try_inline_full call.
  // In that case printing inlining decision info is useless.
  if (!bailed_out())
    print_inlining(callee, _inline_bailout_msg, /*success*/ false);

  return false;
}


const char* GraphBuilder::check_can_parse(ciMethod* callee) const {
  // Certain methods cannot be parsed at all:
  if ( callee->is_native())     return "native method";
  if ( callee->is_abstract())   return "abstract method";
  if (!callee->can_be_parsed()) return "cannot be parsed";
  return nullptr;
}

// negative filter: should callee NOT be inlined? Returns nullptr (ok to inline) or a rejection msg.
const char* GraphBuilder::should_not_inline(ciMethod* callee) const {
  if (compilation()->directive()->should_not_inline(callee)) return "disallowed by CompileCommand";
  if (callee->dont_inline())                                 return "don't inline by annotation";
  return nullptr;
}

void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_return) {
  vmIntrinsics::ID id = callee->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  // Some intrinsics need special IR nodes.
  switch (id) {
  case vmIntrinsics::_getReference          : append_unsafe_get(callee, T_OBJECT,  false); return;
  case vmIntrinsics::_getBoolean            : append_unsafe_get(callee, T_BOOLEAN, false); return;
  case vmIntrinsics::_getByte               : append_unsafe_get(callee, T_BYTE,    false); return;
  case vmIntrinsics::_getShort              : append_unsafe_get(callee, T_SHORT,   false); return;
  case vmIntrinsics::_getChar               : append_unsafe_get(callee, T_CHAR,    false); return;
  case vmIntrinsics::_getInt                : append_unsafe_get(callee, T_INT,     false); return;
  case vmIntrinsics::_getLong               : append_unsafe_get(callee, T_LONG,    false); return;
  case vmIntrinsics::_getFloat              : append_unsafe_get(callee, T_FLOAT,   false); return;
  case vmIntrinsics::_getDouble             : append_unsafe_get(callee, T_DOUBLE,  false); return;
  case vmIntrinsics::_putReference          : append_unsafe_put(callee, T_OBJECT,  false); return;
  case vmIntrinsics::_putBoolean            : append_unsafe_put(callee, T_BOOLEAN, false); return;
  case vmIntrinsics::_putByte               : append_unsafe_put(callee, T_BYTE,    false); return;
  case vmIntrinsics::_putShort              : append_unsafe_put(callee, T_SHORT,   false); return;
  case vmIntrinsics::_putChar               : append_unsafe_put(callee, T_CHAR,    false); return;
  case vmIntrinsics::_putInt                : append_unsafe_put(callee, T_INT,     false); return;
  case vmIntrinsics::_putLong               : append_unsafe_put(callee, T_LONG,    false); return;
  case vmIntrinsics::_putFloat              : append_unsafe_put(callee, T_FLOAT,   false); return;
  case vmIntrinsics::_putDouble             : append_unsafe_put(callee, T_DOUBLE,  false); return;
  case vmIntrinsics::_getShortUnaligned     : append_unsafe_get(callee, T_SHORT,   false); return;
  case vmIntrinsics::_getCharUnaligned      : append_unsafe_get(callee, T_CHAR,    false); return;
  case vmIntrinsics::_getIntUnaligned       : append_unsafe_get(callee, T_INT,     false); return;
  case vmIntrinsics::_getLongUnaligned      : append_unsafe_get(callee, T_LONG,    false); return;
  case vmIntrinsics::_putShortUnaligned     : append_unsafe_put(callee, T_SHORT,   false); return;
  case vmIntrinsics::_putCharUnaligned      : append_unsafe_put(callee, T_CHAR,    false); return;
  case vmIntrinsics::_putIntUnaligned       : append_unsafe_put(callee, T_INT,     false); return;
  case vmIntrinsics::_putLongUnaligned      : append_unsafe_put(callee, T_LONG,    false); return;
  case vmIntrinsics::_getReferenceVolatile  : append_unsafe_get(callee, T_OBJECT,  true); return;
  case vmIntrinsics::_getBooleanVolatile    : append_unsafe_get(callee, T_BOOLEAN, true); return;
  case vmIntrinsics::_getByteVolatile       : append_unsafe_get(callee, T_BYTE,    true); return;
  case vmIntrinsics::_getShortVolatile      : append_unsafe_get(callee, T_SHORT,   true); return;
  case vmIntrinsics::_getCharVolatile       : append_unsafe_get(callee, T_CHAR,    true); return;
  case vmIntrinsics::_getIntVolatile        : append_unsafe_get(callee, T_INT,     true); return;
  case vmIntrinsics::_getLongVolatile       : append_unsafe_get(callee, T_LONG,    true); return;
  case vmIntrinsics::_getFloatVolatile      : append_unsafe_get(callee, T_FLOAT,   true); return;
  case vmIntrinsics::_getDoubleVolatile     : append_unsafe_get(callee, T_DOUBLE,  true); return;
  case vmIntrinsics::_putReferenceVolatile  : append_unsafe_put(callee, T_OBJECT,  true); return;
  case vmIntrinsics::_putBooleanVolatile    : append_unsafe_put(callee, T_BOOLEAN, true); return;
  case vmIntrinsics::_putByteVolatile       : append_unsafe_put(callee, T_BYTE,    true); return;
  case vmIntrinsics::_putShortVolatile      : append_unsafe_put(callee, T_SHORT,   true); return;
  case vmIntrinsics::_putCharVolatile       : append_unsafe_put(callee, T_CHAR,    true); return;
  case vmIntrinsics::_putIntVolatile        : append_unsafe_put(callee, T_INT,     true); return;
  case vmIntrinsics::_putLongVolatile       : append_unsafe_put(callee, T_LONG,    true); return;
  case vmIntrinsics::_putFloatVolatile      : append_unsafe_put(callee, T_FLOAT,   true); return;
  case vmIntrinsics::_putDoubleVolatile     : append_unsafe_put(callee, T_DOUBLE,  true); return;
  case vmIntrinsics::_compareAndSetLong:
  case vmIntrinsics::_compareAndSetInt:
  case vmIntrinsics::_compareAndSetReference: append_unsafe_CAS(callee); return;
  case vmIntrinsics::_getAndAddInt:
  case vmIntrinsics::_getAndAddLong         : append_unsafe_get_and_set(callee, true); return;
  case vmIntrinsics::_getAndSetInt          :
  case vmIntrinsics::_getAndSetLong         :
  case vmIntrinsics::_getAndSetReference    : append_unsafe_get_and_set(callee, false); return;
  case vmIntrinsics::_getCharStringU        : append_char_access(callee, false); return;
  case vmIntrinsics::_putCharStringU        : append_char_access(callee, true); return;
  case vmIntrinsics::_clone                 : append_alloc_array_copy(callee); return;
  default:
    break;
  }
  if (_inline_bailout_msg != nullptr) {
    return;
  }

  // create intrinsic node
  const bool has_receiver = !callee->is_static();
  ValueType* result_type = as_ValueType(callee->return_type());
  ValueStack* state_before = copy_state_for_exception();

  Values* args = state()->pop_arguments(callee->arg_size());

  if (is_profiling()) {
    // Don't profile in the special case where the root method
    // is the intrinsic
    if (callee != method()) {
      // Note that we'd collect profile data in this method if we wanted it.
      compilation()->set_would_profile(true);
      if (profile_calls()) {
        Value recv = nullptr;
        if (has_receiver) {
          recv = args->at(0);
          null_check(recv);
        }
        profile_call(callee, recv, nullptr, collect_args_for_profiling(args, callee, true), true);
      }
    }
  }

  Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(),
                                    args, has_receiver, state_before,
                                    vmIntrinsics::preserves_state(id),
                                    vmIntrinsics::can_trap(id));
  // append instruction & push result
  Value value = append_split(result);
  if (result_type != voidType && !ignore_return) {
    push(result_type, value);
  }

  if (callee != method() && profile_return() && result_type->is_object_kind()) {
    profile_return_type(result, callee);
  }
}

bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) {
  // For calling is_intrinsic_available we need to transition to
  // the '_thread_in_vm' state because is_intrinsic_available()
  // accesses critical VM-internal data.
  bool is_available = false;
  {
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, callee->get_Method());
    is_available = _compilation->compiler()->is_intrinsic_available(mh, _compilation->directive());
  }

  if (!is_available) {
    if (!InlineNatives) {
      // Return false and also set message that the inlining of
      // intrinsics has been disabled in general.
      INLINE_BAILOUT("intrinsic method inlining disabled");
    } else {
      return false;
    }
  }
  build_graph_for_intrinsic(callee, ignore_return);
  if (_inline_bailout_msg != nullptr) {
    return false;
  }
  return true;
}


bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
  // Introduce a new callee continuation point - all Ret instructions
  // will be replaced with Gotos to this point.
  if (next_bci() >= method()->code_size()) {
    return false;
  }
  BlockBegin* cont = block_at(next_bci());
  assert(cont != nullptr, "continuation must exist (BlockListBuilder starts a new block after a jsr)");

  // Note: cannot assign state to continuation yet, as we have to
  // pick up the state from the Ret instructions.

  // Push callee scope
  push_scope_for_jsr(cont, jsr_dest_bci);

  // Temporarily set up bytecode stream so we can append instructions
  // (only using the bci of this stream)
  scope_data()->set_stream(scope_data()->parent()->stream());

  BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
  assert(jsr_start_block != nullptr, "jsr start block must exist");
  assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
  Goto* goto_sub = new Goto(jsr_start_block, false);
  // Must copy state to avoid wrong sharing when parsing bytecodes
  assert(jsr_start_block->state() == nullptr, "should have fresh jsr starting block");
  jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci));
  append(goto_sub);
  _block->set_end(goto_sub);
  _last = _block = jsr_start_block;

  // Clear out bytecode stream
  scope_data()->set_stream(nullptr);

  scope_data()->add_to_work_list(jsr_start_block);

  // Ready to resume parsing in subroutine
  iterate_all_blocks();

  // If we bailed out during parsing, return immediately (this is bad news)
  CHECK_BAILOUT_(false);

  // Detect whether the continuation can actually be reached. If not,
  // it has not had state set by the join() operations in
  // iterate_bytecodes_for_block()/ret() and we should not touch the
  // iteration state. The calling activation of
  // iterate_bytecodes_for_block will then complete normally.
  if (cont->state() != nullptr) {
    if (!cont->is_set(BlockBegin::was_visited_flag)) {
      // add continuation to work list instead of parsing it immediately
      scope_data()->parent()->add_to_work_list(cont);
    }
  }

  assert(jsr_continuation() == cont, "continuation must not have changed");
  assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
         jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
         "continuation can only be visited in case of backward branches");
  assert(_last && _last->as_BlockEnd(), "block must have end");

  // continuation is in work list, so end iteration of current block
  _skip_block = true;
  pop_scope_for_jsr();

  return true;
}
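
// Illustrative sketch (hypothetical bytecodes, in the pattern old javac
// emitted for try/finally): given
//
//    5: jsr 20        // enter finally-subroutine; pushes return address
//    8: ...           // continuation (next_bci after the jsr)
//   20: astore_3      // subroutine stores the return address
//   21: ...           // finally body
//   25: ret 3         // normally jumps back through local 3
//
// try_inline_jsr() parses the subroutine inline: the jsr becomes a Goto to
// the block at bci 20, and each ret becomes a Goto to the continuation at
// bci 8, so no return-address value ever reaches the compiled code.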


// Inline the entry of a synchronized method as a monitor enter and
// register the exception handler which releases the monitor if an
// exception is thrown within the callee. Note that the monitor enter
// cannot throw an exception itself, because the receiver is
// guaranteed to be non-null by the explicit null check at the
// beginning of inlining.
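//
// Illustrative sketch (hypothetical callee): inlining
//
//   synchronized int size() { return _count; }
//
// conceptually produces, in the caller's IR,
//
//   monitorenter(recv)   // cannot throw here: recv was null-checked above
//   result = <inlined body of size()>
//   monitorexit(recv)    // normal path, emitted for the return
//
// plus the synthetic handler registered below and later filled in by
// fill_sync_handler(): ExceptionObject; monitorexit(recv); re-throw.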
4098 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
4099 assert(lock != nullptr && sync_handler != nullptr, "lock or handler missing");
4100
4101 monitorenter(lock, SynchronizationEntryBCI);
4102 assert(_last->as_MonitorEnter() != nullptr, "monitor enter expected");
4103 _last->set_needs_null_check(false);
4104
4105 sync_handler->set(BlockBegin::exception_entry_flag);
4106 sync_handler->set(BlockBegin::is_on_work_list_flag);
4107
4108 ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
4109 XHandler* h = new XHandler(desc);
4110 h->set_entry_block(sync_handler);
4111 scope_data()->xhandlers()->append(h);
4112 scope_data()->set_has_handler();
4113 }
4114
4115
4116 // If an exception is thrown and not handled within an inlined
4117 // synchronized method, the monitor must be released before the
4118 // exception is rethrown in the outer scope. Generate the appropriate
4119 // instructions here.
4120 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
4121 BlockBegin* orig_block = _block;
4122 ValueStack* orig_state = _state;
4123 Instruction* orig_last = _last;
4124 _last = _block = sync_handler;
4125 _state = sync_handler->state()->copy();
4126
4127 assert(sync_handler != nullptr, "handler missing");
4128 assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here");
4129
4130 assert(lock != nullptr || default_handler, "lock or handler missing");
4131
4132 XHandler* h = scope_data()->xhandlers()->remove_last();
4133 assert(h->entry_block() == sync_handler, "corrupt list of handlers");
4134
4135 block()->set(BlockBegin::was_visited_flag);
4136 Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
4137 assert(exception->is_pinned(), "must be");
4138
4139 int bci = SynchronizationEntryBCI;
4140 if (compilation()->env()->dtrace_method_probes()) {
4141 // Report exit from inline methods. We don't have a stream here
4142 // so pass an explicit bci of SynchronizationEntryBCI.
4143 Values* args = new Values(1);
4144 args->push(append_with_bci(new Constant(new MethodConstant(method())), bci));
4145 append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci);
4146 }
4147
4148 if (lock) {
4149 assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
4150 if (!lock->is_linked()) {
4151 lock = append_with_bci(lock, bci);
4152 }
4153
4154 // exit the monitor in the context of the synchronized method
4155 monitorexit(lock, bci);
4156
4157 // exit the context of the synchronized method
4158 if (!default_handler) {
4159 pop_scope();
4160 bci = _state->caller_state()->bci();
4161 _state = _state->caller_state()->copy_for_parsing();
4162 }
4163 }
4164
4165 // perform the throw as if at the call site
4166 apush(exception);
4167 throw_op(bci);
4168
4169 BlockEnd* end = last()->as_BlockEnd();
4170 block()->set_end(end);
4171
4172 _block = orig_block;
4173 _state = orig_state;
4174 _last = orig_last;
4175 }
4176
4177
4178 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) {
4179 assert(!callee->is_native(), "callee must not be native");
4180 if (CompilationPolicy::should_not_inline(compilation()->env(), callee)) {
4181 INLINE_BAILOUT("inlining prohibited by policy");
4182 }
4183 // first perform tests of things it's not possible to inline
4184 if (callee->has_exception_handlers() &&
4185 !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
4186 if (callee->is_synchronized() &&
4187 !InlineSynchronizedMethods ) INLINE_BAILOUT("callee is synchronized");
4188 if (!callee->holder()->is_linked()) INLINE_BAILOUT("callee's klass not linked yet");
4189 if (bc == Bytecodes::_invokestatic &&
4190 !callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet");
4191 if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match");
4192
4193 // Proper inlining of methods with jsrs requires a little more work.
4194 if (callee->has_jsrs() ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
4195
4196 if (is_profiling() && !callee->ensure_method_data()) {
4197 INLINE_BAILOUT("mdo allocation failed");
4198 }
4199
4200 const bool is_invokedynamic = (bc == Bytecodes::_invokedynamic);
4201 const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic);
4202
4203 const int args_base = state()->stack_size() - callee->arg_size();
4204 assert(args_base >= 0, "stack underflow during inlining");
4205
4206 Value recv = nullptr;
4207 if (has_receiver) {
4208 assert(!callee->is_static(), "callee must not be static");
4209 assert(callee->arg_size() > 0, "must have at least a receiver");
4210
4211 recv = state()->stack_at(args_base);
4212 if (recv->is_null_obj()) {
4213 INLINE_BAILOUT("receiver is always null");
4214 }
4215 }
4216
4217 // now perform tests that are based on flag settings
4218 bool inlinee_by_directive = compilation()->directive()->should_inline(callee);
4219 if (callee->force_inline() || inlinee_by_directive) {
4220 if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel");
4221 if (recursive_inline_level(callee) > C1MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
4222
4223 const char* msg = "";
4224 if (callee->force_inline()) msg = "force inline by annotation";
4225 if (inlinee_by_directive) msg = "force inline by CompileCommand";
4226 print_inlining(callee, msg);
4227 } else {
4228 // use heuristic controls on inlining
4229 if (inline_level() > C1MaxInlineLevel ) INLINE_BAILOUT("inlining too deep");
4230 int callee_recursive_level = recursive_inline_level(callee);
4231 if (callee_recursive_level > C1MaxRecursiveInlineLevel ) INLINE_BAILOUT("recursive inlining too deep");
4232 if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large");
4233 // Additional condition to limit stack usage for non-recursive calls.
4234 if ((callee_recursive_level == 0) &&
4235 (callee->max_stack() + callee->max_locals() - callee->size_of_parameters() > C1InlineStackLimit)) {
4236 INLINE_BAILOUT("callee uses too much stack");
4237 }
4238
4239 // don't inline throwable methods unless the inlining tree is rooted in a throwable class
4240 if (callee->name() == ciSymbols::object_initializer_name() &&
4241 callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
4242 // Throwable constructor call
4243 IRScope* top = scope();
4244 while (top->caller() != nullptr) {
4245 top = top->caller();
4246 }
4247 if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
4248 INLINE_BAILOUT("don't inline Throwable constructors");
4249 }
4250 }
4251
4252 if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
4253 INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
4254 }
4255 // printing
4256 print_inlining(callee, "inline", /*success*/ true);
4257 }
4258
4259 assert(bc != Bytecodes::_invokestatic || callee->holder()->is_initialized(), "required");
4260
4261 // NOTE: Bailouts from this point on, which occur at the
4262 // GraphBuilder level, do not cause bailout just of the inlining but
4263 // in fact of the entire compilation.
4264
4265 BlockBegin* orig_block = block();
4266
4267 // Insert null check if necessary
4268 if (has_receiver) {
    // note: the null check must happen even if the first instruction of the
    // callee does an implicit null check, since the callee is in a different
    // scope and we must make sure exception handling does the right thing
4272 null_check(recv);
4273 }
4274
4275 if (is_profiling()) {
4276 // Note that we'd collect profile data in this method if we wanted it.
4277 // this may be redundant here...
4278 compilation()->set_would_profile(true);
4279
4280 if (profile_calls()) {
4281 int start = 0;
4282 Values* obj_args = args_list_for_profiling(callee, start, has_receiver);
4283 if (obj_args != nullptr) {
4284 int s = obj_args->capacity();
4285 // if called through method handle invoke, some arguments may have been popped
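        // stack_at_inc returns the value at i and advances i by its size
        // (two slots for long/double); j counts only the object values
        // actually collected.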
4286 for (int i = args_base+start, j = 0; j < obj_args->capacity() && i < state()->stack_size(); ) {
4287 Value v = state()->stack_at_inc(i);
4288 if (v->type()->is_object_kind()) {
4289 obj_args->push(v);
4290 j++;
4291 }
4292 }
4293 check_args_for_profiling(obj_args, s);
4294 }
4295 profile_call(callee, recv, holder_known ? callee->holder() : nullptr, obj_args, true);
4296 }
4297 }
4298
4299 // Introduce a new callee continuation point - if the callee has
4300 // more than one return instruction or the return does not allow
4301 // fall-through of control flow, all return instructions of the
// callee will need to be replaced by Gotos pointing to this
4303 // continuation point.
4304 BlockBegin* cont = block_at(next_bci());
4305 bool continuation_existed = true;
4306 if (cont == nullptr) {
4307 cont = new BlockBegin(next_bci());
4308 // low number so that continuation gets parsed as early as possible
4309 cont->set_depth_first_number(0);
4310 if (PrintInitialBlockList) {
4311 tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
4312 cont->block_id(), cont->bci(), bci());
4313 }
4314 continuation_existed = false;
4315 }
4316 // Record number of predecessors of continuation block before
4317 // inlining, to detect if inlined method has edges to its
4318 // continuation after inlining.
4319 int continuation_preds = cont->number_of_preds();
4320
4321 // Push callee scope
4322 push_scope(callee, cont);
4323
4324 // the BlockListBuilder for the callee could have bailed out
4325 if (bailed_out())
4326 return false;
4327
4328 // Temporarily set up bytecode stream so we can append instructions
4329 // (only using the bci of this stream)
4330 scope_data()->set_stream(scope_data()->parent()->stream());
4331
4332 // Pass parameters into callee state: add assignments
4333 // note: this will also ensure that all arguments are computed before being passed
4334 ValueStack* callee_state = state();
4335 ValueStack* caller_state = state()->caller_state();
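  // stack_at_inc advances i by the argument's slot size, so arg_no lines
  // up with the callee's local slot for each argument.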
4336 for (int i = args_base; i < caller_state->stack_size(); ) {
4337 const int arg_no = i - args_base;
4338 Value arg = caller_state->stack_at_inc(i);
4339 store_local(callee_state, arg, arg_no);
4340 }
4341
4342 // Remove args from stack.
4343 // Note that we preserve locals state in case we can use it later
4344 // (see use of pop_scope() below)
4345 caller_state->truncate_stack(args_base);
4346 assert(callee_state->stack_size() == 0, "callee stack must be empty");
4347
4348 // Check if we need a membar at the beginning of the java.lang.Object
4349 // constructor to satisfy the memory model for strict fields.
4350 if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
4351 Value receiver = state()->local_at(0);
4352 ciType* klass = receiver->exact_type();
4353 if (klass == nullptr) {
4354 // No exact type, check if the declared type has no implementors and add a dependency
4355 klass = receiver->declared_type();
4356 klass = compilation()->cha_exact_type(klass);
4357 }
4358 if (klass != nullptr && klass->is_instance_klass()) {
4359 // Exact receiver type, check if there is a strict field
4360 ciInstanceKlass* holder = klass->as_instance_klass();
4361 for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
4362 ciField* field = holder->nonstatic_field_at(i);
4363 if (field->is_strict()) {
4364 // Found a strict field, a membar is needed
4365 append(new MemBar(lir_membar_storestore));
4366 break;
4367 }
4368 }
4369 } else if (klass == nullptr) {
4370 // We can't statically determine the type of the receiver and therefore need
4371 // to put a membar here because it could have a strict field.
4372 append(new MemBar(lir_membar_storestore));
4373 }
4374 }
4375
4376 Value lock = nullptr;
4377 BlockBegin* sync_handler = nullptr;
4378
4379 // Inline the locking of the receiver if the callee is synchronized
4380 if (callee->is_synchronized()) {
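    // A static synchronized callee locks its holder's class mirror;
    // otherwise we lock the receiver in local 0.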
4381 lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
4382 : state()->local_at(0);
4383 sync_handler = new BlockBegin(SynchronizationEntryBCI);
4384 inline_sync_entry(lock, sync_handler);
4385 }
4386
4387 if (compilation()->env()->dtrace_method_probes()) {
4388 Values* args = new Values(1);
4389 args->push(append(new Constant(new MethodConstant(method()))));
4390 append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
4391 }
4392
4393 if (profile_inlined_calls()) {
4394 profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
4395 }
4396
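  // A non-null block at bci 0 means the callee's entry is a loop header
  // that push_scope() kept, so we cannot simply fall through into the
  // callee: branch to it explicitly and merge the entry state.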
4397 BlockBegin* callee_start_block = block_at(0);
4398 if (callee_start_block != nullptr) {
4399 assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
4400 Goto* goto_callee = new Goto(callee_start_block, false);
4401 // The state for this goto is in the scope of the callee, so use
4402 // the entry bci for the callee instead of the call site bci.
4403 append_with_bci(goto_callee, 0);
4404 _block->set_end(goto_callee);
4405 callee_start_block->merge(callee_state, compilation()->has_irreducible_loops());
4406
4407 _last = _block = callee_start_block;
4408
4409 scope_data()->add_to_work_list(callee_start_block);
4410 }
4411
4412 // Clear out bytecode stream
4413 scope_data()->set_stream(nullptr);
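  // Record whether the caller discards the return value so that returns
  // parsed in the callee can drop it.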
4414 scope_data()->set_ignore_return(ignore_return);
4415
4416 CompileLog* log = compilation()->log();
4417 if (log != nullptr) log->head("parse method='%d'", log->identify(callee));
4418
4419 // Ready to resume parsing in callee (either in the same block we
4420 // were in before or in the callee's start block)
4421 iterate_all_blocks(callee_start_block == nullptr);
4422
4423 if (log != nullptr) log->done("parse");
4424
4425 // If we bailed out during parsing, return immediately (this is bad news)
4426 if (bailed_out())
4427 return false;
4428
4429 // iterate_all_blocks theoretically traverses in random order; in
4430 // practice, we have only traversed the continuation if we are
4431 // inlining into a subroutine
4432 assert(continuation_existed ||
4433 !continuation()->is_set(BlockBegin::was_visited_flag),
4434 "continuation should not have been parsed yet if we created it");
4435
4436 // At this point we are almost ready to return and resume parsing of
4437 // the caller back in the GraphBuilder. The only thing we want to do
4438 // first is an optimization: during parsing of the callee we
4439 // generated at least one Goto to the continuation block. If we
4440 // generated exactly one, and if the inlined method spanned exactly
4441 // one block (and we didn't have to Goto its entry), then we snip
4442 // off the Goto to the continuation, allowing control to fall
4443 // through back into the caller block and effectively performing
4444 // block merging. This allows load elimination and CSE to take place
4445 // across multiple callee scopes if they are relatively simple, and
4446 // is currently essential to making inlining profitable.
4447 if (num_returns() == 1
4448 && block() == orig_block
4449 && block() == inline_cleanup_block()) {
4450 _last = inline_cleanup_return_prev();
4451 _state = inline_cleanup_state();
4452 } else if (continuation_preds == cont->number_of_preds()) {
    // Inlining has made the instructions after the invoke in the
    // caller unreachable. So skip filling this block with
    // instructions!
4456 assert(cont == continuation(), "");
4457 assert(_last && _last->as_BlockEnd(), "");
4458 _skip_block = true;
4459 } else {
4460 // Resume parsing in continuation block unless it was already parsed.
4461 // Note that if we don't change _last here, iteration in
4462 // iterate_bytecodes_for_block will stop when we return.
4463 if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
4464 // add continuation to work list instead of parsing it immediately
4465 assert(_last && _last->as_BlockEnd(), "");
4466 scope_data()->parent()->add_to_work_list(continuation());
4467 _skip_block = true;
4468 }
4469 }
4470
4471 // Fill the exception handler for synchronized methods with instructions
4472 if (callee->is_synchronized() && sync_handler->state() != nullptr) {
4473 fill_sync_handler(lock, sync_handler);
4474 } else {
4475 pop_scope();
4476 }
4477
4478 compilation()->notice_inlined_method(callee);
4479
4480 return true;
4481 }
4482
4483
4484 bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return) {
4485 ValueStack* state_before = copy_state_before();
4486 vmIntrinsics::ID iid = callee->intrinsic_id();
4487 switch (iid) {
4488 case vmIntrinsics::_invokeBasic:
4489 {
4490 // get MethodHandle receiver
4491 const int args_base = state()->stack_size() - callee->arg_size();
4492 ValueType* type = state()->stack_at(args_base)->type();
4493 if (type->is_constant()) {
4494 ciObject* mh = type->as_ObjectType()->constant_value();
4495 if (mh->is_method_handle()) {
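          // A constant MethodHandle lets us look up the method it
          // ultimately dispatches to (its vmtarget).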
4496 ciMethod* target = mh->as_method_handle()->get_vmtarget();
4497
4498 // We don't do CHA here so only inline static and statically bindable methods.
4499 if (target->is_static() || target->can_be_statically_bound()) {
4500 if (ciMethod::is_consistent_info(callee, target)) {
4501 Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
4502 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void());
4503 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) {
4504 return true;
4505 }
4506 } else {
4507 print_inlining(target, "signatures mismatch", /*success*/ false);
4508 }
4509 } else {
4510 assert(false, "no inlining through MH::invokeBasic"); // missing optimization opportunity due to suboptimal LF shape
4511 print_inlining(target, "not static or statically bindable", /*success*/ false);
4512 }
4513 } else {
4514 assert(mh->is_null_object(), "not a null");
4515 print_inlining(callee, "receiver is always null", /*success*/ false);
4516 }
4517 } else {
4518 print_inlining(callee, "receiver not constant", /*success*/ false);
4519 }
4520 }
4521 break;
4522
4523 case vmIntrinsics::_linkToVirtual:
4524 case vmIntrinsics::_linkToStatic:
4525 case vmIntrinsics::_linkToSpecial:
4526 case vmIntrinsics::_linkToInterface:
4527 {
4528 // pop MemberName argument
4529 const int args_base = state()->stack_size() - callee->arg_size();
4530 ValueType* type = apop()->type();
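      // args_base was computed before the MemberName was popped, so it now
      // indexes the first real argument (the receiver for non-static targets).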
4531 if (type->is_constant()) {
4532 ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
4533 ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void());
4534 // If the target is another method handle invoke, try to recursively get
4535 // a better target.
4536 if (target->is_method_handle_intrinsic()) {
4537 if (try_method_handle_inline(target, ignore_return)) {
4538 return true;
4539 }
4540 } else if (!ciMethod::is_consistent_info(callee, target)) {
4541 print_inlining(target, "signatures mismatch", /*success*/ false);
4542 } else {
4543 ciSignature* signature = target->signature();
4544 const int receiver_skip = target->is_static() ? 0 : 1;
4545 // Cast receiver to its type.
4546 if (!target->is_static()) {
4547 ciKlass* tk = signature->accessing_klass();
4548 Value obj = state()->stack_at(args_base);
4549 if (obj->exact_type() == nullptr &&
4550 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
4551 TypeCast* c = new TypeCast(tk, obj, state_before);
4552 append(c);
4553 state()->stack_at_put(args_base, c);
4554 }
4555 }
          // Cast reference arguments to their types.
4557 for (int i = 0, j = 0; i < signature->count(); i++) {
4558 ciType* t = signature->type_at(i);
4559 if (t->is_klass()) {
4560 ciKlass* tk = t->as_klass();
4561 Value obj = state()->stack_at(args_base + receiver_skip + j);
4562 if (obj->exact_type() == nullptr &&
4563 obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
4564 TypeCast* c = new TypeCast(t, obj, state_before);
4565 append(c);
4566 state()->stack_at_put(args_base + receiver_skip + j, c);
4567 }
4568 }
4569 j += t->size(); // long and double take two slots
4570 }
4571 // We don't do CHA here so only inline static and statically bindable methods.
4572 if (target->is_static() || target->can_be_statically_bound()) {
4573 Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
4574 if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) {
4575 return true;
4576 }
4577 } else {
4578 print_inlining(target, "not static or statically bindable", /*success*/ false);
4579 }
4580 }
4581 } else {
4582 print_inlining(callee, "MemberName not constant", /*success*/ false);
4583 }
4584 }
4585 break;
4586
4587 case vmIntrinsics::_linkToNative:
4588 print_inlining(callee, "native call", /*success*/ false);
4589 break;
4590
4591 default:
4592 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
4593 break;
4594 }
4595 set_state(state_before->copy_for_parsing());
4596 return false;
4597 }
4598
4599
4600 void GraphBuilder::inline_bailout(const char* msg) {
4601 assert(msg != nullptr, "inline bailout msg must exist");
4602 _inline_bailout_msg = msg;
4603 }
4604
4605
4606 void GraphBuilder::clear_inline_bailout() {
4607 _inline_bailout_msg = nullptr;
4608 }
4609
4610
4611 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
4612 ScopeData* data = new ScopeData(nullptr);
4613 data->set_scope(scope);
4614 data->set_bci2block(bci2block);
4615 _scope_data = data;
4616 _block = start;
4617 }
4618
4619
4620 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
4621 IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
4622 scope()->add_callee(callee_scope);
4623
4624 BlockListBuilder blb(compilation(), callee_scope, -1);
4625 CHECK_BAILOUT();
4626
4627 if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
4628 // this scope can be inlined directly into the caller so remove
4629 // the block at bci 0.
4630 blb.bci2block()->at_put(0, nullptr);
4631 }
4632
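  // The callee parses with a fresh ValueStack whose caller state is a
  // snapshot of the current frame at the call bci.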
4633 set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci())));
4634
4635 ScopeData* data = new ScopeData(scope_data());
4636 data->set_scope(callee_scope);
4637 data->set_bci2block(blb.bci2block());
4638 data->set_continuation(continuation);
4639 _scope_data = data;
4640 }
4641
4642
4643 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
4644 ScopeData* data = new ScopeData(scope_data());
4645 data->set_parsing_jsr();
4646 data->set_jsr_entry_bci(jsr_dest_bci);
4647 data->set_jsr_return_address_local(-1);
4648 // Must clone bci2block list as we will be mutating it in order to
4649 // properly clone all blocks in jsr region as well as exception
4650 // handlers containing rets
4651 BlockList* new_bci2block = new BlockList(bci2block()->length());
4652 new_bci2block->appendAll(bci2block());
4653 data->set_bci2block(new_bci2block);
4654 data->set_scope(scope());
4655 data->setup_jsr_xhandlers();
4656 data->set_continuation(continuation());
4657 data->set_jsr_continuation(jsr_continuation);
4658 _scope_data = data;
4659 }
4660
4661
4662 void GraphBuilder::pop_scope() {
4663 int number_of_locks = scope()->number_of_locks();
4664 _scope_data = scope_data()->parent();
4665 // accumulate minimum number of monitor slots to be reserved
4666 scope()->set_min_number_of_locks(number_of_locks);
4667 }
4668
4669
4670 void GraphBuilder::pop_scope_for_jsr() {
4671 _scope_data = scope_data()->parent();
4672 }
4673
4674 void GraphBuilder::append_unsafe_get(ciMethod* callee, BasicType t, bool is_volatile) {
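  // Arguments are (Unsafe, Object base, long offset); the Unsafe receiver
  // is only null-checked, it is not needed for code generation.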
4675 Values* args = state()->pop_arguments(callee->arg_size());
4676 null_check(args->at(0));
4677 Instruction* offset = args->at(2);
4678 #ifndef _LP64
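  // On 32-bit platforms, narrow the 64-bit offset to an int.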
4679 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
4680 #endif
4681 Instruction* op = append(new UnsafeGet(t, args->at(1), offset, is_volatile));
4682 push(op->type(), op);
4683 compilation()->set_has_unsafe_access(true);
4684 }
4685
4686
4687 void GraphBuilder::append_unsafe_put(ciMethod* callee, BasicType t, bool is_volatile) {
4688 Values* args = state()->pop_arguments(callee->arg_size());
4689 null_check(args->at(0));
4690 Instruction* offset = args->at(2);
4691 #ifndef _LP64
4692 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
4693 #endif
4694 Value val = args->at(3);
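  // A boolean is stored as a single byte; mask the value to 0/1 so stray
  // upper bits never reach memory.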
4695 if (t == T_BOOLEAN) {
4696 Value mask = append(new Constant(new IntConstant(1)));
4697 val = append(new LogicOp(Bytecodes::_iand, val, mask));
4698 }
4699 Instruction* op = append(new UnsafePut(t, args->at(1), offset, val, is_volatile));
4700 compilation()->set_has_unsafe_access(true);
4701 kill_all();
4702 }
4703
4704 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
4705 ValueStack* state_before = copy_state_for_exception();
4706 ValueType* result_type = as_ValueType(callee->return_type());
4707 assert(result_type->is_int(), "int result");
4708 Values* args = state()->pop_arguments(callee->arg_size());
4709
4710 // Pop off some args to specially handle, then push back
4711 Value newval = args->pop();
4712 Value cmpval = args->pop();
4713 Value offset = args->pop();
4714 Value src = args->pop();
4715 Value unsafe_obj = args->pop();
4716
4717 // Separately handle the unsafe arg. It is not needed for code
4718 // generation, but must be null checked
4719 null_check(unsafe_obj);
4720
4721 #ifndef _LP64
4722 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
4723 #endif
4724
4725 args->push(src);
4726 args->push(offset);
4727 args->push(cmpval);
4728 args->push(newval);
4729
4730 // An unsafe CAS can alias with other field accesses, but we don't
  // know which ones, so mark the state as not preserved.  This will
4732 // cause CSE to invalidate memory across it.
4733 bool preserves_state = false;
4734 Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
4735 append_split(result);
4736 push(result_type, result);
4737 compilation()->set_has_unsafe_access(true);
4738 }
4739
4740 void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) {
  // This intrinsic accesses a byte[] array as a char[] array. Computing the offsets
4742 // correctly requires matched array shapes.
4743 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
4744 "sanity: byte[] and char[] bases agree");
4745 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
4746 "sanity: byte[] and char[] scales agree");
4747
4748 ValueStack* state_before = copy_state_indexed_access();
4749 compilation()->set_has_access_indexed(true);
4750 Values* args = state()->pop_arguments(callee->arg_size());
4751 Value array = args->at(0);
4752 Value index = args->at(1);
4753 if (is_store) {
4754 Value value = args->at(2);
4755 Instruction* store = append(new StoreIndexed(array, index, nullptr, T_CHAR, value, state_before, false, true));
4756 store->set_flag(Instruction::NeedsRangeCheckFlag, false);
4757 _memory->store_value(value);
4758 } else {
4759 Instruction* load = append(new LoadIndexed(array, index, nullptr, T_CHAR, state_before, true));
4760 load->set_flag(Instruction::NeedsRangeCheckFlag, false);
4761 push(load->type(), load);
4762 }
4763 }
4764
4765 void GraphBuilder::append_alloc_array_copy(ciMethod* callee) {
4766 const int args_base = state()->stack_size() - callee->arg_size();
4767 ciType* receiver_type = state()->stack_at(args_base)->exact_type();
4768 if (receiver_type == nullptr) {
4769 inline_bailout("must have a receiver");
4770 return;
4771 }
4772 if (!receiver_type->is_type_array_klass()) {
4773 inline_bailout("clone array not primitive");
4774 return;
4775 }
4776
4777 ValueStack* state_before = copy_state_before();
4778 state_before->set_force_reexecute();
4779 Value src = apop();
4780 BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type();
4781 Value length = append(new ArrayLength(src, state_before));
4782 Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false));
4783
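  // Copy the whole source array (0..length) into the freshly allocated
  // one; with statically known bounds the intrinsic's range and store
  // checks can be omitted (see OmitChecksFlag below).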
4784 ValueType* result_type = as_ValueType(callee->return_type());
4785 vmIntrinsics::ID id = vmIntrinsics::_arraycopy;
4786 Values* args = new Values(5);
4787 args->push(src);
4788 args->push(append(new Constant(new IntConstant(0))));
4789 args->push(new_array);
4790 args->push(append(new Constant(new IntConstant(0))));
4791 args->push(length);
4792 const bool has_receiver = true;
4793 Intrinsic* array_copy = new Intrinsic(result_type, id,
4794 args, has_receiver, state_before,
4795 vmIntrinsics::preserves_state(id),
4796 vmIntrinsics::can_trap(id));
4797 array_copy->set_flag(Instruction::OmitChecksFlag, true);
4798 append_split(array_copy);
4799 apush(new_array);
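  // storestore barrier keeps the initializing copy ordered before the new
  // array escapes.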
4800 append(new MemBar(lir_membar_storestore));
4801 }
4802
4803 void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
4804 CompileLog* log = compilation()->log();
4805 if (log != nullptr) {
4806 assert(msg != nullptr, "inlining msg should not be null!");
4807 if (success) {
4808 log->inline_success(msg);
4809 } else {
4810 log->inline_fail(msg);
4811 }
4812 }
4813 EventCompilerInlining event;
4814 if (event.should_commit()) {
4815 CompilerEvent::InlineEvent::post(event, compilation()->env()->task()->compile_id(), method()->get_Method(), callee, success, msg, bci());
4816 }
4817
4818 CompileTask::print_inlining_ul(callee, scope()->level(), bci(), inlining_result_of(success), msg);
4819
4820 if (!compilation()->directive()->PrintInliningOption) {
4821 return;
4822 }
4823 CompileTask::print_inlining_tty(callee, scope()->level(), bci(), inlining_result_of(success), msg);
4824 if (success && CIPrintMethodCodes) {
4825 callee->print_codes();
4826 }
4827 }
4828
4829 void GraphBuilder::append_unsafe_get_and_set(ciMethod* callee, bool is_add) {
4830 Values* args = state()->pop_arguments(callee->arg_size());
4831 BasicType t = callee->return_type()->basic_type();
4832 null_check(args->at(0));
4833 Instruction* offset = args->at(2);
4834 #ifndef _LP64
4835 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
4836 #endif
4837 Instruction* op = append(new UnsafeGetAndSet(t, args->at(1), offset, args->at(3), is_add));
4838 compilation()->set_has_unsafe_access(true);
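  // The atomic update may alias arbitrary memory, so invalidate the local
  // value map before publishing the result.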
4839 kill_all();
4840 push(op->type(), op);
4841 }
4842
4843 #ifndef PRODUCT
4844 void GraphBuilder::print_stats() {
4845 if (UseLocalValueNumbering) {
4846 vmap()->print();
4847 }
4848 }
4849 #endif // PRODUCT
4850
4851 void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
4852 assert(known_holder == nullptr || (known_holder->is_instance_klass() &&
4853 (!known_holder->is_interface() ||
4854 ((ciInstanceKlass*)known_holder)->has_nonstatic_concrete_methods())), "should be non-static concrete method");
4855 if (known_holder != nullptr) {
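    // If the holder is not provably exact, CHA may still supply a unique
    // concrete subtype to profile against.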
4856 if (known_holder->exact_klass() == nullptr) {
4857 known_holder = compilation()->cha_exact_type(known_holder);
4858 }
4859 }
4860
4861 append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
4862 }
4863
4864 void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
4865 assert((m == nullptr) == (invoke_bci < 0), "invalid method and invalid bci together");
4866 if (m == nullptr) {
4867 m = method();
4868 }
4869 if (invoke_bci < 0) {
4870 invoke_bci = bci();
4871 }
4872 ciMethodData* md = m->method_data_or_null();
4873 ciProfileData* data = md->bci_to_data(invoke_bci);
4874 if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
4875 bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
4876 if (has_return) {
4877 append(new ProfileReturnType(m , invoke_bci, callee, ret));
4878 }
4879 }
4880 }
4881
4882 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
4883 append(new ProfileInvoke(callee, state));
4884 }