1 /*
2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "ci/ciReplay.hpp"
29 #include "classfile/javaClasses.hpp"
30 #include "code/exceptionHandlerTable.hpp"
31 #include "code/nmethod.hpp"
32 #include "compiler/compileBroker.hpp"
33 #include "compiler/compileLog.hpp"
34 #include "compiler/disassembler.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/shared/barrierSet.hpp"
37 #include "gc/shared/c2/barrierSetC2.hpp"
38 #include "jfr/jfrEvents.hpp"
39 #include "jvm_io.h"
40 #include "memory/allocation.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "opto/addnode.hpp"
43 #include "opto/block.hpp"
44 #include "opto/c2compiler.hpp"
45 #include "opto/callGenerator.hpp"
46 #include "opto/callnode.hpp"
47 #include "opto/castnode.hpp"
48 #include "opto/cfgnode.hpp"
49 #include "opto/chaitin.hpp"
50 #include "opto/compile.hpp"
51 #include "opto/connode.hpp"
52 #include "opto/convertnode.hpp"
53 #include "opto/divnode.hpp"
54 #include "opto/escape.hpp"
55 #include "opto/idealGraphPrinter.hpp"
56 #include "opto/locknode.hpp"
57 #include "opto/loopnode.hpp"
58 #include "opto/machnode.hpp"
59 #include "opto/macro.hpp"
60 #include "opto/matcher.hpp"
61 #include "opto/mathexactnode.hpp"
62 #include "opto/memnode.hpp"
63 #include "opto/mulnode.hpp"
64 #include "opto/narrowptrnode.hpp"
65 #include "opto/node.hpp"
66 #include "opto/opcodes.hpp"
67 #include "opto/output.hpp"
68 #include "opto/parse.hpp"
69 #include "opto/phaseX.hpp"
70 #include "opto/rootnode.hpp"
71 #include "opto/runtime.hpp"
72 #include "opto/stringopts.hpp"
73 #include "opto/type.hpp"
74 #include "opto/vector.hpp"
75 #include "opto/vectornode.hpp"
76 #include "runtime/globals_extension.hpp"
77 #include "runtime/sharedRuntime.hpp"
78 #include "runtime/signature.hpp"
79 #include "runtime/stubRoutines.hpp"
80 #include "runtime/timer.hpp"
81 #include "utilities/align.hpp"
82 #include "utilities/copy.hpp"
83 #include "utilities/macros.hpp"
84 #include "utilities/resourceHash.hpp"
85
86 // -------------------- Compile::mach_constant_base_node -----------------------
87 // Constant table base node singleton.
88 MachConstantBaseNode* Compile::mach_constant_base_node() {
89 if (_mach_constant_base_node == nullptr) {
90 _mach_constant_base_node = new MachConstantBaseNode();
91 _mach_constant_base_node->add_req(C->root());
92 }
93 return _mach_constant_base_node;
94 }
95
96
97 /// Support for intrinsics.
98
99 // Return the index at which m must be inserted (or already exists).
100 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
101 class IntrinsicDescPair {
102 private:
103 ciMethod* _m;
104 bool _is_virtual;
105 public:
106 IntrinsicDescPair(ciMethod* m, bool is_virtual) : _m(m), _is_virtual(is_virtual) {}
107 static int compare(IntrinsicDescPair* const& key, CallGenerator* const& elt) {
108 ciMethod* m= elt->method();
109 ciMethod* key_m = key->_m;
110 if (key_m < m) return -1;
111 else if (key_m > m) return 1;
112 else {
113 bool is_virtual = elt->is_virtual();
114 bool key_virtual = key->_is_virtual;
115 if (key_virtual < is_virtual) return -1;
116 else if (key_virtual > is_virtual) return 1;
117 else return 0;
118 }
119 }
120 };
121 int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found) {
122 #ifdef ASSERT
123 for (int i = 1; i < _intrinsics.length(); i++) {
124 CallGenerator* cg1 = _intrinsics.at(i-1);
125 CallGenerator* cg2 = _intrinsics.at(i);
126 assert(cg1->method() != cg2->method()
127 ? cg1->method() < cg2->method()
128 : cg1->is_virtual() < cg2->is_virtual(),
129 "compiler intrinsics list must stay sorted");
130 }
131 #endif
132 IntrinsicDescPair pair(m, is_virtual);
133 return _intrinsics.find_sorted<IntrinsicDescPair*, IntrinsicDescPair::compare>(&pair, found);
134 }
135
136 void Compile::register_intrinsic(CallGenerator* cg) {
137 bool found = false;
138 int index = intrinsic_insertion_index(cg->method(), cg->is_virtual(), found);
139 assert(!found, "registering twice");
140 _intrinsics.insert_before(index, cg);
141 assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
142 }
143
144 CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
145 assert(m->is_loaded(), "don't try this on unloaded methods");
146 if (_intrinsics.length() > 0) {
147 bool found = false;
148 int index = intrinsic_insertion_index(m, is_virtual, found);
149 if (found) {
150 return _intrinsics.at(index);
151 }
152 }
153 // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
154 if (m->intrinsic_id() != vmIntrinsics::_none &&
155 m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
156 CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
157 if (cg != nullptr) {
158 // Save it for next time:
159 register_intrinsic(cg);
160 return cg;
161 } else {
162 gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
163 }
164 }
165 return nullptr;
166 }
167
168 // Compile::make_vm_intrinsic is defined in library_call.cpp.
169
170 #ifndef PRODUCT
171 // statistics gathering...
172
173 juint Compile::_intrinsic_hist_count[vmIntrinsics::number_of_intrinsics()] = {0};
174 jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::number_of_intrinsics()] = {0};
175
176 inline int as_int(vmIntrinsics::ID id) {
177 return vmIntrinsics::as_int(id);
178 }
179
180 bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
181 assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
182 int oflags = _intrinsic_hist_flags[as_int(id)];
183 assert(flags != 0, "what happened?");
184 if (is_virtual) {
185 flags |= _intrinsic_virtual;
186 }
187 bool changed = (flags != oflags);
188 if ((flags & _intrinsic_worked) != 0) {
189 juint count = (_intrinsic_hist_count[as_int(id)] += 1);
190 if (count == 1) {
191 changed = true; // first time
192 }
193 // increment the overall count also:
194 _intrinsic_hist_count[as_int(vmIntrinsics::_none)] += 1;
195 }
196 if (changed) {
197 if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
198 // Something changed about the intrinsic's virtuality.
199 if ((flags & _intrinsic_virtual) != 0) {
200 // This is the first use of this intrinsic as a virtual call.
201 if (oflags != 0) {
202 // We already saw it as a non-virtual, so note both cases.
203 flags |= _intrinsic_both;
204 }
205 } else if ((oflags & _intrinsic_both) == 0) {
206 // This is the first use of this intrinsic as a non-virtual
207 flags |= _intrinsic_both;
208 }
209 }
210 _intrinsic_hist_flags[as_int(id)] = (jubyte) (oflags | flags);
211 }
212 // update the overall flags also:
213 _intrinsic_hist_flags[as_int(vmIntrinsics::_none)] |= (jubyte) flags;
214 return changed;
215 }
216
217 static char* format_flags(int flags, char* buf) {
218 buf[0] = 0;
219 if ((flags & Compile::_intrinsic_worked) != 0) strcat(buf, ",worked");
220 if ((flags & Compile::_intrinsic_failed) != 0) strcat(buf, ",failed");
221 if ((flags & Compile::_intrinsic_disabled) != 0) strcat(buf, ",disabled");
222 if ((flags & Compile::_intrinsic_virtual) != 0) strcat(buf, ",virtual");
223 if ((flags & Compile::_intrinsic_both) != 0) strcat(buf, ",nonvirtual");
224 if (buf[0] == 0) strcat(buf, ",");
225 assert(buf[0] == ',', "must be");
226 return &buf[1];
227 }
228
229 void Compile::print_intrinsic_statistics() {
230 char flagsbuf[100];
231 ttyLocker ttyl;
232 if (xtty != nullptr) xtty->head("statistics type='intrinsic'");
233 tty->print_cr("Compiler intrinsic usage:");
234 juint total = _intrinsic_hist_count[as_int(vmIntrinsics::_none)];
235 if (total == 0) total = 1; // avoid div0 in case of no successes
236 #define PRINT_STAT_LINE(name, c, f) \
237 tty->print_cr(" %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
238 for (auto id : EnumRange<vmIntrinsicID>{}) {
239 int flags = _intrinsic_hist_flags[as_int(id)];
240 juint count = _intrinsic_hist_count[as_int(id)];
241 if ((flags | count) != 0) {
242 PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
243 }
244 }
245 PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[as_int(vmIntrinsics::_none)], flagsbuf));
246 if (xtty != nullptr) xtty->tail("statistics");
247 }
248
249 void Compile::print_statistics() {
250 { ttyLocker ttyl;
251 if (xtty != nullptr) xtty->head("statistics type='opto'");
252 Parse::print_statistics();
253 PhaseStringOpts::print_statistics();
254 PhaseCCP::print_statistics();
255 PhaseRegAlloc::print_statistics();
256 PhaseOutput::print_statistics();
257 PhasePeephole::print_statistics();
258 PhaseIdealLoop::print_statistics();
259 ConnectionGraph::print_statistics();
260 PhaseMacroExpand::print_statistics();
261 if (xtty != nullptr) xtty->tail("statistics");
262 }
263 if (_intrinsic_hist_flags[as_int(vmIntrinsics::_none)] != 0) {
264 // put this under its own <statistics> element.
265 print_intrinsic_statistics();
266 }
267 }
268 #endif //PRODUCT
269
270 void Compile::gvn_replace_by(Node* n, Node* nn) {
271 for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
272 Node* use = n->last_out(i);
273 bool is_in_table = initial_gvn()->hash_delete(use);
274 uint uses_found = 0;
275 for (uint j = 0; j < use->len(); j++) {
276 if (use->in(j) == n) {
277 if (j < use->req())
278 use->set_req(j, nn);
279 else
280 use->set_prec(j, nn);
281 uses_found++;
282 }
283 }
284 if (is_in_table) {
285 // reinsert into table
286 initial_gvn()->hash_find_insert(use);
287 }
288 record_for_igvn(use);
289 i -= uses_found; // we deleted 1 or more copies of this edge
290 }
291 }
292
293
294 // Identify all nodes that are reachable from below, useful.
295 // Use breadth-first pass that records state in a Unique_Node_List,
296 // recursive traversal is slower.
297 void Compile::identify_useful_nodes(Unique_Node_List &useful) {
298 int estimated_worklist_size = live_nodes();
299 useful.map( estimated_worklist_size, nullptr ); // preallocate space
300
301 // Initialize worklist
302 if (root() != nullptr) { useful.push(root()); }
303 // If 'top' is cached, declare it useful to preserve cached node
304 if (cached_top_node()) { useful.push(cached_top_node()); }
305
306 // Push all useful nodes onto the list, breadthfirst
307 for( uint next = 0; next < useful.size(); ++next ) {
308 assert( next < unique(), "Unique useful nodes < total nodes");
309 Node *n = useful.at(next);
310 uint max = n->len();
311 for( uint i = 0; i < max; ++i ) {
312 Node *m = n->in(i);
313 if (not_a_node(m)) continue;
314 useful.push(m);
315 }
316 }
317 }
318
319 // Update dead_node_list with any missing dead nodes using useful
320 // list. Consider all non-useful nodes to be useless i.e., dead nodes.
321 void Compile::update_dead_node_list(Unique_Node_List &useful) {
322 uint max_idx = unique();
323 VectorSet& useful_node_set = useful.member_set();
324
325 for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
326 // If node with index node_idx is not in useful set,
327 // mark it as dead in dead node list.
328 if (!useful_node_set.test(node_idx)) {
329 record_dead_node(node_idx);
330 }
331 }
332 }
333
334 void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
335 int shift = 0;
336 for (int i = 0; i < inlines->length(); i++) {
337 CallGenerator* cg = inlines->at(i);
338 if (useful.member(cg->call_node())) {
339 if (shift > 0) {
340 inlines->at_put(i - shift, cg);
341 }
342 } else {
343 shift++; // skip over the dead element
344 }
345 }
346 if (shift > 0) {
347 inlines->trunc_to(inlines->length() - shift); // remove last elements from compacted array
348 }
349 }
350
351 void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead) {
352 assert(dead != nullptr && dead->is_Call(), "sanity");
353 int found = 0;
354 for (int i = 0; i < inlines->length(); i++) {
355 if (inlines->at(i)->call_node() == dead) {
356 inlines->remove_at(i);
357 found++;
358 NOT_DEBUG( break; ) // elements are unique, so exit early
359 }
360 }
361 assert(found <= 1, "not unique");
362 }
363
364 void Compile::remove_useless_nodes(GrowableArray<Node*>& node_list, Unique_Node_List& useful) {
365 for (int i = node_list.length() - 1; i >= 0; i--) {
366 Node* n = node_list.at(i);
367 if (!useful.member(n)) {
368 node_list.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
369 }
370 }
371 }
372
373 void Compile::remove_useless_node(Node* dead) {
374 remove_modified_node(dead);
375
376 // Constant node that has no out-edges and has only one in-edge from
377 // root is usually dead. However, sometimes reshaping walk makes
378 // it reachable by adding use edges. So, we will NOT count Con nodes
379 // as dead to be conservative about the dead node count at any
380 // given time.
381 if (!dead->is_Con()) {
382 record_dead_node(dead->_idx);
383 }
384 if (dead->is_macro()) {
385 remove_macro_node(dead);
386 }
387 if (dead->is_expensive()) {
388 remove_expensive_node(dead);
389 }
390 if (dead->Opcode() == Op_Opaque4) {
391 remove_template_assertion_predicate_opaq(dead);
392 }
393 if (dead->for_post_loop_opts_igvn()) {
394 remove_from_post_loop_opts_igvn(dead);
395 }
396 if (dead->is_Call()) {
397 remove_useless_late_inlines( &_late_inlines, dead);
398 remove_useless_late_inlines( &_string_late_inlines, dead);
399 remove_useless_late_inlines( &_boxing_late_inlines, dead);
400 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
401
402 if (dead->is_CallStaticJava()) {
403 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
404 }
405 }
406 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
407 bs->unregister_potential_barrier_node(dead);
408 }
409
410 // Disconnect all useless nodes by disconnecting those at the boundary.
411 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist) {
412 uint next = 0;
413 while (next < useful.size()) {
414 Node *n = useful.at(next++);
415 if (n->is_SafePoint()) {
416 // We're done with a parsing phase. Replaced nodes are not valid
417 // beyond that point.
418 n->as_SafePoint()->delete_replaced_nodes();
419 }
420 // Use raw traversal of out edges since this code removes out edges
421 int max = n->outcnt();
422 for (int j = 0; j < max; ++j) {
423 Node* child = n->raw_out(j);
424 if (!useful.member(child)) {
425 assert(!child->is_top() || child != top(),
426 "If top is cached in Compile object it is in useful list");
427 // Only need to remove this out-edge to the useless node
428 n->raw_del_out(j);
429 --j;
430 --max;
431 }
432 }
433 if (n->outcnt() == 1 && n->has_special_unique_user()) {
434 assert(useful.member(n->unique_out()), "do not push a useless node");
435 worklist.push(n->unique_out());
436 }
437 }
438
439 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
440 remove_useless_nodes(_parse_predicate_opaqs, useful); // remove useless Parse Predicate opaque nodes
441 remove_useless_nodes(_template_assertion_predicate_opaqs, useful); // remove useless Assertion Predicate opaque nodes
442 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
443 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
444 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
445 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
446 #ifdef ASSERT
447 if (_modified_nodes != nullptr) {
448 _modified_nodes->remove_useless_nodes(useful.member_set());
449 }
450 #endif
451
452 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
453 bs->eliminate_useless_gc_barriers(useful, this);
454 // clean up the late inline lists
455 remove_useless_late_inlines( &_late_inlines, useful);
456 remove_useless_late_inlines( &_string_late_inlines, useful);
457 remove_useless_late_inlines( &_boxing_late_inlines, useful);
458 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
459 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
460 }
461
462 // ============================================================================
463 //------------------------------CompileWrapper---------------------------------
464 class CompileWrapper : public StackObj {
465 Compile *const _compile;
466 public:
467 CompileWrapper(Compile* compile);
468
469 ~CompileWrapper();
470 };
471
472 CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
473 // the Compile* pointer is stored in the current ciEnv:
474 ciEnv* env = compile->env();
475 assert(env == ciEnv::current(), "must already be a ciEnv active");
476 assert(env->compiler_data() == nullptr, "compile already active?");
477 env->set_compiler_data(compile);
478 assert(compile == Compile::current(), "sanity");
479
480 compile->set_type_dict(nullptr);
481 compile->set_clone_map(new Dict(cmpkey, hashkey, _compile->comp_arena()));
482 compile->clone_map().set_clone_idx(0);
483 compile->set_type_last_size(0);
484 compile->set_last_tf(nullptr, nullptr);
485 compile->set_indexSet_arena(nullptr);
486 compile->set_indexSet_free_block_list(nullptr);
487 compile->init_type_arena();
488 Type::Initialize(compile);
489 _compile->begin_method();
490 _compile->clone_map().set_debug(_compile->has_method() && _compile->directive()->CloneMapDebugOption);
491 }
492 CompileWrapper::~CompileWrapper() {
493 // simulate crash during compilation
494 assert(CICrashAt < 0 || _compile->compile_id() != CICrashAt, "just as planned");
495
496 _compile->end_method();
497 _compile->env()->set_compiler_data(nullptr);
498 }
499
500
501 //----------------------------print_compile_messages---------------------------
502 void Compile::print_compile_messages() {
503 #ifndef PRODUCT
504 // Check if recompiling
505 if (!subsume_loads() && PrintOpto) {
506 // Recompiling without allowing machine instructions to subsume loads
507 tty->print_cr("*********************************************************");
508 tty->print_cr("** Bailout: Recompile without subsuming loads **");
509 tty->print_cr("*********************************************************");
510 }
511 if ((do_escape_analysis() != DoEscapeAnalysis) && PrintOpto) {
512 // Recompiling without escape analysis
513 tty->print_cr("*********************************************************");
514 tty->print_cr("** Bailout: Recompile without escape analysis **");
515 tty->print_cr("*********************************************************");
516 }
517 if (do_iterative_escape_analysis() != DoEscapeAnalysis && PrintOpto) {
518 // Recompiling without iterative escape analysis
519 tty->print_cr("*********************************************************");
520 tty->print_cr("** Bailout: Recompile without iterative escape analysis**");
521 tty->print_cr("*********************************************************");
522 }
523 if ((eliminate_boxing() != EliminateAutoBox) && PrintOpto) {
524 // Recompiling without boxing elimination
525 tty->print_cr("*********************************************************");
526 tty->print_cr("** Bailout: Recompile without boxing elimination **");
527 tty->print_cr("*********************************************************");
528 }
529 if ((do_locks_coarsening() != EliminateLocks) && PrintOpto) {
530 // Recompiling without locks coarsening
531 tty->print_cr("*********************************************************");
532 tty->print_cr("** Bailout: Recompile without locks coarsening **");
533 tty->print_cr("*********************************************************");
534 }
535 if (env()->break_at_compile()) {
536 // Open the debugger when compiling this method.
537 tty->print("### Breaking when compiling: ");
538 method()->print_short_name();
539 tty->cr();
540 BREAKPOINT;
541 }
542
543 if( PrintOpto ) {
544 if (is_osr_compilation()) {
545 tty->print("[OSR]%3d", _compile_id);
546 } else {
547 tty->print("%3d", _compile_id);
548 }
549 }
550 #endif
551 }
552
553 #ifndef PRODUCT
554 void Compile::print_ideal_ir(const char* phase_name) {
555 // keep the following output all in one block
556 // This output goes directly to the tty, not the compiler log.
557 // To enable tools to match it up with the compilation activity,
558 // be sure to tag this tty output with the compile ID.
559
560 // Node dumping can cause a safepoint, which can break the tty lock.
561 // Buffer all node dumps, so that all safepoints happen before we lock.
562 ResourceMark rm;
563 stringStream ss;
564
565 if (_output == nullptr) {
566 ss.print_cr("AFTER: %s", phase_name);
567 // Print out all nodes in ascending order of index.
568 root()->dump_bfs(MaxNodeLimit, nullptr, "+S$", &ss);
569 } else {
570 // Dump the node blockwise if we have a scheduling
571 _output->print_scheduling(&ss);
572 }
573
574 // Check that the lock is not broken by a safepoint.
575 NoSafepointVerifier nsv;
576 ttyLocker ttyl;
577 if (xtty != nullptr) {
578 xtty->head("ideal compile_id='%d'%s compile_phase='%s'",
579 compile_id(),
580 is_osr_compilation() ? " compile_kind='osr'" : "",
581 phase_name);
582 xtty->print("%s", ss.as_string()); // print to tty would use xml escape encoding
583 xtty->tail("ideal");
584 } else {
585 tty->print("%s", ss.as_string());
586 }
587 }
588 #endif
589
590 // ============================================================================
591 //------------------------------Compile standard-------------------------------
592
593 // Compile a method. entry_bci is -1 for normal compilations and indicates
594 // the continuation bci for on stack replacement.
595
596
597 Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
598 Options options, DirectiveSet* directive)
599 : Phase(Compiler),
600 _compile_id(ci_env->compile_id()),
601 _options(options),
602 _method(target),
603 _entry_bci(osr_bci),
604 _ilt(nullptr),
605 _stub_function(nullptr),
606 _stub_name(nullptr),
607 _stub_entry_point(nullptr),
608 _max_node_limit(MaxNodeLimit),
609 _post_loop_opts_phase(false),
610 _inlining_progress(false),
611 _inlining_incrementally(false),
612 _do_cleanup(false),
613 _has_reserved_stack_access(target->has_reserved_stack_access()),
614 #ifndef PRODUCT
615 _igv_idx(0),
616 _trace_opto_output(directive->TraceOptoOutputOption),
617 #endif
618 _has_method_handle_invokes(false),
619 _clinit_barrier_on_entry(false),
620 _stress_seed(0),
621 _comp_arena(mtCompiler),
622 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
623 _env(ci_env),
624 _directive(directive),
625 _log(ci_env->log()),
626 _intrinsics (comp_arena(), 0, 0, nullptr),
627 _macro_nodes (comp_arena(), 8, 0, nullptr),
628 _parse_predicate_opaqs (comp_arena(), 8, 0, nullptr),
629 _template_assertion_predicate_opaqs (comp_arena(), 8, 0, nullptr),
630 _expensive_nodes (comp_arena(), 8, 0, nullptr),
631 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
632 _unstable_if_traps (comp_arena(), 8, 0, nullptr),
633 _coarsened_locks (comp_arena(), 8, 0, nullptr),
634 _congraph(nullptr),
635 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
636 _dead_node_list(comp_arena()),
637 _dead_node_count(0),
638 _node_arena(mtCompiler),
639 _old_arena(mtCompiler),
640 _mach_constant_base_node(nullptr),
641 _Compile_types(mtCompiler),
642 _initial_gvn(nullptr),
643 _igvn_worklist(nullptr),
644 _types(nullptr),
645 _node_hash(nullptr),
646 _late_inlines(comp_arena(), 2, 0, nullptr),
647 _string_late_inlines(comp_arena(), 2, 0, nullptr),
648 _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
649 _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr),
650 _late_inlines_pos(0),
651 _number_of_mh_late_inlines(0),
652 _print_inlining_stream(new (mtCompiler) stringStream()),
653 _print_inlining_list(nullptr),
654 _print_inlining_idx(0),
655 _print_inlining_output(nullptr),
656 _replay_inline_data(nullptr),
657 _java_calls(0),
658 _inner_loops(0),
659 _interpreter_frame_size(0),
660 _output(nullptr)
661 #ifndef PRODUCT
662 , _in_dump_cnt(0)
663 #endif
664 {
665 C = this;
666 CompileWrapper cw(this);
667
668 if (CITimeVerbose) {
669 tty->print(" ");
670 target->holder()->name()->print();
671 tty->print(".");
672 target->print_short_name();
673 tty->print(" ");
674 }
675 TraceTime t1("Total compilation time", &_t_totalCompilation, CITime, CITimeVerbose);
676 TraceTime t2(nullptr, &_t_methodCompilation, CITime, false);
677
678 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
679 bool print_opto_assembly = directive->PrintOptoAssemblyOption;
680 // We can always print a disassembly, either abstract (hex dump) or
681 // with the help of a suitable hsdis library. Thus, we should not
682 // couple print_assembly and print_opto_assembly controls.
683 // But: always print opto and regular assembly on compile command 'print'.
684 bool print_assembly = directive->PrintAssemblyOption;
685 set_print_assembly(print_opto_assembly || print_assembly);
686 #else
687 set_print_assembly(false); // must initialize.
688 #endif
689
690 #ifndef PRODUCT
691 set_parsed_irreducible_loop(false);
692 #endif
693
694 if (directive->ReplayInlineOption) {
695 _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
696 }
697 set_print_inlining(directive->PrintInliningOption || PrintOptoInlining);
698 set_print_intrinsics(directive->PrintIntrinsicsOption);
699 set_has_irreducible_loop(true); // conservative until build_loop_tree() reset it
700
701 if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
702 // Make sure the method being compiled gets its own MDO,
703 // so we can at least track the decompile_count().
704 // Need MDO to record RTM code generation state.
705 method()->ensure_method_data();
706 }
707
708 Init(/*do_aliasing=*/ true);
709
710 print_compile_messages();
711
712 _ilt = InlineTree::build_inline_tree_root();
713
714 // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
715 assert(num_alias_types() >= AliasIdxRaw, "");
716
717 #define MINIMUM_NODE_HASH 1023
718
719 // GVN that will be run immediately on new nodes
720 uint estimated_size = method()->code_size()*4+64;
721 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
722 _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
723 _types = new (comp_arena()) Type_Array(comp_arena());
724 _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
725 PhaseGVN gvn;
726 set_initial_gvn(&gvn);
727
728 print_inlining_init();
729 { // Scope for timing the parser
730 TracePhase tp("parse", &timers[_t_parser]);
731
732 // Put top into the hash table ASAP.
733 initial_gvn()->transform_no_reclaim(top());
734
735 // Set up tf(), start(), and find a CallGenerator.
736 CallGenerator* cg = nullptr;
737 if (is_osr_compilation()) {
738 const TypeTuple *domain = StartOSRNode::osr_domain();
739 const TypeTuple *range = TypeTuple::make_range(method()->signature());
740 init_tf(TypeFunc::make(domain, range));
741 StartNode* s = new StartOSRNode(root(), domain);
742 initial_gvn()->set_type_bottom(s);
743 init_start(s);
744 cg = CallGenerator::for_osr(method(), entry_bci());
745 } else {
746 // Normal case.
747 init_tf(TypeFunc::make(method()));
748 StartNode* s = new StartNode(root(), tf()->domain());
749 initial_gvn()->set_type_bottom(s);
750 init_start(s);
751 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
752 // With java.lang.ref.reference.get() we must go through the
753 // intrinsic - even when get() is the root
754 // method of the compile - so that, if necessary, the value in
755 // the referent field of the reference object gets recorded by
756 // the pre-barrier code.
757 cg = find_intrinsic(method(), false);
758 }
759 if (cg == nullptr) {
760 float past_uses = method()->interpreter_invocation_count();
761 float expected_uses = past_uses;
762 cg = CallGenerator::for_inline(method(), expected_uses);
763 }
764 }
765 if (failing()) return;
766 if (cg == nullptr) {
767 const char* reason = InlineTree::check_can_parse(method());
768 assert(reason != nullptr, "expect reason for parse failure");
769 stringStream ss;
770 ss.print("cannot parse method: %s", reason);
771 record_method_not_compilable(ss.as_string());
772 return;
773 }
774
775 gvn.set_type(root(), root()->bottom_type());
776
777 JVMState* jvms = build_start_state(start(), tf());
778 if ((jvms = cg->generate(jvms)) == nullptr) {
779 if (!failure_reason_is(C2Compiler::retry_class_loading_during_parsing())) {
780 assert(failure_reason() != nullptr, "expect reason for parse failure");
781 stringStream ss;
782 ss.print("method parse failed: %s", failure_reason());
783 record_method_not_compilable(ss.as_string());
784 }
785 return;
786 }
787 GraphKit kit(jvms);
788
789 if (!kit.stopped()) {
790 // Accept return values, and transfer control we know not where.
791 // This is done by a special, unique ReturnNode bound to root.
792 return_values(kit.jvms());
793 }
794
795 if (kit.has_exceptions()) {
796 // Any exceptions that escape from this call must be rethrown
797 // to whatever caller is dynamically above us on the stack.
798 // This is done by a special, unique RethrowNode bound to root.
799 rethrow_exceptions(kit.transfer_exceptions_into_jvms());
800 }
801
802 assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");
803
804 if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
805 inline_string_calls(true);
806 }
807
808 if (failing()) return;
809
810 print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
811
812 // Remove clutter produced by parsing.
813 if (!failing()) {
814 ResourceMark rm;
815 PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
816 }
817 }
818
819 // Note: Large methods are capped off in do_one_bytecode().
820 if (failing()) return;
821
822 // After parsing, node notes are no longer automagic.
823 // They must be propagated by register_new_node_with_optimizer(),
824 // clone(), or the like.
825 set_default_node_notes(nullptr);
826
827 #ifndef PRODUCT
828 if (should_print_igv(1)) {
829 _igv_printer->print_inlining();
830 }
831 #endif
832
833 if (failing()) return;
834 NOT_PRODUCT( verify_graph_edges(); )
835
836 // If any phase is randomized for stress testing, seed random number
837 // generation and log the seed for repeatability.
838 if (StressLCM || StressGCM || StressIGVN || StressCCP || StressIncrementalInlining) {
839 if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
840 _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
841 FLAG_SET_ERGO(StressSeed, _stress_seed);
842 } else {
843 _stress_seed = StressSeed;
844 }
845 if (_log != nullptr) {
846 _log->elem("stress_test seed='%u'", _stress_seed);
847 }
848 }
849
850 // Now optimize
851 Optimize();
852 if (failing()) return;
853 NOT_PRODUCT( verify_graph_edges(); )
854
855 #ifndef PRODUCT
856 if (should_print_ideal()) {
857 print_ideal_ir("print_ideal");
858 }
859 #endif
860
861 #ifdef ASSERT
862 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
863 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
864 #endif
865
866 // Dump compilation data to replay it.
867 if (directive->DumpReplayOption) {
868 env()->dump_replay_data(_compile_id);
869 }
870 if (directive->DumpInlineOption && (ilt() != nullptr)) {
871 env()->dump_inline_data(_compile_id);
872 }
873
874 // Now that we know the size of all the monitors we can add a fixed slot
875 // for the original deopt pc.
876 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
877 set_fixed_slots(next_slot);
878
879 // Compute when to use implicit null checks. Used by matching trap based
880 // nodes and NullCheck optimization.
881 set_allowed_deopt_reasons();
882
883 // Now generate code
884 Code_Gen();
885 }
886
887 //------------------------------Compile----------------------------------------
888 // Compile a runtime stub
889 Compile::Compile( ciEnv* ci_env,
890 TypeFunc_generator generator,
891 address stub_function,
892 const char *stub_name,
893 int is_fancy_jump,
894 bool pass_tls,
895 bool return_pc,
896 DirectiveSet* directive)
897 : Phase(Compiler),
898 _compile_id(0),
899 _options(Options::for_runtime_stub()),
900 _method(nullptr),
901 _entry_bci(InvocationEntryBci),
902 _stub_function(stub_function),
903 _stub_name(stub_name),
904 _stub_entry_point(nullptr),
905 _max_node_limit(MaxNodeLimit),
906 _post_loop_opts_phase(false),
907 _inlining_progress(false),
908 _inlining_incrementally(false),
909 _has_reserved_stack_access(false),
910 #ifndef PRODUCT
911 _igv_idx(0),
912 _trace_opto_output(directive->TraceOptoOutputOption),
913 #endif
914 _has_method_handle_invokes(false),
915 _clinit_barrier_on_entry(false),
916 _stress_seed(0),
917 _comp_arena(mtCompiler),
918 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
919 _env(ci_env),
920 _directive(directive),
921 _log(ci_env->log()),
922 _congraph(nullptr),
923 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
924 _dead_node_list(comp_arena()),
925 _dead_node_count(0),
926 _node_arena(mtCompiler),
927 _old_arena(mtCompiler),
928 _mach_constant_base_node(nullptr),
929 _Compile_types(mtCompiler),
930 _initial_gvn(nullptr),
931 _igvn_worklist(nullptr),
932 _types(nullptr),
933 _node_hash(nullptr),
934 _number_of_mh_late_inlines(0),
935 _print_inlining_stream(new (mtCompiler) stringStream()),
936 _print_inlining_list(nullptr),
937 _print_inlining_idx(0),
938 _print_inlining_output(nullptr),
939 _replay_inline_data(nullptr),
940 _java_calls(0),
941 _inner_loops(0),
942 _interpreter_frame_size(0),
943 _output(nullptr),
944 #ifndef PRODUCT
945 _in_dump_cnt(0),
946 #endif
947 _allowed_reasons(0) {
948 C = this;
949
950 TraceTime t1(nullptr, &_t_totalCompilation, CITime, false);
951 TraceTime t2(nullptr, &_t_stubCompilation, CITime, false);
952
953 #ifndef PRODUCT
954 set_print_assembly(PrintFrameConverterAssembly);
955 set_parsed_irreducible_loop(false);
956 #else
957 set_print_assembly(false); // Must initialize.
958 #endif
959 set_has_irreducible_loop(false); // no loops
960
961 CompileWrapper cw(this);
962 Init(/*do_aliasing=*/ false);
963 init_tf((*generator)());
964
965 _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
966 _types = new (comp_arena()) Type_Array(comp_arena());
967 _node_hash = new (comp_arena()) NodeHash(comp_arena(), 255);
968 {
969 PhaseGVN gvn;
970 set_initial_gvn(&gvn); // not significant, but GraphKit guys use it pervasively
971 gvn.transform_no_reclaim(top());
972
973 GraphKit kit;
974 kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
975 }
976
977 NOT_PRODUCT( verify_graph_edges(); )
978
979 Code_Gen();
980 }
981
982 //------------------------------Init-------------------------------------------
983 // Prepare for a single compilation
984 void Compile::Init(bool aliasing) {
985 _do_aliasing = aliasing;
986 _unique = 0;
987 _regalloc = nullptr;
988
989 _tf = nullptr; // filled in later
990 _top = nullptr; // cached later
991 _matcher = nullptr; // filled in later
992 _cfg = nullptr; // filled in later
993
994 IA32_ONLY( set_24_bit_selection_and_mode(true, false); )
995
996 _node_note_array = nullptr;
997 _default_node_notes = nullptr;
998 DEBUG_ONLY( _modified_nodes = nullptr; ) // Used in Optimize()
999
1000 _immutable_memory = nullptr; // filled in at first inquiry
1001
1002 #ifdef ASSERT
1003 _phase_optimize_finished = false;
1004 _exception_backedge = false;
1005 _type_verify = nullptr;
1006 #endif
1007
1008 // Globally visible Nodes
1009 // First set TOP to null to give safe behavior during creation of RootNode
1010 set_cached_top_node(nullptr);
1011 set_root(new RootNode());
1012 // Now that you have a Root to point to, create the real TOP
1013 set_cached_top_node( new ConNode(Type::TOP) );
1014 set_recent_alloc(nullptr, nullptr);
1015
1016 // Create Debug Information Recorder to record scopes, oopmaps, etc.
1017 env()->set_oop_recorder(new OopRecorder(env()->arena()));
1018 env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
1019 env()->set_dependencies(new Dependencies(env()));
1020
1021 _fixed_slots = 0;
1022 set_has_split_ifs(false);
1023 set_has_loops(false); // first approximation
1024 set_has_stringbuilder(false);
1025 set_has_boxed_value(false);
1026 _trap_can_recompile = false; // no traps emitted yet
1027 _major_progress = true; // start out assuming good things will happen
1028 set_has_unsafe_access(false);
1029 set_max_vector_size(0);
1030 set_clear_upper_avx(false); //false as default for clear upper bits of ymm registers
1031 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1032 set_decompile_count(0);
1033
1034 set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1035 _loop_opts_cnt = LoopOptsCount;
1036 set_do_inlining(Inline);
1037 set_max_inline_size(MaxInlineSize);
1038 set_freq_inline_size(FreqInlineSize);
1039 set_do_scheduling(OptoScheduling);
1040
1041 set_do_vector_loop(false);
1042 set_has_monitors(false);
1043
1044 if (AllowVectorizeOnDemand) {
1045 if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
1046 set_do_vector_loop(true);
1047 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1048 } else if (has_method() && method()->name() != 0 &&
1049 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1050 set_do_vector_loop(true);
1051 }
1052 }
1053 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
1054 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1055
1056 set_rtm_state(NoRTM); // No RTM lock eliding by default
1057 _max_node_limit = _directive->MaxNodeLimitOption;
1058
1059 #if INCLUDE_RTM_OPT
1060 if (UseRTMLocking && has_method() && (method()->method_data_or_null() != nullptr)) {
1061 int rtm_state = method()->method_data()->rtm_state();
1062 if (method_has_option(CompileCommand::NoRTMLockEliding) || ((rtm_state & NoRTM) != 0)) {
1063 // Don't generate RTM lock eliding code.
1064 set_rtm_state(NoRTM);
1065 } else if (method_has_option(CompileCommand::UseRTMLockEliding) || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) {
1066 // Generate RTM lock eliding code without abort ratio calculation code.
1067 set_rtm_state(UseRTM);
1068 } else if (UseRTMDeopt) {
1069 // Generate RTM lock eliding code and include abort ratio calculation
1070 // code if UseRTMDeopt is on.
1071 set_rtm_state(ProfileRTM);
1072 }
1073 }
1074 #endif
1075 if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {
1076 set_clinit_barrier_on_entry(true);
1077 }
1078 if (debug_info()->recording_non_safepoints()) {
1079 set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
1080 (comp_arena(), 8, 0, nullptr));
1081 set_default_node_notes(Node_Notes::make(this));
1082 }
1083
1084 const int grow_ats = 16;
1085 _max_alias_types = grow_ats;
1086 _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
1087 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats);
1088 Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1089 {
1090 for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i];
1091 }
1092 // Initialize the first few types.
1093 _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr);
1094 _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1095 _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1096 _num_alias_types = AliasIdxRaw+1;
1097 // Zero out the alias type cache.
1098 Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1099 // A null adr_type hits in the cache right away. Preload the right answer.
1100 probe_alias_cache(nullptr)->_index = AliasIdxTop;
1101 }
1102
1103 //---------------------------init_start----------------------------------------
1104 // Install the StartNode on this compile object.
1105 void Compile::init_start(StartNode* s) {
1106 if (failing())
1107 return; // already failing
1108 assert(s == start(), "");
1109 }
1110
1111 /**
1112 * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1113 * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1114 * the ideal graph.
1115 */
1116 StartNode* Compile::start() const {
1117 assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
1118 for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
1119 Node* start = root()->fast_out(i);
1120 if (start->is_Start()) {
1121 return start->as_Start();
1122 }
1123 }
1124 fatal("Did not find Start node!");
1125 return nullptr;
1126 }
1127
1128 //-------------------------------immutable_memory-------------------------------------
1129 // Access immutable memory
1130 Node* Compile::immutable_memory() {
1131 if (_immutable_memory != nullptr) {
1132 return _immutable_memory;
1133 }
1134 StartNode* s = start();
1135 for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
1136 Node *p = s->fast_out(i);
1137 if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
1138 _immutable_memory = p;
1139 return _immutable_memory;
1140 }
1141 }
1142 ShouldNotReachHere();
1143 return nullptr;
1144 }
1145
1146 //----------------------set_cached_top_node------------------------------------
1147 // Install the cached top node, and make sure Node::is_top works correctly.
1148 void Compile::set_cached_top_node(Node* tn) {
1149 if (tn != nullptr) verify_top(tn);
1150 Node* old_top = _top;
1151 _top = tn;
1152 // Calling Node::setup_is_top allows the nodes the chance to adjust
1153 // their _out arrays.
1154 if (_top != nullptr) _top->setup_is_top();
1155 if (old_top != nullptr) old_top->setup_is_top();
1156 assert(_top == nullptr || top()->is_top(), "");
1157 }
1158
1159 #ifdef ASSERT
1160 uint Compile::count_live_nodes_by_graph_walk() {
1161 Unique_Node_List useful(comp_arena());
1162 // Get useful node list by walking the graph.
1163 identify_useful_nodes(useful);
1164 return useful.size();
1165 }
1166
1167 void Compile::print_missing_nodes() {
1168
1169 // Return if CompileLog is null and PrintIdealNodeCount is false.
1170 if ((_log == nullptr) && (! PrintIdealNodeCount)) {
1171 return;
1172 }
1173
1174 // This is an expensive function. It is executed only when the user
1175 // specifies VerifyIdealNodeCount option or otherwise knows the
1176 // additional work that needs to be done to identify reachable nodes
1177 // by walking the flow graph and find the missing ones using
1178 // _dead_node_list.
1179
1180 Unique_Node_List useful(comp_arena());
1181 // Get useful node list by walking the graph.
1182 identify_useful_nodes(useful);
1183
1184 uint l_nodes = C->live_nodes();
1185 uint l_nodes_by_walk = useful.size();
1186
1187 if (l_nodes != l_nodes_by_walk) {
1188 if (_log != nullptr) {
1189 _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
1190 _log->stamp();
1191 _log->end_head();
1192 }
1193 VectorSet& useful_member_set = useful.member_set();
1194 int last_idx = l_nodes_by_walk;
1195 for (int i = 0; i < last_idx; i++) {
1196 if (useful_member_set.test(i)) {
1197 if (_dead_node_list.test(i)) {
1198 if (_log != nullptr) {
1199 _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
1200 }
1201 if (PrintIdealNodeCount) {
1202 // Print the log message to tty
1203 tty->print_cr("mismatched_node idx='%d' both live and dead'", i);
1204 useful.at(i)->dump();
1205 }
1206 }
1207 }
1208 else if (! _dead_node_list.test(i)) {
1209 if (_log != nullptr) {
1210 _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
1211 }
1212 if (PrintIdealNodeCount) {
1213 // Print the log message to tty
1214 tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
1215 }
1216 }
1217 }
1218 if (_log != nullptr) {
1219 _log->tail("mismatched_nodes");
1220 }
1221 }
1222 }
1223 void Compile::record_modified_node(Node* n) {
1224 if (_modified_nodes != nullptr && !_inlining_incrementally && !n->is_Con()) {
1225 _modified_nodes->push(n);
1226 }
1227 }
1228
1229 void Compile::remove_modified_node(Node* n) {
1230 if (_modified_nodes != nullptr) {
1231 _modified_nodes->remove(n);
1232 }
1233 }
1234 #endif
1235
1236 #ifndef PRODUCT
1237 void Compile::verify_top(Node* tn) const {
1238 if (tn != nullptr) {
1239 assert(tn->is_Con(), "top node must be a constant");
1240 assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
1241 assert(tn->in(0) != nullptr, "must have live top node");
1242 }
1243 }
1244 #endif
1245
1246
1247 ///-------------------Managing Per-Node Debug & Profile Info-------------------
1248
1249 void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
1250 guarantee(arr != nullptr, "");
1251 int num_blocks = arr->length();
1252 if (grow_by < num_blocks) grow_by = num_blocks;
1253 int num_notes = grow_by * _node_notes_block_size;
1254 Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
1255 Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
1256 while (num_notes > 0) {
1257 arr->append(notes);
1258 notes += _node_notes_block_size;
1259 num_notes -= _node_notes_block_size;
1260 }
1261 assert(num_notes == 0, "exact multiple, please");
1262 }
1263
1264 bool Compile::copy_node_notes_to(Node* dest, Node* source) {
1265 if (source == nullptr || dest == nullptr) return false;
1266
1267 if (dest->is_Con())
1268 return false; // Do not push debug info onto constants.
1269
1270 #ifdef ASSERT
1271 // Leave a bread crumb trail pointing to the original node:
1272 if (dest != nullptr && dest != source && dest->debug_orig() == nullptr) {
1273 dest->set_debug_orig(source);
1274 }
1275 #endif
1276
1277 if (node_note_array() == nullptr)
1278 return false; // Not collecting any notes now.
1279
1280 // This is a copy onto a pre-existing node, which may already have notes.
1281 // If both nodes have notes, do not overwrite any pre-existing notes.
1282 Node_Notes* source_notes = node_notes_at(source->_idx);
1283 if (source_notes == nullptr || source_notes->is_clear()) return false;
1284 Node_Notes* dest_notes = node_notes_at(dest->_idx);
1285 if (dest_notes == nullptr || dest_notes->is_clear()) {
1286 return set_node_notes_at(dest->_idx, source_notes);
1287 }
1288
1289 Node_Notes merged_notes = (*source_notes);
1290 // The order of operations here ensures that dest notes will win...
1291 merged_notes.update_from(dest_notes);
1292 return set_node_notes_at(dest->_idx, &merged_notes);
1293 }
1294
1295
1296 //--------------------------allow_range_check_smearing-------------------------
1297 // Gating condition for coalescing similar range checks.
1298 // Sometimes we try 'speculatively' replacing a series of a range checks by a
1299 // single covering check that is at least as strong as any of them.
1300 // If the optimization succeeds, the simplified (strengthened) range check
1301 // will always succeed. If it fails, we will deopt, and then give up
1302 // on the optimization.
1303 bool Compile::allow_range_check_smearing() const {
1304 // If this method has already thrown a range-check,
1305 // assume it was because we already tried range smearing
1306 // and it failed.
1307 uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1308 return !already_trapped;
1309 }
1310
1311
1312 //------------------------------flatten_alias_type-----------------------------
1313 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1314 assert(do_aliasing(), "Aliasing should be enabled");
1315 int offset = tj->offset();
1316 TypePtr::PTR ptr = tj->ptr();
1317
1318 // Known instance (scalarizable allocation) alias only with itself.
1319 bool is_known_inst = tj->isa_oopptr() != nullptr &&
1320 tj->is_oopptr()->is_known_instance();
1321
1322 // Process weird unsafe references.
1323 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1324 assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");
1325 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1326 tj = TypeOopPtr::BOTTOM;
1327 ptr = tj->ptr();
1328 offset = tj->offset();
1329 }
1330
1331 // Array pointers need some flattening
1332 const TypeAryPtr* ta = tj->isa_aryptr();
1333 if (ta && ta->is_stable()) {
1334 // Erase stability property for alias analysis.
1335 tj = ta = ta->cast_to_stable(false);
1336 }
1337 if( ta && is_known_inst ) {
1338 if ( offset != Type::OffsetBot &&
1339 offset > arrayOopDesc::length_offset_in_bytes() ) {
1340 offset = Type::OffsetBot; // Flatten constant access into array body only
1341 tj = ta = ta->
1342 remove_speculative()->
1343 cast_to_ptr_type(ptr)->
1344 with_offset(offset);
1345 }
1346 } else if (ta) {
1347 // For arrays indexed by constant indices, we flatten the alias
1348 // space to include all of the array body. Only the header, klass
1349 // and array length can be accessed un-aliased.
1350 if( offset != Type::OffsetBot ) {
1351 if( ta->const_oop() ) { // MethodData* or Method*
1352 offset = Type::OffsetBot; // Flatten constant access into array body
1353 tj = ta = ta->
1354 remove_speculative()->
1355 cast_to_ptr_type(ptr)->
1356 cast_to_exactness(false)->
1357 with_offset(offset);
1358 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1359 // range is OK as-is.
1360 tj = ta = TypeAryPtr::RANGE;
1361 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1362 tj = TypeInstPtr::KLASS; // all klass loads look alike
1363 ta = TypeAryPtr::RANGE; // generic ignored junk
1364 ptr = TypePtr::BotPTR;
1365 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1366 tj = TypeInstPtr::MARK;
1367 ta = TypeAryPtr::RANGE; // generic ignored junk
1368 ptr = TypePtr::BotPTR;
1369 } else { // Random constant offset into array body
1370 offset = Type::OffsetBot; // Flatten constant access into array body
1371 tj = ta = ta->
1372 remove_speculative()->
1373 cast_to_ptr_type(ptr)->
1374 cast_to_exactness(false)->
1375 with_offset(offset);
1376 }
1377 }
1378 // Arrays of fixed size alias with arrays of unknown size.
1379 if (ta->size() != TypeInt::POS) {
1380 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1381 tj = ta = ta->
1382 remove_speculative()->
1383 cast_to_ptr_type(ptr)->
1384 with_ary(tary)->
1385 cast_to_exactness(false);
1386 }
1387 // Arrays of known objects become arrays of unknown objects.
1388 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1389 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1390 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);
1391 }
1392 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1393 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1394 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);
1395 }
1396 // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1397 // cannot be distinguished by bytecode alone.
1398 if (ta->elem() == TypeInt::BOOL) {
1399 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1400 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1401 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1402 }
1403 // During the 2nd round of IterGVN, NotNull castings are removed.
1404 // Make sure the Bottom and NotNull variants alias the same.
1405 // Also, make sure exact and non-exact variants alias the same.
1406 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) {
1407 tj = ta = ta->
1408 remove_speculative()->
1409 cast_to_ptr_type(TypePtr::BotPTR)->
1410 cast_to_exactness(false)->
1411 with_offset(offset);
1412 }
1413 }
1414
1415 // Oop pointers need some flattening
1416 const TypeInstPtr *to = tj->isa_instptr();
1417 if (to && to != TypeOopPtr::BOTTOM) {
1418 ciInstanceKlass* ik = to->instance_klass();
1419 if( ptr == TypePtr::Constant ) {
1420 if (ik != ciEnv::current()->Class_klass() ||
1421 offset < ik->layout_helper_size_in_bytes()) {
1422 // No constant oop pointers (such as Strings); they alias with
1423 // unknown strings.
1424 assert(!is_known_inst, "not scalarizable allocation");
1425 tj = to = to->
1426 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1427 remove_speculative()->
1428 cast_to_ptr_type(TypePtr::BotPTR)->
1429 cast_to_exactness(false);
1430 }
1431 } else if( is_known_inst ) {
1432 tj = to; // Keep NotNull and klass_is_exact for instance type
1433 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1434 // During the 2nd round of IterGVN, NotNull castings are removed.
1435 // Make sure the Bottom and NotNull variants alias the same.
1436 // Also, make sure exact and non-exact variants alias the same.
1437 tj = to = to->
1438 remove_speculative()->
1439 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1440 cast_to_ptr_type(TypePtr::BotPTR)->
1441 cast_to_exactness(false);
1442 }
1443 if (to->speculative() != nullptr) {
1444 tj = to = to->remove_speculative();
1445 }
1446 // Canonicalize the holder of this field
1447 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1448 // First handle header references such as a LoadKlassNode, even if the
1449 // object's klass is unloaded at compile time (4965979).
1450 if (!is_known_inst) { // Do it only for non-instance types
1451 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, offset);
1452 }
1453 } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1454 // Static fields are in the space above the normal instance
1455 // fields in the java.lang.Class instance.
1456 if (ik != ciEnv::current()->Class_klass()) {
1457 to = nullptr;
1458 tj = TypeOopPtr::BOTTOM;
1459 offset = tj->offset();
1460 }
1461 } else {
1462 ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1463 assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1464 if (!ik->equals(canonical_holder) || tj->offset() != offset) {
1465 if( is_known_inst ) {
1466 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, nullptr, offset, to->instance_id());
1467 } else {
1468 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, nullptr, offset);
1469 }
1470 }
1471 }
1472 }
1473
1474 // Klass pointers to object array klasses need some flattening
1475 const TypeKlassPtr *tk = tj->isa_klassptr();
1476 if( tk ) {
1477 // If we are referencing a field within a Klass, we need
1478 // to assume the worst case of an Object. Both exact and
1479 // inexact types must flatten to the same alias class so
1480 // use NotNull as the PTR.
1481 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1482 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1483 env()->Object_klass(),
1484 offset);
1485 }
1486
1487 if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1488 ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1489 if (!k || !k->is_loaded()) { // Only fails for some -Xcomp runs
1490 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), offset);
1491 } else {
1492 tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, offset);
1493 }
1494 }
1495
1496 // Check for precise loads from the primary supertype array and force them
1497 // to the supertype cache alias index. Check for generic array loads from
1498 // the primary supertype array and also force them to the supertype cache
1499 // alias index. Since the same load can reach both, we need to merge
1500 // these 2 disparate memories into the same alias class. Since the
1501 // primary supertype array is read-only, there's no chance of confusion
1502 // where we bypass an array load and an array store.
1503 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1504 if (offset == Type::OffsetBot ||
1505 (offset >= primary_supers_offset &&
1506 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1507 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1508 offset = in_bytes(Klass::secondary_super_cache_offset());
1509 tj = tk = tk->with_offset(offset);
1510 }
1511 }
1512
1513 // Flatten all Raw pointers together.
1514 if (tj->base() == Type::RawPtr)
1515 tj = TypeRawPtr::BOTTOM;
1516
1517 if (tj->base() == Type::AnyPtr)
1518 tj = TypePtr::BOTTOM; // An error, which the caller must check for.
1519
1520 offset = tj->offset();
1521 assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
1522
1523 assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
1524 (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
1525 (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
1526 (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
1527 (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1528 (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1529 (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr),
1530 "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
1531 assert( tj->ptr() != TypePtr::TopPTR &&
1532 tj->ptr() != TypePtr::AnyNull &&
1533 tj->ptr() != TypePtr::Null, "No imprecise addresses" );
1534 // assert( tj->ptr() != TypePtr::Constant ||
1535 // tj->base() == Type::RawPtr ||
1536 // tj->base() == Type::KlassPtr, "No constant oop addresses" );
1537
1538 return tj;
1539 }
1540
1541 void Compile::AliasType::Init(int i, const TypePtr* at) {
1542 assert(AliasIdxTop <= i && i < Compile::current()->_max_alias_types, "Invalid alias index");
1543 _index = i;
1544 _adr_type = at;
1545 _field = nullptr;
1546 _element = nullptr;
1547 _is_rewritable = true; // default
1548 const TypeOopPtr *atoop = (at != nullptr) ? at->isa_oopptr() : nullptr;
1549 if (atoop != nullptr && atoop->is_known_instance()) {
1550 const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
1551 _general_index = Compile::current()->get_alias_index(gt);
1552 } else {
1553 _general_index = 0;
1554 }
1555 }
1556
1557 BasicType Compile::AliasType::basic_type() const {
1558 if (element() != nullptr) {
1559 const Type* element = adr_type()->is_aryptr()->elem();
1560 return element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
1561 } if (field() != nullptr) {
1562 return field()->layout_type();
1563 } else {
1564 return T_ILLEGAL; // unknown
1565 }
1566 }
1567
1568 //---------------------------------print_on------------------------------------
1569 #ifndef PRODUCT
1570 void Compile::AliasType::print_on(outputStream* st) {
1571 if (index() < 10)
1572 st->print("@ <%d> ", index());
1573 else st->print("@ <%d>", index());
1574 st->print(is_rewritable() ? " " : " RO");
1575 int offset = adr_type()->offset();
1576 if (offset == Type::OffsetBot)
1577 st->print(" +any");
1578 else st->print(" +%-3d", offset);
1579 st->print(" in ");
1580 adr_type()->dump_on(st);
1581 const TypeOopPtr* tjp = adr_type()->isa_oopptr();
1582 if (field() != nullptr && tjp) {
1583 if (tjp->is_instptr()->instance_klass() != field()->holder() ||
1584 tjp->offset() != field()->offset_in_bytes()) {
1585 st->print(" != ");
1586 field()->print();
1587 st->print(" ***");
1588 }
1589 }
1590 }
1591
1592 void print_alias_types() {
1593 Compile* C = Compile::current();
1594 tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
1595 for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
1596 C->alias_type(idx)->print_on(tty);
1597 tty->cr();
1598 }
1599 }
1600 #endif
1601
1602
1603 //----------------------------probe_alias_cache--------------------------------
1604 Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
1605 intptr_t key = (intptr_t) adr_type;
1606 key ^= key >> logAliasCacheSize;
1607 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1608 }
1609
1610
1611 //-----------------------------grow_alias_types--------------------------------
1612 void Compile::grow_alias_types() {
1613 const int old_ats = _max_alias_types; // how many before?
1614 const int new_ats = old_ats; // how many more?
1615 const int grow_ats = old_ats+new_ats; // how many now?
1616 _max_alias_types = grow_ats;
1617 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1618 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1619 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1620 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1621 }
1622
1623
1624 //--------------------------------find_alias_type------------------------------
1625 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1626 if (!do_aliasing()) {
1627 return alias_type(AliasIdxBot);
1628 }
1629
1630 AliasCacheEntry* ace = probe_alias_cache(adr_type);
1631 if (ace->_adr_type == adr_type) {
1632 return alias_type(ace->_index);
1633 }
1634
1635 // Handle special cases.
1636 if (adr_type == nullptr) return alias_type(AliasIdxTop);
1637 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1638
1639 // Do it the slow way.
1640 const TypePtr* flat = flatten_alias_type(adr_type);
1641
1642 #ifdef ASSERT
1643 {
1644 ResourceMark rm;
1645 assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1646 Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1647 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1648 Type::str(adr_type));
1649 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1650 const TypeOopPtr* foop = flat->is_oopptr();
1651 // Scalarizable allocations have exact klass always.
1652 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1653 const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
1654 assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type: foop = %s; xoop = %s",
1655 Type::str(foop), Type::str(xoop));
1656 }
1657 }
1658 #endif
1659
1660 int idx = AliasIdxTop;
1661 for (int i = 0; i < num_alias_types(); i++) {
1662 if (alias_type(i)->adr_type() == flat) {
1663 idx = i;
1664 break;
1665 }
1666 }
1667
1668 if (idx == AliasIdxTop) {
1669 if (no_create) return nullptr;
1670 // Grow the array if necessary.
1671 if (_num_alias_types == _max_alias_types) grow_alias_types();
1672 // Add a new alias type.
1673 idx = _num_alias_types++;
1674 _alias_types[idx]->Init(idx, flat);
1675 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1676 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1677 if (flat->isa_instptr()) {
1678 if (flat->offset() == java_lang_Class::klass_offset()
1679 && flat->is_instptr()->instance_klass() == env()->Class_klass())
1680 alias_type(idx)->set_rewritable(false);
1681 }
1682 if (flat->isa_aryptr()) {
1683 #ifdef ASSERT
1684 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1685 // (T_BYTE has the weakest alignment and size restrictions...)
1686 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1687 #endif
1688 if (flat->offset() == TypePtr::OffsetBot) {
1689 alias_type(idx)->set_element(flat->is_aryptr()->elem());
1690 }
1691 }
1692 if (flat->isa_klassptr()) {
1693 if (UseCompactObjectHeaders) {
1694 if (flat->offset() == in_bytes(Klass::prototype_header_offset()))
1695 alias_type(idx)->set_rewritable(false);
1696 }
1697 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1698 alias_type(idx)->set_rewritable(false);
1699 if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1700 alias_type(idx)->set_rewritable(false);
1701 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1702 alias_type(idx)->set_rewritable(false);
1703 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1704 alias_type(idx)->set_rewritable(false);
1705 if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1706 alias_type(idx)->set_rewritable(false);
1707 }
1708 // %%% (We would like to finalize JavaThread::threadObj_offset(),
1709 // but the base pointer type is not distinctive enough to identify
1710 // references into JavaThread.)
1711
1712 // Check for final fields.
1713 const TypeInstPtr* tinst = flat->isa_instptr();
1714 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1715 ciField* field;
1716 if (tinst->const_oop() != nullptr &&
1717 tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1718 tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1719 // static field
1720 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1721 field = k->get_field_by_offset(tinst->offset(), true);
1722 } else {
1723 ciInstanceKlass *k = tinst->instance_klass();
1724 field = k->get_field_by_offset(tinst->offset(), false);
1725 }
1726 assert(field == nullptr ||
1727 original_field == nullptr ||
1728 (field->holder() == original_field->holder() &&
1729 field->offset_in_bytes() == original_field->offset_in_bytes() &&
1730 field->is_static() == original_field->is_static()), "wrong field?");
1731 // Set field() and is_rewritable() attributes.
1732 if (field != nullptr) alias_type(idx)->set_field(field);
1733 }
1734 }
1735
1736 // Fill the cache for next time.
1737 ace->_adr_type = adr_type;
1738 ace->_index = idx;
1739 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1740
1741 // Might as well try to fill the cache for the flattened version, too.
1742 AliasCacheEntry* face = probe_alias_cache(flat);
1743 if (face->_adr_type == nullptr) {
1744 face->_adr_type = flat;
1745 face->_index = idx;
1746 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1747 }
1748
1749 return alias_type(idx);
1750 }
1751
1752
1753 Compile::AliasType* Compile::alias_type(ciField* field) {
1754 const TypeOopPtr* t;
1755 if (field->is_static())
1756 t = TypeInstPtr::make(field->holder()->java_mirror());
1757 else
1758 t = TypeOopPtr::make_from_klass_raw(field->holder());
1759 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1760 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1761 return atp;
1762 }
1763
1764
1765 //------------------------------have_alias_type--------------------------------
1766 bool Compile::have_alias_type(const TypePtr* adr_type) {
1767 AliasCacheEntry* ace = probe_alias_cache(adr_type);
1768 if (ace->_adr_type == adr_type) {
1769 return true;
1770 }
1771
1772 // Handle special cases.
1773 if (adr_type == nullptr) return true;
1774 if (adr_type == TypePtr::BOTTOM) return true;
1775
1776 return find_alias_type(adr_type, true, nullptr) != nullptr;
1777 }
1778
1779 //-----------------------------must_alias--------------------------------------
1780 // True if all values of the given address type are in the given alias category.
1781 bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
1782 if (alias_idx == AliasIdxBot) return true; // the universal category
1783 if (adr_type == nullptr) return true; // null serves as TypePtr::TOP
1784 if (alias_idx == AliasIdxTop) return false; // the empty category
1785 if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
1786
1787 // the only remaining possible overlap is identity
1788 int adr_idx = get_alias_index(adr_type);
1789 assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1790 assert(adr_idx == alias_idx ||
1791 (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
1792 && adr_type != TypeOopPtr::BOTTOM),
1793 "should not be testing for overlap with an unsafe pointer");
1794 return adr_idx == alias_idx;
1795 }
1796
1797 //------------------------------can_alias--------------------------------------
1798 // True if any values of the given address type are in the given alias category.
1799 bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
1800 if (alias_idx == AliasIdxTop) return false; // the empty category
1801 if (adr_type == nullptr) return false; // null serves as TypePtr::TOP
1802 // Known instance doesn't alias with bottom memory
1803 if (alias_idx == AliasIdxBot) return !adr_type->is_known_instance(); // the universal category
1804 if (adr_type->base() == Type::AnyPtr) return !C->get_adr_type(alias_idx)->is_known_instance(); // TypePtr::BOTTOM or its twins
1805
1806 // the only remaining possible overlap is identity
1807 int adr_idx = get_alias_index(adr_type);
1808 assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1809 return adr_idx == alias_idx;
1810 }
1811
1812 // Remove the opaque nodes that protect the Parse Predicates so that all unused
1813 // checks and uncommon_traps will be eliminated from the ideal graph.
1814 void Compile::cleanup_parse_predicates(PhaseIterGVN& igvn) const {
1815 if (parse_predicate_count() == 0) {
1816 return;
1817 }
1818 for (int i = parse_predicate_count(); i > 0; i--) {
1819 Node* n = parse_predicate_opaque1_node(i - 1);
1820 assert(n->Opcode() == Op_Opaque1, "must be");
1821 igvn.replace_node(n, n->in(1));
1822 }
1823 assert(parse_predicate_count() == 0, "should be clean!");
1824 }
1825
1826 void Compile::record_for_post_loop_opts_igvn(Node* n) {
1827 if (!n->for_post_loop_opts_igvn()) {
1828 assert(!_for_post_loop_igvn.contains(n), "duplicate");
1829 n->add_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1830 _for_post_loop_igvn.append(n);
1831 }
1832 }
1833
1834 void Compile::remove_from_post_loop_opts_igvn(Node* n) {
1835 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1836 _for_post_loop_igvn.remove(n);
1837 }
1838
1839 void Compile::process_for_post_loop_opts_igvn(PhaseIterGVN& igvn) {
1840 // Verify that all previous optimizations produced a valid graph
1841 // at least to this point, even if no loop optimizations were done.
1842 PhaseIdealLoop::verify(igvn);
1843
1844 C->set_post_loop_opts_phase(); // no more loop opts allowed
1845
1846 assert(!C->major_progress(), "not cleared");
1847
1848 if (_for_post_loop_igvn.length() > 0) {
1849 while (_for_post_loop_igvn.length() > 0) {
1850 Node* n = _for_post_loop_igvn.pop();
1851 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1852 igvn._worklist.push(n);
1853 }
1854 igvn.optimize();
1855 assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1856
1857 // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1858 if (C->major_progress()) {
1859 C->clear_major_progress(); // ensure that major progress is now clear
1860 }
1861 }
1862 }
1863
1864 void Compile::record_unstable_if_trap(UnstableIfTrap* trap) {
1865 if (OptimizeUnstableIf) {
1866 _unstable_if_traps.append(trap);
1867 }
1868 }
1869
1870 void Compile::remove_useless_unstable_if_traps(Unique_Node_List& useful) {
1871 for (int i = _unstable_if_traps.length() - 1; i >= 0; i--) {
1872 UnstableIfTrap* trap = _unstable_if_traps.at(i);
1873 Node* n = trap->uncommon_trap();
1874 if (!useful.member(n)) {
1875 _unstable_if_traps.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
1876 }
1877 }
1878 }
1879
1880 // Remove the unstable if trap associated with 'unc' from candidates. It is either dead
1881 // or fold-compares case. Return true if succeed or not found.
1882 //
1883 // In rare cases, the found trap has been processed. It is too late to delete it. Return
1884 // false and ask fold-compares to yield.
1885 //
1886 // 'fold-compares' may use the uncommon_trap of the dominating IfNode to cover the fused
1887 // IfNode. This breaks the unstable_if trap invariant: control takes the unstable path
1888 // when deoptimization does happen.
1889 bool Compile::remove_unstable_if_trap(CallStaticJavaNode* unc, bool yield) {
1890 for (int i = 0; i < _unstable_if_traps.length(); ++i) {
1891 UnstableIfTrap* trap = _unstable_if_traps.at(i);
1892 if (trap->uncommon_trap() == unc) {
1893 if (yield && trap->modified()) {
1894 return false;
1895 }
1896 _unstable_if_traps.delete_at(i);
1897 break;
1898 }
1899 }
1900 return true;
1901 }
1902
1903 // Re-calculate unstable_if traps with the liveness of next_bci, which points to the unlikely path.
1904 // It needs to be done after igvn because fold-compares may fuse uncommon_traps and before renumbering.
1905 void Compile::process_for_unstable_if_traps(PhaseIterGVN& igvn) {
1906 for (int i = _unstable_if_traps.length() - 1; i >= 0; --i) {
1907 UnstableIfTrap* trap = _unstable_if_traps.at(i);
1908 CallStaticJavaNode* unc = trap->uncommon_trap();
1909 int next_bci = trap->next_bci();
1910 bool modified = trap->modified();
1911
1912 if (next_bci != -1 && !modified) {
1913 assert(!_dead_node_list.test(unc->_idx), "changing a dead node!");
1914 JVMState* jvms = unc->jvms();
1915 ciMethod* method = jvms->method();
1916 ciBytecodeStream iter(method);
1917
1918 iter.force_bci(jvms->bci());
1919 assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
1920 Bytecodes::Code c = iter.cur_bc();
1921 Node* lhs = nullptr;
1922 Node* rhs = nullptr;
1923 if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
1924 lhs = unc->peek_operand(0);
1925 rhs = unc->peek_operand(1);
1926 } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
1927 lhs = unc->peek_operand(0);
1928 }
1929
1930 ResourceMark rm;
1931 const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
1932 assert(live_locals.is_valid(), "broken liveness info");
1933 int len = (int)live_locals.size();
1934
1935 for (int i = 0; i < len; i++) {
1936 Node* local = unc->local(jvms, i);
1937 // kill local using the liveness of next_bci.
1938 // give up when the local looks like an operand to secure reexecution.
1939 if (!live_locals.at(i) && !local->is_top() && local != lhs && local!= rhs) {
1940 uint idx = jvms->locoff() + i;
1941 #ifdef ASSERT
1942 if (PrintOpto && Verbose) {
1943 tty->print("[unstable_if] kill local#%d: ", idx);
1944 local->dump();
1945 tty->cr();
1946 }
1947 #endif
1948 igvn.replace_input_of(unc, idx, top());
1949 modified = true;
1950 }
1951 }
1952 }
1953
1954 // keep the mondified trap for late query
1955 if (modified) {
1956 trap->set_modified();
1957 } else {
1958 _unstable_if_traps.delete_at(i);
1959 }
1960 }
1961 igvn.optimize();
1962 }
1963
1964 // StringOpts and late inlining of string methods
1965 void Compile::inline_string_calls(bool parse_time) {
1966 {
1967 // remove useless nodes to make the usage analysis simpler
1968 ResourceMark rm;
1969 PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
1970 }
1971
1972 {
1973 ResourceMark rm;
1974 print_method(PHASE_BEFORE_STRINGOPTS, 3);
1975 PhaseStringOpts pso(initial_gvn());
1976 print_method(PHASE_AFTER_STRINGOPTS, 3);
1977 }
1978
1979 // now inline anything that we skipped the first time around
1980 if (!parse_time) {
1981 _late_inlines_pos = _late_inlines.length();
1982 }
1983
1984 while (_string_late_inlines.length() > 0) {
1985 CallGenerator* cg = _string_late_inlines.pop();
1986 cg->do_late_inline();
1987 if (failing()) return;
1988 }
1989 _string_late_inlines.trunc_to(0);
1990 }
1991
1992 // Late inlining of boxing methods
1993 void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
1994 if (_boxing_late_inlines.length() > 0) {
1995 assert(has_boxed_value(), "inconsistent");
1996
1997 PhaseGVN* gvn = initial_gvn();
1998 set_inlining_incrementally(true);
1999
2000 igvn_worklist()->ensure_empty(); // should be done with igvn
2001
2002 _late_inlines_pos = _late_inlines.length();
2003
2004 while (_boxing_late_inlines.length() > 0) {
2005 CallGenerator* cg = _boxing_late_inlines.pop();
2006 cg->do_late_inline();
2007 if (failing()) return;
2008 }
2009 _boxing_late_inlines.trunc_to(0);
2010
2011 inline_incrementally_cleanup(igvn);
2012
2013 set_inlining_incrementally(false);
2014 }
2015 }
2016
2017 bool Compile::inline_incrementally_one() {
2018 assert(IncrementalInline, "incremental inlining should be on");
2019
2020 TracePhase tp("incrementalInline_inline", &timers[_t_incrInline_inline]);
2021
2022 set_inlining_progress(false);
2023 set_do_cleanup(false);
2024
2025 for (int i = 0; i < _late_inlines.length(); i++) {
2026 _late_inlines_pos = i+1;
2027 CallGenerator* cg = _late_inlines.at(i);
2028 bool does_dispatch = cg->is_virtual_late_inline() || cg->is_mh_late_inline();
2029 if (inlining_incrementally() || does_dispatch) { // a call can be either inlined or strength-reduced to a direct call
2030 cg->do_late_inline();
2031 assert(_late_inlines.at(i) == cg, "no insertions before current position allowed");
2032 if (failing()) {
2033 return false;
2034 } else if (inlining_progress()) {
2035 _late_inlines_pos = i+1; // restore the position in case new elements were inserted
2036 print_method(PHASE_INCREMENTAL_INLINE_STEP, 3, cg->call_node());
2037 break; // process one call site at a time
2038 }
2039 } else {
2040 // Ignore late inline direct calls when inlining is not allowed.
2041 // They are left in the late inline list when node budget is exhausted until the list is fully drained.
2042 }
2043 }
2044 // Remove processed elements.
2045 _late_inlines.remove_till(_late_inlines_pos);
2046 _late_inlines_pos = 0;
2047
2048 assert(inlining_progress() || _late_inlines.length() == 0, "no progress");
2049
2050 bool needs_cleanup = do_cleanup() || over_inlining_cutoff();
2051
2052 set_inlining_progress(false);
2053 set_do_cleanup(false);
2054
2055 bool force_cleanup = directive()->IncrementalInlineForceCleanupOption;
2056 return (_late_inlines.length() > 0) && !needs_cleanup && !force_cleanup;
2057 }
2058
2059 void Compile::inline_incrementally_cleanup(PhaseIterGVN& igvn) {
2060 {
2061 TracePhase tp("incrementalInline_pru", &timers[_t_incrInline_pru]);
2062 ResourceMark rm;
2063 PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
2064 }
2065 {
2066 TracePhase tp("incrementalInline_igvn", &timers[_t_incrInline_igvn]);
2067 igvn.reset_from_gvn(initial_gvn());
2068 igvn.optimize();
2069 }
2070 print_method(PHASE_INCREMENTAL_INLINE_CLEANUP, 3);
2071 }
2072
2073 // Perform incremental inlining until bound on number of live nodes is reached
2074 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
2075 TracePhase tp("incrementalInline", &timers[_t_incrInline]);
2076
2077 set_inlining_incrementally(true);
2078 uint low_live_nodes = 0;
2079
2080 while (_late_inlines.length() > 0) {
2081 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2082 if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
2083 TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
2084 // PhaseIdealLoop is expensive so we only try it once we are
2085 // out of live nodes and we only try it again if the previous
2086 // helped got the number of nodes down significantly
2087 PhaseIdealLoop::optimize(igvn, LoopOptsNone);
2088 if (failing()) return;
2089 low_live_nodes = live_nodes();
2090 _major_progress = true;
2091 }
2092
2093 if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
2094 bool do_print_inlining = print_inlining() || print_intrinsics();
2095 if (do_print_inlining || log() != nullptr) {
2096 // Print inlining message for candidates that we couldn't inline for lack of space.
2097 for (int i = 0; i < _late_inlines.length(); i++) {
2098 CallGenerator* cg = _late_inlines.at(i);
2099 const char* msg = "live nodes > LiveNodeCountInliningCutoff";
2100 if (do_print_inlining) {
2101 cg->print_inlining_late(msg);
2102 }
2103 log_late_inline_failure(cg, msg);
2104 }
2105 }
2106 break; // finish
2107 }
2108 }
2109
2110 igvn_worklist()->ensure_empty(); // should be done with igvn
2111
2112 while (inline_incrementally_one()) {
2113 assert(!failing(), "inconsistent");
2114 }
2115 if (failing()) return;
2116
2117 inline_incrementally_cleanup(igvn);
2118
2119 print_method(PHASE_INCREMENTAL_INLINE_STEP, 3);
2120
2121 if (failing()) return;
2122
2123 if (_late_inlines.length() == 0) {
2124 break; // no more progress
2125 }
2126 }
2127
2128 igvn_worklist()->ensure_empty(); // should be done with igvn
2129
2130 if (_string_late_inlines.length() > 0) {
2131 assert(has_stringbuilder(), "inconsistent");
2132
2133 inline_string_calls(false);
2134
2135 if (failing()) return;
2136
2137 inline_incrementally_cleanup(igvn);
2138 }
2139
2140 set_inlining_incrementally(false);
2141 }
2142
2143 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2144 // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2145 // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2146 // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr"
2147 // as if "inlining_incrementally() == true" were set.
2148 assert(inlining_incrementally() == false, "not allowed");
2149 assert(_modified_nodes == nullptr, "not allowed");
2150 assert(_late_inlines.length() > 0, "sanity");
2151
2152 while (_late_inlines.length() > 0) {
2153 igvn_worklist()->ensure_empty(); // should be done with igvn
2154
2155 while (inline_incrementally_one()) {
2156 assert(!failing(), "inconsistent");
2157 }
2158 if (failing()) return;
2159
2160 inline_incrementally_cleanup(igvn);
2161 }
2162 }
2163
2164 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2165 if (_loop_opts_cnt > 0) {
2166 while (major_progress() && (_loop_opts_cnt > 0)) {
2167 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2168 PhaseIdealLoop::optimize(igvn, mode);
2169 _loop_opts_cnt--;
2170 if (failing()) return false;
2171 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2172 }
2173 }
2174 return true;
2175 }
2176
2177 // Remove edges from "root" to each SafePoint at a backward branch.
2178 // They were inserted during parsing (see add_safepoint()) to make
2179 // infinite loops without calls or exceptions visible to root, i.e.,
2180 // useful.
2181 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2182 Node *r = root();
2183 if (r != nullptr) {
2184 for (uint i = r->req(); i < r->len(); ++i) {
2185 Node *n = r->in(i);
2186 if (n != nullptr && n->is_SafePoint()) {
2187 r->rm_prec(i);
2188 if (n->outcnt() == 0) {
2189 igvn.remove_dead_node(n);
2190 }
2191 --i;
2192 }
2193 }
2194 // Parsing may have added top inputs to the root node (Path
2195 // leading to the Halt node proven dead). Make sure we get a
2196 // chance to clean them up.
2197 igvn._worklist.push(r);
2198 igvn.optimize();
2199 }
2200 }
2201
2202 //------------------------------Optimize---------------------------------------
2203 // Given a graph, optimize it.
2204 void Compile::Optimize() {
2205 TracePhase tp("optimizer", &timers[_t_optimizer]);
2206
2207 #ifndef PRODUCT
2208 if (env()->break_at_compile()) {
2209 BREAKPOINT;
2210 }
2211
2212 #endif
2213
2214 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2215 #ifdef ASSERT
2216 bs->verify_gc_barriers(this, BarrierSetC2::BeforeOptimize);
2217 #endif
2218
2219 ResourceMark rm;
2220
2221 print_inlining_reinit();
2222
2223 NOT_PRODUCT( verify_graph_edges(); )
2224
2225 print_method(PHASE_AFTER_PARSING, 1);
2226
2227 {
2228 // Iterative Global Value Numbering, including ideal transforms
2229 // Initialize IterGVN with types and values from parse-time GVN
2230 PhaseIterGVN igvn(initial_gvn());
2231 #ifdef ASSERT
2232 _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
2233 #endif
2234 {
2235 TracePhase tp("iterGVN", &timers[_t_iterGVN]);
2236 igvn.optimize();
2237 }
2238
2239 if (failing()) return;
2240
2241 print_method(PHASE_ITER_GVN1, 2);
2242
2243 process_for_unstable_if_traps(igvn);
2244
2245 if (failing()) return;
2246
2247 inline_incrementally(igvn);
2248
2249 print_method(PHASE_INCREMENTAL_INLINE, 2);
2250
2251 if (failing()) return;
2252
2253 if (eliminate_boxing()) {
2254 // Inline valueOf() methods now.
2255 inline_boxing_calls(igvn);
2256
2257 if (failing()) return;
2258
2259 if (AlwaysIncrementalInline || StressIncrementalInlining) {
2260 inline_incrementally(igvn);
2261 }
2262
2263 print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2264
2265 if (failing()) return;
2266 }
2267
2268 // Remove the speculative part of types and clean up the graph from
2269 // the extra CastPP nodes whose only purpose is to carry them. Do
2270 // that early so that optimizations are not disrupted by the extra
2271 // CastPP nodes.
2272 remove_speculative_types(igvn);
2273
2274 if (failing()) return;
2275
2276 // No more new expensive nodes will be added to the list from here
2277 // so keep only the actual candidates for optimizations.
2278 cleanup_expensive_nodes(igvn);
2279
2280 if (failing()) return;
2281
2282 assert(EnableVectorSupport || !has_vbox_nodes(), "sanity");
2283 if (EnableVectorSupport && has_vbox_nodes()) {
2284 TracePhase tp("", &timers[_t_vector]);
2285 PhaseVector pv(igvn);
2286 pv.optimize_vector_boxes();
2287 if (failing()) return;
2288 print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2289 }
2290 assert(!has_vbox_nodes(), "sanity");
2291
2292 if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2293 Compile::TracePhase tp("", &timers[_t_renumberLive]);
2294 igvn_worklist()->ensure_empty(); // should be done with igvn
2295 {
2296 ResourceMark rm;
2297 PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
2298 }
2299 igvn.reset_from_gvn(initial_gvn());
2300 igvn.optimize();
2301 }
2302
2303 // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2304 // safepoints
2305 remove_root_to_sfpts_edges(igvn);
2306
2307 if (failing()) return;
2308
2309 // Perform escape analysis
2310 if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2311 if (has_loops()) {
2312 // Cleanup graph (remove dead nodes).
2313 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2314 PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2315 if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2316 if (failing()) return;
2317 }
2318 bool progress;
2319 do {
2320 ConnectionGraph::do_analysis(this, &igvn);
2321
2322 if (failing()) return;
2323
2324 int mcount = macro_count(); // Record number of allocations and locks before IGVN
2325
2326 // Optimize out fields loads from scalar replaceable allocations.
2327 igvn.optimize();
2328 print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2329
2330 if (failing()) return;
2331
2332 if (congraph() != nullptr && macro_count() > 0) {
2333 TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2334 PhaseMacroExpand mexp(igvn);
2335 mexp.eliminate_macro_nodes();
2336 igvn.set_delay_transform(false);
2337
2338 igvn.optimize();
2339 print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2340
2341 if (failing()) return;
2342 }
2343 progress = do_iterative_escape_analysis() &&
2344 (macro_count() < mcount) &&
2345 ConnectionGraph::has_candidates(this);
2346 // Try again if candidates exist and made progress
2347 // by removing some allocations and/or locks.
2348 } while (progress);
2349 }
2350
2351 // Loop transforms on the ideal graph. Range Check Elimination,
2352 // peeling, unrolling, etc.
2353
2354 // Set loop opts counter
2355 if((_loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2356 {
2357 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2358 PhaseIdealLoop::optimize(igvn, LoopOptsDefault);
2359 _loop_opts_cnt--;
2360 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2361 if (failing()) return;
2362 }
2363 // Loop opts pass if partial peeling occurred in previous pass
2364 if(PartialPeelLoop && major_progress() && (_loop_opts_cnt > 0)) {
2365 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2366 PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
2367 _loop_opts_cnt--;
2368 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2369 if (failing()) return;
2370 }
2371 // Loop opts pass for loop-unrolling before CCP
2372 if(major_progress() && (_loop_opts_cnt > 0)) {
2373 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2374 PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
2375 _loop_opts_cnt--;
2376 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2377 }
2378 if (!failing()) {
2379 // Verify that last round of loop opts produced a valid graph
2380 PhaseIdealLoop::verify(igvn);
2381 }
2382 }
2383 if (failing()) return;
2384
2385 // Conditional Constant Propagation;
2386 PhaseCCP ccp( &igvn );
2387 assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2388 {
2389 TracePhase tp("ccp", &timers[_t_ccp]);
2390 ccp.do_transform();
2391 }
2392 print_method(PHASE_CCP1, 2);
2393
2394 assert( true, "Break here to ccp.dump_old2new_map()");
2395
2396 // Iterative Global Value Numbering, including ideal transforms
2397 {
2398 TracePhase tp("iterGVN2", &timers[_t_iterGVN2]);
2399 igvn.reset_from_igvn(&ccp);
2400 igvn.optimize();
2401 }
2402 print_method(PHASE_ITER_GVN2, 2);
2403
2404 if (failing()) return;
2405
2406 // Loop transforms on the ideal graph. Range Check Elimination,
2407 // peeling, unrolling, etc.
2408 if (!optimize_loops(igvn, LoopOptsDefault)) {
2409 return;
2410 }
2411
2412 if (failing()) return;
2413
2414 C->clear_major_progress(); // ensure that major progress is now clear
2415
2416 process_for_post_loop_opts_igvn(igvn);
2417
2418 if (failing()) return;
2419
2420 #ifdef ASSERT
2421 bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2422 #endif
2423
2424 {
2425 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2426 PhaseMacroExpand mex(igvn);
2427 if (mex.expand_macro_nodes()) {
2428 assert(failing(), "must bail out w/ explicit message");
2429 return;
2430 }
2431 print_method(PHASE_MACRO_EXPANSION, 2);
2432 }
2433
2434 {
2435 TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
2436 if (bs->expand_barriers(this, igvn)) {
2437 assert(failing(), "must bail out w/ explicit message");
2438 return;
2439 }
2440 print_method(PHASE_BARRIER_EXPANSION, 2);
2441 }
2442
2443 if (C->max_vector_size() > 0) {
2444 C->optimize_logic_cones(igvn);
2445 igvn.optimize();
2446 }
2447
2448 DEBUG_ONLY( _modified_nodes = nullptr; )
2449
2450 assert(igvn._worklist.size() == 0, "not empty");
2451
2452 assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2453
2454 if (_late_inlines.length() > 0) {
2455 // More opportunities to optimize virtual and MH calls.
2456 // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
2457 process_late_inline_calls_no_inline(igvn);
2458 if (failing()) return;
2459 }
2460 } // (End scope of igvn; run destructor if necessary for asserts.)
2461
2462 check_no_dead_use();
2463
2464 process_print_inlining();
2465
2466 // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
2467 // to remove hashes to unlock nodes for modifications.
2468 C->node_hash()->clear();
2469
2470 // A method with only infinite loops has no edges entering loops from root
2471 {
2472 TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
2473 if (final_graph_reshaping()) {
2474 assert(failing(), "must bail out w/ explicit message");
2475 return;
2476 }
2477 }
2478
2479 print_method(PHASE_OPTIMIZE_FINISHED, 2);
2480 DEBUG_ONLY(set_phase_optimize_finished();)
2481 }
2482
2483 #ifdef ASSERT
2484 void Compile::check_no_dead_use() const {
2485 ResourceMark rm;
2486 Unique_Node_List wq;
2487 wq.push(root());
2488 for (uint i = 0; i < wq.size(); ++i) {
2489 Node* n = wq.at(i);
2490 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2491 Node* u = n->fast_out(j);
2492 if (u->outcnt() == 0 && !u->is_Con()) {
2493 u->dump();
2494 fatal("no reachable node should have no use");
2495 }
2496 wq.push(u);
2497 }
2498 }
2499 }
2500 #endif
2501
2502 void Compile::inline_vector_reboxing_calls() {
2503 if (C->_vector_reboxing_late_inlines.length() > 0) {
2504 _late_inlines_pos = C->_late_inlines.length();
2505 while (_vector_reboxing_late_inlines.length() > 0) {
2506 CallGenerator* cg = _vector_reboxing_late_inlines.pop();
2507 cg->do_late_inline();
2508 if (failing()) return;
2509 print_method(PHASE_INLINE_VECTOR_REBOX, 3, cg->call_node());
2510 }
2511 _vector_reboxing_late_inlines.trunc_to(0);
2512 }
2513 }
2514
2515 bool Compile::has_vbox_nodes() {
2516 if (C->_vector_reboxing_late_inlines.length() > 0) {
2517 return true;
2518 }
2519 for (int macro_idx = C->macro_count() - 1; macro_idx >= 0; macro_idx--) {
2520 Node * n = C->macro_node(macro_idx);
2521 assert(n->is_macro(), "only macro nodes expected here");
2522 if (n->Opcode() == Op_VectorUnbox || n->Opcode() == Op_VectorBox || n->Opcode() == Op_VectorBoxAllocate) {
2523 return true;
2524 }
2525 }
2526 return false;
2527 }
2528
2529 //---------------------------- Bitwise operation packing optimization ---------------------------
2530
2531 static bool is_vector_unary_bitwise_op(Node* n) {
2532 return n->Opcode() == Op_XorV &&
2533 VectorNode::is_vector_bitwise_not_pattern(n);
2534 }
2535
2536 static bool is_vector_binary_bitwise_op(Node* n) {
2537 switch (n->Opcode()) {
2538 case Op_AndV:
2539 case Op_OrV:
2540 return true;
2541
2542 case Op_XorV:
2543 return !is_vector_unary_bitwise_op(n);
2544
2545 default:
2546 return false;
2547 }
2548 }
2549
2550 static bool is_vector_ternary_bitwise_op(Node* n) {
2551 return n->Opcode() == Op_MacroLogicV;
2552 }
2553
2554 static bool is_vector_bitwise_op(Node* n) {
2555 return is_vector_unary_bitwise_op(n) ||
2556 is_vector_binary_bitwise_op(n) ||
2557 is_vector_ternary_bitwise_op(n);
2558 }
2559
2560 static bool is_vector_bitwise_cone_root(Node* n) {
2561 if (n->bottom_type()->isa_vectmask() || !is_vector_bitwise_op(n)) {
2562 return false;
2563 }
2564 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2565 if (is_vector_bitwise_op(n->fast_out(i))) {
2566 return false;
2567 }
2568 }
2569 return true;
2570 }
2571
2572 static uint collect_unique_inputs(Node* n, Unique_Node_List& inputs) {
2573 uint cnt = 0;
2574 if (is_vector_bitwise_op(n)) {
2575 uint inp_cnt = n->is_predicated_vector() ? n->req()-1 : n->req();
2576 if (VectorNode::is_vector_bitwise_not_pattern(n)) {
2577 for (uint i = 1; i < inp_cnt; i++) {
2578 Node* in = n->in(i);
2579 bool skip = VectorNode::is_all_ones_vector(in);
2580 if (!skip && !inputs.member(in)) {
2581 inputs.push(in);
2582 cnt++;
2583 }
2584 }
2585 assert(cnt <= 1, "not unary");
2586 } else {
2587 uint last_req = inp_cnt;
2588 if (is_vector_ternary_bitwise_op(n)) {
2589 last_req = inp_cnt - 1; // skip last input
2590 }
2591 for (uint i = 1; i < last_req; i++) {
2592 Node* def = n->in(i);
2593 if (!inputs.member(def)) {
2594 inputs.push(def);
2595 cnt++;
2596 }
2597 }
2598 }
2599 } else { // not a bitwise operations
2600 if (!inputs.member(n)) {
2601 inputs.push(n);
2602 cnt++;
2603 }
2604 }
2605 return cnt;
2606 }
2607
2608 void Compile::collect_logic_cone_roots(Unique_Node_List& list) {
2609 Unique_Node_List useful_nodes;
2610 C->identify_useful_nodes(useful_nodes);
2611
2612 for (uint i = 0; i < useful_nodes.size(); i++) {
2613 Node* n = useful_nodes.at(i);
2614 if (is_vector_bitwise_cone_root(n)) {
2615 list.push(n);
2616 }
2617 }
2618 }
2619
2620 Node* Compile::xform_to_MacroLogicV(PhaseIterGVN& igvn,
2621 const TypeVect* vt,
2622 Unique_Node_List& partition,
2623 Unique_Node_List& inputs) {
2624 assert(partition.size() == 2 || partition.size() == 3, "not supported");
2625 assert(inputs.size() == 2 || inputs.size() == 3, "not supported");
2626 assert(Matcher::match_rule_supported_vector(Op_MacroLogicV, vt->length(), vt->element_basic_type()), "not supported");
2627
2628 Node* in1 = inputs.at(0);
2629 Node* in2 = inputs.at(1);
2630 Node* in3 = (inputs.size() == 3 ? inputs.at(2) : in2);
2631
2632 uint func = compute_truth_table(partition, inputs);
2633
2634 Node* pn = partition.at(partition.size() - 1);
2635 Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : nullptr;
2636 return igvn.transform(MacroLogicVNode::make(igvn, in1, in2, in3, mask, func, vt));
2637 }
2638
2639 static uint extract_bit(uint func, uint pos) {
2640 return (func & (1 << pos)) >> pos;
2641 }
2642
2643 //
2644 // A macro logic node represents a truth table. It has 4 inputs,
2645 // First three inputs corresponds to 3 columns of a truth table
2646 // and fourth input captures the logic function.
2647 //
2648 // eg. fn = (in1 AND in2) OR in3;
2649 //
2650 // MacroNode(in1,in2,in3,fn)
2651 //
2652 // -----------------
2653 // in1 in2 in3 fn
2654 // -----------------
2655 // 0 0 0 0
2656 // 0 0 1 1
2657 // 0 1 0 0
2658 // 0 1 1 1
2659 // 1 0 0 0
2660 // 1 0 1 1
2661 // 1 1 0 1
2662 // 1 1 1 1
2663 //
2664
2665 uint Compile::eval_macro_logic_op(uint func, uint in1 , uint in2, uint in3) {
2666 int res = 0;
2667 for (int i = 0; i < 8; i++) {
2668 int bit1 = extract_bit(in1, i);
2669 int bit2 = extract_bit(in2, i);
2670 int bit3 = extract_bit(in3, i);
2671
2672 int func_bit_pos = (bit1 << 2 | bit2 << 1 | bit3);
2673 int func_bit = extract_bit(func, func_bit_pos);
2674
2675 res |= func_bit << i;
2676 }
2677 return res;
2678 }
2679
2680 static uint eval_operand(Node* n, ResourceHashtable<Node*,uint>& eval_map) {
2681 assert(n != nullptr, "");
2682 assert(eval_map.contains(n), "absent");
2683 return *(eval_map.get(n));
2684 }
2685
2686 static void eval_operands(Node* n,
2687 uint& func1, uint& func2, uint& func3,
2688 ResourceHashtable<Node*,uint>& eval_map) {
2689 assert(is_vector_bitwise_op(n), "");
2690
2691 if (is_vector_unary_bitwise_op(n)) {
2692 Node* opnd = n->in(1);
2693 if (VectorNode::is_vector_bitwise_not_pattern(n) && VectorNode::is_all_ones_vector(opnd)) {
2694 opnd = n->in(2);
2695 }
2696 func1 = eval_operand(opnd, eval_map);
2697 } else if (is_vector_binary_bitwise_op(n)) {
2698 func1 = eval_operand(n->in(1), eval_map);
2699 func2 = eval_operand(n->in(2), eval_map);
2700 } else {
2701 assert(is_vector_ternary_bitwise_op(n), "unknown operation");
2702 func1 = eval_operand(n->in(1), eval_map);
2703 func2 = eval_operand(n->in(2), eval_map);
2704 func3 = eval_operand(n->in(3), eval_map);
2705 }
2706 }
2707
2708 uint Compile::compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs) {
2709 assert(inputs.size() <= 3, "sanity");
2710 ResourceMark rm;
2711 uint res = 0;
2712 ResourceHashtable<Node*,uint> eval_map;
2713
2714 // Populate precomputed functions for inputs.
2715 // Each input corresponds to one column of 3 input truth-table.
2716 uint input_funcs[] = { 0xAA, // (_, _, c) -> c
2717 0xCC, // (_, b, _) -> b
2718 0xF0 }; // (a, _, _) -> a
2719 for (uint i = 0; i < inputs.size(); i++) {
2720 eval_map.put(inputs.at(i), input_funcs[2-i]);
2721 }
2722
2723 for (uint i = 0; i < partition.size(); i++) {
2724 Node* n = partition.at(i);
2725
2726 uint func1 = 0, func2 = 0, func3 = 0;
2727 eval_operands(n, func1, func2, func3, eval_map);
2728
2729 switch (n->Opcode()) {
2730 case Op_OrV:
2731 assert(func3 == 0, "not binary");
2732 res = func1 | func2;
2733 break;
2734 case Op_AndV:
2735 assert(func3 == 0, "not binary");
2736 res = func1 & func2;
2737 break;
2738 case Op_XorV:
2739 if (VectorNode::is_vector_bitwise_not_pattern(n)) {
2740 assert(func2 == 0 && func3 == 0, "not unary");
2741 res = (~func1) & 0xFF;
2742 } else {
2743 assert(func3 == 0, "not binary");
2744 res = func1 ^ func2;
2745 }
2746 break;
2747 case Op_MacroLogicV:
2748 // Ordering of inputs may change during evaluation of sub-tree
2749 // containing MacroLogic node as a child node, thus a re-evaluation
2750 // makes sure that function is evaluated in context of current
2751 // inputs.
2752 res = eval_macro_logic_op(n->in(4)->get_int(), func1, func2, func3);
2753 break;
2754
2755 default: assert(false, "not supported: %s", n->Name());
2756 }
2757 assert(res <= 0xFF, "invalid");
2758 eval_map.put(n, res);
2759 }
2760 return res;
2761 }
2762
2763 // Criteria under which nodes gets packed into a macro logic node:-
2764 // 1) Parent and both child nodes are all unmasked or masked with
2765 // same predicates.
2766 // 2) Masked parent can be packed with left child if it is predicated
2767 // and both have same predicates.
2768 // 3) Masked parent can be packed with right child if its un-predicated
2769 // or has matching predication condition.
2770 // 4) An unmasked parent can be packed with an unmasked child.
2771 bool Compile::compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs) {
2772 assert(partition.size() == 0, "not empty");
2773 assert(inputs.size() == 0, "not empty");
2774 if (is_vector_ternary_bitwise_op(n)) {
2775 return false;
2776 }
2777
2778 bool is_unary_op = is_vector_unary_bitwise_op(n);
2779 if (is_unary_op) {
2780 assert(collect_unique_inputs(n, inputs) == 1, "not unary");
2781 return false; // too few inputs
2782 }
2783
2784 bool pack_left_child = true;
2785 bool pack_right_child = true;
2786
2787 bool left_child_LOP = is_vector_bitwise_op(n->in(1));
2788 bool right_child_LOP = is_vector_bitwise_op(n->in(2));
2789
2790 int left_child_input_cnt = 0;
2791 int right_child_input_cnt = 0;
2792
2793 bool parent_is_predicated = n->is_predicated_vector();
2794 bool left_child_predicated = n->in(1)->is_predicated_vector();
2795 bool right_child_predicated = n->in(2)->is_predicated_vector();
2796
2797 Node* parent_pred = parent_is_predicated ? n->in(n->req()-1) : nullptr;
2798 Node* left_child_pred = left_child_predicated ? n->in(1)->in(n->in(1)->req()-1) : nullptr;
2799 Node* right_child_pred = right_child_predicated ? n->in(1)->in(n->in(1)->req()-1) : nullptr;
2800
2801 do {
2802 if (pack_left_child && left_child_LOP &&
2803 ((!parent_is_predicated && !left_child_predicated) ||
2804 ((parent_is_predicated && left_child_predicated &&
2805 parent_pred == left_child_pred)))) {
2806 partition.push(n->in(1));
2807 left_child_input_cnt = collect_unique_inputs(n->in(1), inputs);
2808 } else {
2809 inputs.push(n->in(1));
2810 left_child_input_cnt = 1;
2811 }
2812
2813 if (pack_right_child && right_child_LOP &&
2814 (!right_child_predicated ||
2815 (right_child_predicated && parent_is_predicated &&
2816 parent_pred == right_child_pred))) {
2817 partition.push(n->in(2));
2818 right_child_input_cnt = collect_unique_inputs(n->in(2), inputs);
2819 } else {
2820 inputs.push(n->in(2));
2821 right_child_input_cnt = 1;
2822 }
2823
2824 if (inputs.size() > 3) {
2825 assert(partition.size() > 0, "");
2826 inputs.clear();
2827 partition.clear();
2828 if (left_child_input_cnt > right_child_input_cnt) {
2829 pack_left_child = false;
2830 } else {
2831 pack_right_child = false;
2832 }
2833 } else {
2834 break;
2835 }
2836 } while(true);
2837
2838 if(partition.size()) {
2839 partition.push(n);
2840 }
2841
2842 return (partition.size() == 2 || partition.size() == 3) &&
2843 (inputs.size() == 2 || inputs.size() == 3);
2844 }
2845
2846 void Compile::process_logic_cone_root(PhaseIterGVN &igvn, Node *n, VectorSet &visited) {
2847 assert(is_vector_bitwise_op(n), "not a root");
2848
2849 visited.set(n->_idx);
2850
2851 // 1) Do a DFS walk over the logic cone.
2852 for (uint i = 1; i < n->req(); i++) {
2853 Node* in = n->in(i);
2854 if (!visited.test(in->_idx) && is_vector_bitwise_op(in)) {
2855 process_logic_cone_root(igvn, in, visited);
2856 }
2857 }
2858
2859 // 2) Bottom up traversal: Merge node[s] with
2860 // the parent to form macro logic node.
2861 Unique_Node_List partition;
2862 Unique_Node_List inputs;
2863 if (compute_logic_cone(n, partition, inputs)) {
2864 const TypeVect* vt = n->bottom_type()->is_vect();
2865 Node* pn = partition.at(partition.size() - 1);
2866 Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : nullptr;
2867 if (mask == nullptr ||
2868 Matcher::match_rule_supported_vector_masked(Op_MacroLogicV, vt->length(), vt->element_basic_type())) {
2869 Node* macro_logic = xform_to_MacroLogicV(igvn, vt, partition, inputs);
2870 VectorNode::trace_new_vector(macro_logic, "MacroLogic");
2871 igvn.replace_node(n, macro_logic);
2872 }
2873 }
2874 }
2875
2876 void Compile::optimize_logic_cones(PhaseIterGVN &igvn) {
2877 ResourceMark rm;
2878 if (Matcher::match_rule_supported(Op_MacroLogicV)) {
2879 Unique_Node_List list;
2880 collect_logic_cone_roots(list);
2881
2882 while (list.size() > 0) {
2883 Node* n = list.pop();
2884 const TypeVect* vt = n->bottom_type()->is_vect();
2885 bool supported = Matcher::match_rule_supported_vector(Op_MacroLogicV, vt->length(), vt->element_basic_type());
2886 if (supported) {
2887 VectorSet visited(comp_arena());
2888 process_logic_cone_root(igvn, n, visited);
2889 }
2890 }
2891 }
2892 }
2893
2894 //------------------------------Code_Gen---------------------------------------
2895 // Given a graph, generate code for it
2896 void Compile::Code_Gen() {
2897 if (failing()) {
2898 return;
2899 }
2900
2901 // Perform instruction selection. You might think we could reclaim Matcher
2902 // memory PDQ, but actually the Matcher is used in generating spill code.
2903 // Internals of the Matcher (including some VectorSets) must remain live
2904 // for awhile - thus I cannot reclaim Matcher memory lest a VectorSet usage
2905 // set a bit in reclaimed memory.
2906
2907 // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2908 // nodes. Mapping is only valid at the root of each matched subtree.
2909 NOT_PRODUCT( verify_graph_edges(); )
2910
2911 Matcher matcher;
2912 _matcher = &matcher;
2913 {
2914 TracePhase tp("matcher", &timers[_t_matcher]);
2915 matcher.match();
2916 if (failing()) {
2917 return;
2918 }
2919 }
2920 // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2921 // nodes. Mapping is only valid at the root of each matched subtree.
2922 NOT_PRODUCT( verify_graph_edges(); )
2923
2924 // If you have too many nodes, or if matching has failed, bail out
2925 check_node_count(0, "out of nodes matching instructions");
2926 if (failing()) {
2927 return;
2928 }
2929
2930 print_method(PHASE_MATCHING, 2);
2931
2932 // Build a proper-looking CFG
2933 PhaseCFG cfg(node_arena(), root(), matcher);
2934 _cfg = &cfg;
2935 {
2936 TracePhase tp("scheduler", &timers[_t_scheduler]);
2937 bool success = cfg.do_global_code_motion();
2938 if (!success) {
2939 return;
2940 }
2941
2942 print_method(PHASE_GLOBAL_CODE_MOTION, 2);
2943 NOT_PRODUCT( verify_graph_edges(); )
2944 cfg.verify();
2945 }
2946
2947 PhaseChaitin regalloc(unique(), cfg, matcher, false);
2948 _regalloc = ®alloc;
2949 {
2950 TracePhase tp("regalloc", &timers[_t_registerAllocation]);
2951 // Perform register allocation. After Chaitin, use-def chains are
2952 // no longer accurate (at spill code) and so must be ignored.
2953 // Node->LRG->reg mappings are still accurate.
2954 _regalloc->Register_Allocate();
2955
2956 // Bail out if the allocator builds too many nodes
2957 if (failing()) {
2958 return;
2959 }
2960 }
2961
2962 // Prior to register allocation we kept empty basic blocks in case the
2963 // the allocator needed a place to spill. After register allocation we
2964 // are not adding any new instructions. If any basic block is empty, we
2965 // can now safely remove it.
2966 {
2967 TracePhase tp("blockOrdering", &timers[_t_blockOrdering]);
2968 cfg.remove_empty_blocks();
2969 if (do_freq_based_layout()) {
2970 PhaseBlockLayout layout(cfg);
2971 } else {
2972 cfg.set_loop_alignment();
2973 }
2974 cfg.fixup_flow();
2975 cfg.remove_unreachable_blocks();
2976 cfg.verify_dominator_tree();
2977 }
2978
2979 // Apply peephole optimizations
2980 if( OptoPeephole ) {
2981 TracePhase tp("peephole", &timers[_t_peephole]);
2982 PhasePeephole peep( _regalloc, cfg);
2983 peep.do_transform();
2984 }
2985
2986 // Do late expand if CPU requires this.
2987 if (Matcher::require_postalloc_expand) {
2988 TracePhase tp("postalloc_expand", &timers[_t_postalloc_expand]);
2989 cfg.postalloc_expand(_regalloc);
2990 }
2991
2992 // Convert Nodes to instruction bits in a buffer
2993 {
2994 TracePhase tp("output", &timers[_t_output]);
2995 PhaseOutput output;
2996 output.Output();
2997 if (failing()) return;
2998 output.install();
2999 }
3000
3001 print_method(PHASE_FINAL_CODE, 1);
3002
3003 // He's dead, Jim.
3004 _cfg = (PhaseCFG*)((intptr_t)0xdeadbeef);
3005 _regalloc = (PhaseChaitin*)((intptr_t)0xdeadbeef);
3006 }
3007
3008 //------------------------------Final_Reshape_Counts---------------------------
3009 // This class defines counters to help identify when a method
3010 // may/must be executed using hardware with only 24-bit precision.
3011 struct Final_Reshape_Counts : public StackObj {
3012 int _call_count; // count non-inlined 'common' calls
3013 int _float_count; // count float ops requiring 24-bit precision
3014 int _double_count; // count double ops requiring more precision
3015 int _java_call_count; // count non-inlined 'java' calls
3016 int _inner_loop_count; // count loops which need alignment
3017 VectorSet _visited; // Visitation flags
3018 Node_List _tests; // Set of IfNodes & PCTableNodes
3019
3020 Final_Reshape_Counts() :
3021 _call_count(0), _float_count(0), _double_count(0),
3022 _java_call_count(0), _inner_loop_count(0) { }
3023
3024 void inc_call_count () { _call_count ++; }
3025 void inc_float_count () { _float_count ++; }
3026 void inc_double_count() { _double_count++; }
3027 void inc_java_call_count() { _java_call_count++; }
3028 void inc_inner_loop_count() { _inner_loop_count++; }
3029
3030 int get_call_count () const { return _call_count ; }
3031 int get_float_count () const { return _float_count ; }
3032 int get_double_count() const { return _double_count; }
3033 int get_java_call_count() const { return _java_call_count; }
3034 int get_inner_loop_count() const { return _inner_loop_count; }
3035 };
3036
3037 // Eliminate trivially redundant StoreCMs and accumulate their
3038 // precedence edges.
3039 void Compile::eliminate_redundant_card_marks(Node* n) {
3040 assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
3041 if (n->in(MemNode::Address)->outcnt() > 1) {
3042 // There are multiple users of the same address so it might be
3043 // possible to eliminate some of the StoreCMs
3044 Node* mem = n->in(MemNode::Memory);
3045 Node* adr = n->in(MemNode::Address);
3046 Node* val = n->in(MemNode::ValueIn);
3047 Node* prev = n;
3048 bool done = false;
3049 // Walk the chain of StoreCMs eliminating ones that match. As
3050 // long as it's a chain of single users then the optimization is
3051 // safe. Eliminating partially redundant StoreCMs would require
3052 // cloning copies down the other paths.
3053 while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
3054 if (adr == mem->in(MemNode::Address) &&
3055 val == mem->in(MemNode::ValueIn)) {
3056 // redundant StoreCM
3057 if (mem->req() > MemNode::OopStore) {
3058 // Hasn't been processed by this code yet.
3059 n->add_prec(mem->in(MemNode::OopStore));
3060 } else {
3061 // Already converted to precedence edge
3062 for (uint i = mem->req(); i < mem->len(); i++) {
3063 // Accumulate any precedence edges
3064 if (mem->in(i) != nullptr) {
3065 n->add_prec(mem->in(i));
3066 }
3067 }
3068 // Everything above this point has been processed.
3069 done = true;
3070 }
3071 // Eliminate the previous StoreCM
3072 prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3073 assert(mem->outcnt() == 0, "should be dead");
3074 mem->disconnect_inputs(this);
3075 } else {
3076 prev = mem;
3077 }
3078 mem = prev->in(MemNode::Memory);
3079 }
3080 }
3081 }
3082
3083 //------------------------------final_graph_reshaping_impl----------------------
3084 // Implement items 1-5 from final_graph_reshaping below.
3085 void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3086
3087 if ( n->outcnt() == 0 ) return; // dead node
3088 uint nop = n->Opcode();
3089
3090 // Check for 2-input instruction with "last use" on right input.
3091 // Swap to left input. Implements item (2).
3092 if( n->req() == 3 && // two-input instruction
3093 n->in(1)->outcnt() > 1 && // left use is NOT a last use
3094 (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
3095 n->in(2)->outcnt() == 1 &&// right use IS a last use
3096 !n->in(2)->is_Con() ) { // right use is not a constant
3097 // Check for commutative opcode
3098 switch( nop ) {
3099 case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL:
3100 case Op_MaxI: case Op_MaxL: case Op_MaxF: case Op_MaxD:
3101 case Op_MinI: case Op_MinL: case Op_MinF: case Op_MinD:
3102 case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL:
3103 case Op_AndL: case Op_XorL: case Op_OrL:
3104 case Op_AndI: case Op_XorI: case Op_OrI: {
3105 // Move "last use" input to left by swapping inputs
3106 n->swap_edges(1, 2);
3107 break;
3108 }
3109 default:
3110 break;
3111 }
3112 }
3113
3114 #ifdef ASSERT
3115 if( n->is_Mem() ) {
3116 int alias_idx = get_alias_index(n->as_Mem()->adr_type());
3117 assert( n->in(0) != nullptr || alias_idx != Compile::AliasIdxRaw ||
3118 // oop will be recorded in oop map if load crosses safepoint
3119 n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
3120 LoadNode::is_immutable_value(n->in(MemNode::Address))),
3121 "raw memory operations should have control edge");
3122 }
3123 if (n->is_MemBar()) {
3124 MemBarNode* mb = n->as_MemBar();
3125 if (mb->trailing_store() || mb->trailing_load_store()) {
3126 assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
3127 Node* mem = BarrierSet::barrier_set()->barrier_set_c2()->step_over_gc_barrier(mb->in(MemBarNode::Precedent));
3128 assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
3129 (mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
3130 } else if (mb->leading()) {
3131 assert(mb->trailing_membar()->leading_membar() == mb, "bad membar pair");
3132 }
3133 }
3134 #endif
3135 // Count FPU ops and common calls, implements item (3)
3136 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop, dead_nodes);
3137 if (!gc_handled) {
3138 final_graph_reshaping_main_switch(n, frc, nop, dead_nodes);
3139 }
3140
3141 // Collect CFG split points
3142 if (n->is_MultiBranch() && !n->is_RangeCheck()) {
3143 frc._tests.push(n);
3144 }
3145 }
3146
3147 void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes) {
3148 switch( nop ) {
3149 // Count all float operations that may use FPU
3150 case Op_AddF:
3151 case Op_SubF:
3152 case Op_MulF:
3153 case Op_DivF:
3154 case Op_NegF:
3155 case Op_ModF:
3156 case Op_ConvI2F:
3157 case Op_ConF:
3158 case Op_CmpF:
3159 case Op_CmpF3:
3160 case Op_StoreF:
3161 case Op_LoadF:
3162 // case Op_ConvL2F: // longs are split into 32-bit halves
3163 frc.inc_float_count();
3164 break;
3165
3166 case Op_ConvF2D:
3167 case Op_ConvD2F:
3168 frc.inc_float_count();
3169 frc.inc_double_count();
3170 break;
3171
3172 // Count all double operations that may use FPU
3173 case Op_AddD:
3174 case Op_SubD:
3175 case Op_MulD:
3176 case Op_DivD:
3177 case Op_NegD:
3178 case Op_ModD:
3179 case Op_ConvI2D:
3180 case Op_ConvD2I:
3181 // case Op_ConvL2D: // handled by leaf call
3182 // case Op_ConvD2L: // handled by leaf call
3183 case Op_ConD:
3184 case Op_CmpD:
3185 case Op_CmpD3:
3186 case Op_StoreD:
3187 case Op_LoadD:
3188 case Op_LoadD_unaligned:
3189 frc.inc_double_count();
3190 break;
3191 case Op_Opaque1: // Remove Opaque Nodes before matching
3192 case Op_Opaque3:
3193 n->subsume_by(n->in(1), this);
3194 break;
3195 case Op_CallStaticJava:
3196 case Op_CallJava:
3197 case Op_CallDynamicJava:
3198 frc.inc_java_call_count(); // Count java call site;
3199 case Op_CallRuntime:
3200 case Op_CallLeaf:
3201 case Op_CallLeafVector:
3202 case Op_CallLeafNoFP: {
3203 assert (n->is_Call(), "");
3204 CallNode *call = n->as_Call();
3205 // Count call sites where the FP mode bit would have to be flipped.
3206 // Do not count uncommon runtime calls:
3207 // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
3208 // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
3209 if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
3210 frc.inc_call_count(); // Count the call site
3211 } else { // See if uncommon argument is shared
3212 Node *n = call->in(TypeFunc::Parms);
3213 int nop = n->Opcode();
3214 // Clone shared simple arguments to uncommon calls, item (1).
3215 if (n->outcnt() > 1 &&
3216 !n->is_Proj() &&
3217 nop != Op_CreateEx &&
3218 nop != Op_CheckCastPP &&
3219 nop != Op_DecodeN &&
3220 nop != Op_DecodeNKlass &&
3221 !n->is_Mem() &&
3222 !n->is_Phi()) {
3223 Node *x = n->clone();
3224 call->set_req(TypeFunc::Parms, x);
3225 }
3226 }
3227 break;
3228 }
3229
3230 case Op_StoreCM:
3231 {
3232 // Convert OopStore dependence into precedence edge
3233 Node* prec = n->in(MemNode::OopStore);
3234 n->del_req(MemNode::OopStore);
3235 n->add_prec(prec);
3236 eliminate_redundant_card_marks(n);
3237 }
3238
3239 // fall through
3240
3241 case Op_StoreB:
3242 case Op_StoreC:
3243 case Op_StoreI:
3244 case Op_StoreL:
3245 case Op_CompareAndSwapB:
3246 case Op_CompareAndSwapS:
3247 case Op_CompareAndSwapI:
3248 case Op_CompareAndSwapL:
3249 case Op_CompareAndSwapP:
3250 case Op_CompareAndSwapN:
3251 case Op_WeakCompareAndSwapB:
3252 case Op_WeakCompareAndSwapS:
3253 case Op_WeakCompareAndSwapI:
3254 case Op_WeakCompareAndSwapL:
3255 case Op_WeakCompareAndSwapP:
3256 case Op_WeakCompareAndSwapN:
3257 case Op_CompareAndExchangeB:
3258 case Op_CompareAndExchangeS:
3259 case Op_CompareAndExchangeI:
3260 case Op_CompareAndExchangeL:
3261 case Op_CompareAndExchangeP:
3262 case Op_CompareAndExchangeN:
3263 case Op_GetAndAddS:
3264 case Op_GetAndAddB:
3265 case Op_GetAndAddI:
3266 case Op_GetAndAddL:
3267 case Op_GetAndSetS:
3268 case Op_GetAndSetB:
3269 case Op_GetAndSetI:
3270 case Op_GetAndSetL:
3271 case Op_GetAndSetP:
3272 case Op_GetAndSetN:
3273 case Op_StoreP:
3274 case Op_StoreN:
3275 case Op_StoreNKlass:
3276 case Op_LoadB:
3277 case Op_LoadUB:
3278 case Op_LoadUS:
3279 case Op_LoadI:
3280 case Op_LoadKlass:
3281 case Op_LoadNKlass:
3282 case Op_LoadL:
3283 case Op_LoadL_unaligned:
3284 case Op_LoadP:
3285 case Op_LoadN:
3286 case Op_LoadRange:
3287 case Op_LoadS:
3288 break;
3289
3290 case Op_AddP: { // Assert sane base pointers
3291 Node *addp = n->in(AddPNode::Address);
3292 assert( !addp->is_AddP() ||
3293 addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
3294 addp->in(AddPNode::Base) == n->in(AddPNode::Base),
3295 "Base pointers must match (addp %u)", addp->_idx );
3296 #ifdef _LP64
3297 if ((UseCompressedOops || UseCompressedClassPointers) &&
3298 addp->Opcode() == Op_ConP &&
3299 addp == n->in(AddPNode::Base) &&
3300 n->in(AddPNode::Offset)->is_Con()) {
3301 // If the transformation of ConP to ConN+DecodeN is beneficial depends
3302 // on the platform and on the compressed oops mode.
3303 // Use addressing with narrow klass to load with offset on x86.
3304 // Some platforms can use the constant pool to load ConP.
3305 // Do this transformation here since IGVN will convert ConN back to ConP.
3306 const Type* t = addp->bottom_type();
3307 bool is_oop = t->isa_oopptr() != nullptr;
3308 bool is_klass = t->isa_klassptr() != nullptr;
3309
3310 if ((is_oop && Matcher::const_oop_prefer_decode() ) ||
3311 (is_klass && Matcher::const_klass_prefer_decode())) {
3312 Node* nn = nullptr;
3313
3314 int op = is_oop ? Op_ConN : Op_ConNKlass;
3315
3316 // Look for existing ConN node of the same exact type.
3317 Node* r = root();
3318 uint cnt = r->outcnt();
3319 for (uint i = 0; i < cnt; i++) {
3320 Node* m = r->raw_out(i);
3321 if (m!= nullptr && m->Opcode() == op &&
3322 m->bottom_type()->make_ptr() == t) {
3323 nn = m;
3324 break;
3325 }
3326 }
3327 if (nn != nullptr) {
3328 // Decode a narrow oop to match address
3329 // [R12 + narrow_oop_reg<<3 + offset]
3330 if (is_oop) {
3331 nn = new DecodeNNode(nn, t);
3332 } else {
3333 nn = new DecodeNKlassNode(nn, t);
3334 }
3335 // Check for succeeding AddP which uses the same Base.
3336 // Otherwise we will run into the assertion above when visiting that guy.
3337 for (uint i = 0; i < n->outcnt(); ++i) {
3338 Node *out_i = n->raw_out(i);
3339 if (out_i && out_i->is_AddP() && out_i->in(AddPNode::Base) == addp) {
3340 out_i->set_req(AddPNode::Base, nn);
3341 #ifdef ASSERT
3342 for (uint j = 0; j < out_i->outcnt(); ++j) {
3343 Node *out_j = out_i->raw_out(j);
3344 assert(out_j == nullptr || !out_j->is_AddP() || out_j->in(AddPNode::Base) != addp,
3345 "more than 2 AddP nodes in a chain (out_j %u)", out_j->_idx);
3346 }
3347 #endif
3348 }
3349 }
3350 n->set_req(AddPNode::Base, nn);
3351 n->set_req(AddPNode::Address, nn);
3352 if (addp->outcnt() == 0) {
3353 addp->disconnect_inputs(this);
3354 }
3355 }
3356 }
3357 }
3358 #endif
3359 break;
3360 }
3361
3362 case Op_CastPP: {
3363 // Remove CastPP nodes to gain more freedom during scheduling but
3364 // keep the dependency they encode as control or precedence edges
3365 // (if control is set already) on memory operations. Some CastPP
3366 // nodes don't have a control (don't carry a dependency): skip
3367 // those.
3368 if (n->in(0) != nullptr) {
3369 ResourceMark rm;
3370 Unique_Node_List wq;
3371 wq.push(n);
3372 for (uint next = 0; next < wq.size(); ++next) {
3373 Node *m = wq.at(next);
3374 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
3375 Node* use = m->fast_out(i);
3376 if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
3377 use->ensure_control_or_add_prec(n->in(0));
3378 } else {
3379 switch(use->Opcode()) {
3380 case Op_AddP:
3381 case Op_DecodeN:
3382 case Op_DecodeNKlass:
3383 case Op_CheckCastPP:
3384 case Op_CastPP:
3385 wq.push(use);
3386 break;
3387 }
3388 }
3389 }
3390 }
3391 }
3392 const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false);
3393 if (is_LP64 && n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
3394 Node* in1 = n->in(1);
3395 const Type* t = n->bottom_type();
3396 Node* new_in1 = in1->clone();
3397 new_in1->as_DecodeN()->set_type(t);
3398
3399 if (!Matcher::narrow_oop_use_complex_address()) {
3400 //
3401 // x86, ARM and friends can handle 2 adds in addressing mode
3402 // and Matcher can fold a DecodeN node into address by using
3403 // a narrow oop directly and do implicit null check in address:
3404 //
3405 // [R12 + narrow_oop_reg<<3 + offset]
3406 // NullCheck narrow_oop_reg
3407 //
3408 // On other platforms (Sparc) we have to keep new DecodeN node and
3409 // use it to do implicit null check in address:
3410 //
3411 // decode_not_null narrow_oop_reg, base_reg
3412 // [base_reg + offset]
3413 // NullCheck base_reg
3414 //
3415 // Pin the new DecodeN node to non-null path on these platform (Sparc)
3416 // to keep the information to which null check the new DecodeN node
3417 // corresponds to use it as value in implicit_null_check().
3418 //
3419 new_in1->set_req(0, n->in(0));
3420 }
3421
3422 n->subsume_by(new_in1, this);
3423 if (in1->outcnt() == 0) {
3424 in1->disconnect_inputs(this);
3425 }
3426 } else {
3427 n->subsume_by(n->in(1), this);
3428 if (n->outcnt() == 0) {
3429 n->disconnect_inputs(this);
3430 }
3431 }
3432 break;
3433 }
3434 #ifdef _LP64
3435 case Op_CmpP:
3436 // Do this transformation here to preserve CmpPNode::sub() and
3437 // other TypePtr related Ideal optimizations (for example, ptr nullness).
3438 if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
3439 Node* in1 = n->in(1);
3440 Node* in2 = n->in(2);
3441 if (!in1->is_DecodeNarrowPtr()) {
3442 in2 = in1;
3443 in1 = n->in(2);
3444 }
3445 assert(in1->is_DecodeNarrowPtr(), "sanity");
3446
3447 Node* new_in2 = nullptr;
3448 if (in2->is_DecodeNarrowPtr()) {
3449 assert(in2->Opcode() == in1->Opcode(), "must be same node type");
3450 new_in2 = in2->in(1);
3451 } else if (in2->Opcode() == Op_ConP) {
3452 const Type* t = in2->bottom_type();
3453 if (t == TypePtr::NULL_PTR) {
3454 assert(in1->is_DecodeN(), "compare klass to null?");
3455 // Don't convert CmpP null check into CmpN if compressed
3456 // oops implicit null check is not generated.
3457 // This will allow to generate normal oop implicit null check.
3458 if (Matcher::gen_narrow_oop_implicit_null_checks())
3459 new_in2 = ConNode::make(TypeNarrowOop::NULL_PTR);
3460 //
3461 // This transformation together with CastPP transformation above
3462 // will generated code for implicit null checks for compressed oops.
3463 //
3464 // The original code after Optimize()
3465 //
3466 // LoadN memory, narrow_oop_reg
3467 // decode narrow_oop_reg, base_reg
3468 // CmpP base_reg, nullptr
3469 // CastPP base_reg // NotNull
3470 // Load [base_reg + offset], val_reg
3471 //
3472 // after these transformations will be
3473 //
3474 // LoadN memory, narrow_oop_reg
3475 // CmpN narrow_oop_reg, nullptr
3476 // decode_not_null narrow_oop_reg, base_reg
3477 // Load [base_reg + offset], val_reg
3478 //
3479 // and the uncommon path (== nullptr) will use narrow_oop_reg directly
3480 // since narrow oops can be used in debug info now (see the code in
3481 // final_graph_reshaping_walk()).
3482 //
3483 // At the end the code will be matched to
3484 // on x86:
3485 //
3486 // Load_narrow_oop memory, narrow_oop_reg
3487 // Load [R12 + narrow_oop_reg<<3 + offset], val_reg
3488 // NullCheck narrow_oop_reg
3489 //
3490 // and on sparc:
3491 //
3492 // Load_narrow_oop memory, narrow_oop_reg
3493 // decode_not_null narrow_oop_reg, base_reg
3494 // Load [base_reg + offset], val_reg
3495 // NullCheck base_reg
3496 //
3497 } else if (t->isa_oopptr()) {
3498 new_in2 = ConNode::make(t->make_narrowoop());
3499 } else if (t->isa_klassptr()) {
3500 new_in2 = ConNode::make(t->make_narrowklass());
3501 }
3502 }
3503 if (new_in2 != nullptr) {
3504 Node* cmpN = new CmpNNode(in1->in(1), new_in2);
3505 n->subsume_by(cmpN, this);
3506 if (in1->outcnt() == 0) {
3507 in1->disconnect_inputs(this);
3508 }
3509 if (in2->outcnt() == 0) {
3510 in2->disconnect_inputs(this);
3511 }
3512 }
3513 }
3514 break;
3515
3516 case Op_DecodeN:
3517 case Op_DecodeNKlass:
3518 assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
3519 // DecodeN could be pinned when it can't be fold into
3520 // an address expression, see the code for Op_CastPP above.
3521 assert(n->in(0) == nullptr || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
3522 break;
3523
3524 case Op_EncodeP:
3525 case Op_EncodePKlass: {
3526 Node* in1 = n->in(1);
3527 if (in1->is_DecodeNarrowPtr()) {
3528 n->subsume_by(in1->in(1), this);
3529 } else if (in1->Opcode() == Op_ConP) {
3530 const Type* t = in1->bottom_type();
3531 if (t == TypePtr::NULL_PTR) {
3532 assert(t->isa_oopptr(), "null klass?");
3533 n->subsume_by(ConNode::make(TypeNarrowOop::NULL_PTR), this);
3534 } else if (t->isa_oopptr()) {
3535 n->subsume_by(ConNode::make(t->make_narrowoop()), this);
3536 } else if (t->isa_klassptr()) {
3537 n->subsume_by(ConNode::make(t->make_narrowklass()), this);
3538 }
3539 }
3540 if (in1->outcnt() == 0) {
3541 in1->disconnect_inputs(this);
3542 }
3543 break;
3544 }
3545
3546 case Op_Proj: {
3547 if (OptimizeStringConcat || IncrementalInline) {
3548 ProjNode* proj = n->as_Proj();
3549 if (proj->_is_io_use) {
3550 assert(proj->_con == TypeFunc::I_O || proj->_con == TypeFunc::Memory, "");
3551 // Separate projections were used for the exception path which
3552 // are normally removed by a late inline. If it wasn't inlined
3553 // then they will hang around and should just be replaced with
3554 // the original one. Merge them.
3555 Node* non_io_proj = proj->in(0)->as_Multi()->proj_out_or_null(proj->_con, false /*is_io_use*/);
3556 if (non_io_proj != nullptr) {
3557 proj->subsume_by(non_io_proj , this);
3558 }
3559 }
3560 }
3561 break;
3562 }
3563
3564 case Op_Phi:
3565 if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
3566 // The EncodeP optimization may create Phi with the same edges
3567 // for all paths. It is not handled well by Register Allocator.
3568 Node* unique_in = n->in(1);
3569 assert(unique_in != nullptr, "");
3570 uint cnt = n->req();
3571 for (uint i = 2; i < cnt; i++) {
3572 Node* m = n->in(i);
3573 assert(m != nullptr, "");
3574 if (unique_in != m)
3575 unique_in = nullptr;
3576 }
3577 if (unique_in != nullptr) {
3578 n->subsume_by(unique_in, this);
3579 }
3580 }
3581 break;
3582
3583 #endif
3584
3585 #ifdef ASSERT
3586 case Op_CastII:
3587 // Verify that all range check dependent CastII nodes were removed.
3588 if (n->isa_CastII()->has_range_check()) {
3589 n->dump(3);
3590 assert(false, "Range check dependent CastII node was not removed");
3591 }
3592 break;
3593 #endif
3594
3595 case Op_ModI:
3596 if (UseDivMod) {
3597 // Check if a%b and a/b both exist
3598 Node* d = n->find_similar(Op_DivI);
3599 if (d) {
3600 // Replace them with a fused divmod if supported
3601 if (Matcher::has_match_rule(Op_DivModI)) {
3602 DivModINode* divmod = DivModINode::make(n);
3603 d->subsume_by(divmod->div_proj(), this);
3604 n->subsume_by(divmod->mod_proj(), this);
3605 } else {
3606 // replace a%b with a-((a/b)*b)
3607 Node* mult = new MulINode(d, d->in(2));
3608 Node* sub = new SubINode(d->in(1), mult);
3609 n->subsume_by(sub, this);
3610 }
3611 }
3612 }
3613 break;
3614
3615 case Op_ModL:
3616 if (UseDivMod) {
3617 // Check if a%b and a/b both exist
3618 Node* d = n->find_similar(Op_DivL);
3619 if (d) {
3620 // Replace them with a fused divmod if supported
3621 if (Matcher::has_match_rule(Op_DivModL)) {
3622 DivModLNode* divmod = DivModLNode::make(n);
3623 d->subsume_by(divmod->div_proj(), this);
3624 n->subsume_by(divmod->mod_proj(), this);
3625 } else {
3626 // replace a%b with a-((a/b)*b)
3627 Node* mult = new MulLNode(d, d->in(2));
3628 Node* sub = new SubLNode(d->in(1), mult);
3629 n->subsume_by(sub, this);
3630 }
3631 }
3632 }
3633 break;
3634
3635 case Op_UModI:
3636 if (UseDivMod) {
3637 // Check if a%b and a/b both exist
3638 Node* d = n->find_similar(Op_UDivI);
3639 if (d) {
3640 // Replace them with a fused unsigned divmod if supported
3641 if (Matcher::has_match_rule(Op_UDivModI)) {
3642 UDivModINode* divmod = UDivModINode::make(n);
3643 d->subsume_by(divmod->div_proj(), this);
3644 n->subsume_by(divmod->mod_proj(), this);
3645 } else {
3646 // replace a%b with a-((a/b)*b)
3647 Node* mult = new MulINode(d, d->in(2));
3648 Node* sub = new SubINode(d->in(1), mult);
3649 n->subsume_by(sub, this);
3650 }
3651 }
3652 }
3653 break;
3654
3655 case Op_UModL:
3656 if (UseDivMod) {
3657 // Check if a%b and a/b both exist
3658 Node* d = n->find_similar(Op_UDivL);
3659 if (d) {
3660 // Replace them with a fused unsigned divmod if supported
3661 if (Matcher::has_match_rule(Op_UDivModL)) {
3662 UDivModLNode* divmod = UDivModLNode::make(n);
3663 d->subsume_by(divmod->div_proj(), this);
3664 n->subsume_by(divmod->mod_proj(), this);
3665 } else {
3666 // replace a%b with a-((a/b)*b)
3667 Node* mult = new MulLNode(d, d->in(2));
3668 Node* sub = new SubLNode(d->in(1), mult);
3669 n->subsume_by(sub, this);
3670 }
3671 }
3672 }
3673 break;
3674
3675 case Op_LoadVector:
3676 case Op_StoreVector:
3677 case Op_LoadVectorGather:
3678 case Op_StoreVectorScatter:
3679 case Op_LoadVectorGatherMasked:
3680 case Op_StoreVectorScatterMasked:
3681 case Op_VectorCmpMasked:
3682 case Op_VectorMaskGen:
3683 case Op_LoadVectorMasked:
3684 case Op_StoreVectorMasked:
3685 break;
3686
3687 case Op_AddReductionVI:
3688 case Op_AddReductionVL:
3689 case Op_AddReductionVF:
3690 case Op_AddReductionVD:
3691 case Op_MulReductionVI:
3692 case Op_MulReductionVL:
3693 case Op_MulReductionVF:
3694 case Op_MulReductionVD:
3695 case Op_MinReductionV:
3696 case Op_MaxReductionV:
3697 case Op_AndReductionV:
3698 case Op_OrReductionV:
3699 case Op_XorReductionV:
3700 break;
3701
3702 case Op_PackB:
3703 case Op_PackS:
3704 case Op_PackI:
3705 case Op_PackF:
3706 case Op_PackL:
3707 case Op_PackD:
3708 if (n->req()-1 > 2) {
3709 // Replace many operand PackNodes with a binary tree for matching
3710 PackNode* p = (PackNode*) n;
3711 Node* btp = p->binary_tree_pack(1, n->req());
3712 n->subsume_by(btp, this);
3713 }
3714 break;
3715 case Op_Loop:
3716 assert(!n->as_Loop()->is_loop_nest_inner_loop() || _loop_opts_cnt == 0, "should have been turned into a counted loop");
3717 case Op_CountedLoop:
3718 case Op_LongCountedLoop:
3719 case Op_OuterStripMinedLoop:
3720 if (n->as_Loop()->is_inner_loop()) {
3721 frc.inc_inner_loop_count();
3722 }
3723 n->as_Loop()->verify_strip_mined(0);
3724 break;
3725 case Op_LShiftI:
3726 case Op_RShiftI:
3727 case Op_URShiftI:
3728 case Op_LShiftL:
3729 case Op_RShiftL:
3730 case Op_URShiftL:
3731 if (Matcher::need_masked_shift_count) {
3732 // The cpu's shift instructions don't restrict the count to the
3733 // lower 5/6 bits. We need to do the masking ourselves.
3734 Node* in2 = n->in(2);
3735 juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
3736 const TypeInt* t = in2->find_int_type();
3737 if (t != nullptr && t->is_con()) {
3738 juint shift = t->get_con();
3739 if (shift > mask) { // Unsigned cmp
3740 n->set_req(2, ConNode::make(TypeInt::make(shift & mask)));
3741 }
3742 } else {
3743 if (t == nullptr || t->_lo < 0 || t->_hi > (int)mask) {
3744 Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
3745 n->set_req(2, shift);
3746 }
3747 }
3748 if (in2->outcnt() == 0) { // Remove dead node
3749 in2->disconnect_inputs(this);
3750 }
3751 }
3752 break;
3753 case Op_MemBarStoreStore:
3754 case Op_MemBarRelease:
3755 // Break the link with AllocateNode: it is no longer useful and
3756 // confuses register allocation.
3757 if (n->req() > MemBarNode::Precedent) {
3758 n->set_req(MemBarNode::Precedent, top());
3759 }
3760 break;
3761 case Op_MemBarAcquire: {
3762 if (n->as_MemBar()->trailing_load() && n->req() > MemBarNode::Precedent) {
3763 // At parse time, the trailing MemBarAcquire for a volatile load
3764 // is created with an edge to the load. After optimizations,
3765 // that input may be a chain of Phis. If those phis have no
3766 // other use, then the MemBarAcquire keeps them alive and
3767 // register allocation can be confused.
3768 dead_nodes.push(n->in(MemBarNode::Precedent));
3769 n->set_req(MemBarNode::Precedent, top());
3770 }
3771 break;
3772 }
3773 case Op_Blackhole:
3774 break;
3775 case Op_RangeCheck: {
3776 RangeCheckNode* rc = n->as_RangeCheck();
3777 Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
3778 n->subsume_by(iff, this);
3779 frc._tests.push(iff);
3780 break;
3781 }
3782 case Op_ConvI2L: {
3783 if (!Matcher::convi2l_type_required) {
3784 // Code generation on some platforms doesn't need accurate
3785 // ConvI2L types. Widening the type can help remove redundant
3786 // address computations.
3787 n->as_Type()->set_type(TypeLong::INT);
3788 ResourceMark rm;
3789 Unique_Node_List wq;
3790 wq.push(n);
3791 for (uint next = 0; next < wq.size(); next++) {
3792 Node *m = wq.at(next);
3793
3794 for(;;) {
3795 // Loop over all nodes with identical inputs edges as m
3796 Node* k = m->find_similar(m->Opcode());
3797 if (k == nullptr) {
3798 break;
3799 }
3800 // Push their uses so we get a chance to remove node made
3801 // redundant
3802 for (DUIterator_Fast imax, i = k->fast_outs(imax); i < imax; i++) {
3803 Node* u = k->fast_out(i);
3804 if (u->Opcode() == Op_LShiftL ||
3805 u->Opcode() == Op_AddL ||
3806 u->Opcode() == Op_SubL ||
3807 u->Opcode() == Op_AddP) {
3808 wq.push(u);
3809 }
3810 }
3811 // Replace all nodes with identical edges as m with m
3812 k->subsume_by(m, this);
3813 }
3814 }
3815 }
3816 break;
3817 }
3818 case Op_CmpUL: {
3819 if (!Matcher::has_match_rule(Op_CmpUL)) {
3820 // No support for unsigned long comparisons
3821 ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3822 Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3823 Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3824 ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3825 Node* andl = new AndLNode(orl, remove_sign_mask);
3826 Node* cmp = new CmpLNode(andl, n->in(2));
3827 n->subsume_by(cmp, this);
3828 }
3829 break;
3830 }
3831 default:
3832 assert(!n->is_Call(), "");
3833 assert(!n->is_Mem(), "");
3834 assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3835 break;
3836 }
3837 }
3838
3839 //------------------------------final_graph_reshaping_walk---------------------
3840 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3841 // requires that the walk visits a node's inputs before visiting the node.
3842 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3843 Unique_Node_List sfpt;
3844
3845 frc._visited.set(root->_idx); // first, mark node as visited
3846 uint cnt = root->req();
3847 Node *n = root;
3848 uint i = 0;
3849 while (true) {
3850 if (i < cnt) {
3851 // Place all non-visited non-null inputs onto stack
3852 Node* m = n->in(i);
3853 ++i;
3854 if (m != nullptr && !frc._visited.test_set(m->_idx)) {
3855 if (m->is_SafePoint() && m->as_SafePoint()->jvms() != nullptr) {
3856 // compute worst case interpreter size in case of a deoptimization
3857 update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
3858
3859 sfpt.push(m);
3860 }
3861 cnt = m->req();
3862 nstack.push(n, i); // put on stack parent and next input's index
3863 n = m;
3864 i = 0;
3865 }
3866 } else {
3867 // Now do post-visit work
3868 final_graph_reshaping_impl(n, frc, dead_nodes);
3869 if (nstack.is_empty())
3870 break; // finished
3871 n = nstack.node(); // Get node from stack
3872 cnt = n->req();
3873 i = nstack.index();
3874 nstack.pop(); // Shift to the next node on stack
3875 }
3876 }
3877
3878 // Skip next transformation if compressed oops are not used.
3879 if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
3880 (!UseCompressedOops && !UseCompressedClassPointers))
3881 return;
3882
3883 // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
3884 // It could be done for an uncommon traps or any safepoints/calls
3885 // if the DecodeN/DecodeNKlass node is referenced only in a debug info.
3886 while (sfpt.size() > 0) {
3887 n = sfpt.pop();
3888 JVMState *jvms = n->as_SafePoint()->jvms();
3889 assert(jvms != nullptr, "sanity");
3890 int start = jvms->debug_start();
3891 int end = n->req();
3892 bool is_uncommon = (n->is_CallStaticJava() &&
3893 n->as_CallStaticJava()->uncommon_trap_request() != 0);
3894 for (int j = start; j < end; j++) {
3895 Node* in = n->in(j);
3896 if (in->is_DecodeNarrowPtr()) {
3897 bool safe_to_skip = true;
3898 if (!is_uncommon ) {
3899 // Is it safe to skip?
3900 for (uint i = 0; i < in->outcnt(); i++) {
3901 Node* u = in->raw_out(i);
3902 if (!u->is_SafePoint() ||
3903 (u->is_Call() && u->as_Call()->has_non_debug_use(n))) {
3904 safe_to_skip = false;
3905 }
3906 }
3907 }
3908 if (safe_to_skip) {
3909 n->set_req(j, in->in(1));
3910 }
3911 if (in->outcnt() == 0) {
3912 in->disconnect_inputs(this);
3913 }
3914 }
3915 }
3916 }
3917 }
3918
3919 //------------------------------final_graph_reshaping--------------------------
3920 // Final Graph Reshaping.
3921 //
3922 // (1) Clone simple inputs to uncommon calls, so they can be scheduled late
3923 // and not commoned up and forced early. Must come after regular
3924 // optimizations to avoid GVN undoing the cloning. Clone constant
3925 // inputs to Loop Phis; these will be split by the allocator anyways.
3926 // Remove Opaque nodes.
3927 // (2) Move last-uses by commutative operations to the left input to encourage
3928 // Intel update-in-place two-address operations and better register usage
3929 // on RISCs. Must come after regular optimizations to avoid GVN Ideal
3930 // calls canonicalizing them back.
3931 // (3) Count the number of double-precision FP ops, single-precision FP ops
3932 // and call sites. On Intel, we can get correct rounding either by
3933 // forcing singles to memory (requires extra stores and loads after each
3934 // FP bytecode) or we can set a rounding mode bit (requires setting and
3935 // clearing the mode bit around call sites). The mode bit is only used
3936 // if the relative frequency of single FP ops to calls is low enough.
3937 // This is a key transform for SPEC mpeg_audio.
3938 // (4) Detect infinite loops; blobs of code reachable from above but not
3939 // below. Several of the Code_Gen algorithms fail on such code shapes,
3940 // so we simply bail out. Happens a lot in ZKM.jar, but also happens
3941 // from time to time in other codes (such as -Xcomp finalizer loops, etc).
3942 // Detection is by looking for IfNodes where only 1 projection is
3943 // reachable from below or CatchNodes missing some targets.
3944 // (5) Assert for insane oop offsets in debug mode.
3945
3946 bool Compile::final_graph_reshaping() {
3947 // an infinite loop may have been eliminated by the optimizer,
3948 // in which case the graph will be empty.
3949 if (root()->req() == 1) {
3950 // Do not compile method that is only a trivial infinite loop,
3951 // since the content of the loop may have been eliminated.
3952 record_method_not_compilable("trivial infinite loop");
3953 return true;
3954 }
3955
3956 // Expensive nodes have their control input set to prevent the GVN
3957 // from freely commoning them. There's no GVN beyond this point so
3958 // no need to keep the control input. We want the expensive nodes to
3959 // be freely moved to the least frequent code path by gcm.
3960 assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
3961 for (int i = 0; i < expensive_count(); i++) {
3962 _expensive_nodes.at(i)->set_req(0, nullptr);
3963 }
3964
3965 Final_Reshape_Counts frc;
3966
3967 // Visit everybody reachable!
3968 // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
3969 Node_Stack nstack(live_nodes() >> 1);
3970 Unique_Node_List dead_nodes;
3971 final_graph_reshaping_walk(nstack, root(), frc, dead_nodes);
3972
3973 // Check for unreachable (from below) code (i.e., infinite loops).
3974 for( uint i = 0; i < frc._tests.size(); i++ ) {
3975 MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
3976 // Get number of CFG targets.
3977 // Note that PCTables include exception targets after calls.
3978 uint required_outcnt = n->required_outcnt();
3979 if (n->outcnt() != required_outcnt) {
3980 // Check for a few special cases. Rethrow Nodes never take the
3981 // 'fall-thru' path, so expected kids is 1 less.
3982 if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
3983 if (n->in(0)->in(0)->is_Call()) {
3984 CallNode* call = n->in(0)->in(0)->as_Call();
3985 if (call->entry_point() == OptoRuntime::rethrow_stub()) {
3986 required_outcnt--; // Rethrow always has 1 less kid
3987 } else if (call->req() > TypeFunc::Parms &&
3988 call->is_CallDynamicJava()) {
3989 // Check for null receiver. In such case, the optimizer has
3990 // detected that the virtual call will always result in a null
3991 // pointer exception. The fall-through projection of this CatchNode
3992 // will not be populated.
3993 Node* arg0 = call->in(TypeFunc::Parms);
3994 if (arg0->is_Type() &&
3995 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
3996 required_outcnt--;
3997 }
3998 } else if (call->entry_point() == OptoRuntime::new_array_Java() ||
3999 call->entry_point() == OptoRuntime::new_array_nozero_Java()) {
4000 // Check for illegal array length. In such case, the optimizer has
4001 // detected that the allocation attempt will always result in an
4002 // exception. There is no fall-through projection of this CatchNode .
4003 assert(call->is_CallStaticJava(), "static call expected");
4004 assert(call->req() == call->jvms()->endoff() + 1, "missing extra input");
4005 uint valid_length_test_input = call->req() - 1;
4006 Node* valid_length_test = call->in(valid_length_test_input);
4007 call->del_req(valid_length_test_input);
4008 if (valid_length_test->find_int_con(1) == 0) {
4009 required_outcnt--;
4010 }
4011 dead_nodes.push(valid_length_test);
4012 assert(n->outcnt() == required_outcnt, "malformed control flow");
4013 continue;
4014 }
4015 }
4016 }
4017
4018 // Recheck with a better notion of 'required_outcnt'
4019 if (n->outcnt() != required_outcnt) {
4020 record_method_not_compilable("malformed control flow");
4021 return true; // Not all targets reachable!
4022 }
4023 } else if (n->is_PCTable() && n->in(0) && n->in(0)->in(0) && n->in(0)->in(0)->is_Call()) {
4024 CallNode* call = n->in(0)->in(0)->as_Call();
4025 if (call->entry_point() == OptoRuntime::new_array_Java() ||
4026 call->entry_point() == OptoRuntime::new_array_nozero_Java()) {
4027 assert(call->is_CallStaticJava(), "static call expected");
4028 assert(call->req() == call->jvms()->endoff() + 1, "missing extra input");
4029 uint valid_length_test_input = call->req() - 1;
4030 dead_nodes.push(call->in(valid_length_test_input));
4031 call->del_req(valid_length_test_input); // valid length test useless now
4032 }
4033 }
4034 // Check that I actually visited all kids. Unreached kids
4035 // must be infinite loops.
4036 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
4037 if (!frc._visited.test(n->fast_out(j)->_idx)) {
4038 record_method_not_compilable("infinite loop");
4039 return true; // Found unvisited kid; must be unreach
4040 }
4041
4042 // Here so verification code in final_graph_reshaping_walk()
4043 // always see an OuterStripMinedLoopEnd
4044 if (n->is_OuterStripMinedLoopEnd() || n->is_LongCountedLoopEnd()) {
4045 IfNode* init_iff = n->as_If();
4046 Node* iff = new IfNode(init_iff->in(0), init_iff->in(1), init_iff->_prob, init_iff->_fcnt);
4047 n->subsume_by(iff, this);
4048 }
4049 }
4050
4051 while (dead_nodes.size() > 0) {
4052 Node* m = dead_nodes.pop();
4053 if (m->outcnt() == 0 && m != top()) {
4054 for (uint j = 0; j < m->req(); j++) {
4055 Node* in = m->in(j);
4056 if (in != nullptr) {
4057 dead_nodes.push(in);
4058 }
4059 }
4060 m->disconnect_inputs(this);
4061 }
4062 }
4063
4064 #ifdef IA32
4065 // If original bytecodes contained a mixture of floats and doubles
4066 // check if the optimizer has made it homogeneous, item (3).
4067 if (UseSSE == 0 &&
4068 frc.get_float_count() > 32 &&
4069 frc.get_double_count() == 0 &&
4070 (10 * frc.get_call_count() < frc.get_float_count()) ) {
4071 set_24_bit_selection_and_mode(false, true);
4072 }
4073 #endif // IA32
4074
4075 set_java_calls(frc.get_java_call_count());
4076 set_inner_loops(frc.get_inner_loop_count());
4077
4078 // No infinite loops, no reason to bail out.
4079 return false;
4080 }
4081
4082 //-----------------------------too_many_traps----------------------------------
4083 // Report if there are too many traps at the current method and bci.
4084 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
4085 bool Compile::too_many_traps(ciMethod* method,
4086 int bci,
4087 Deoptimization::DeoptReason reason) {
4088 ciMethodData* md = method->method_data();
4089 if (md->is_empty()) {
4090 // Assume the trap has not occurred, or that it occurred only
4091 // because of a transient condition during start-up in the interpreter.
4092 return false;
4093 }
4094 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4095 if (md->has_trap_at(bci, m, reason) != 0) {
4096 // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
4097 // Also, if there are multiple reasons, or if there is no per-BCI record,
4098 // assume the worst.
4099 if (log())
4100 log()->elem("observe trap='%s' count='%d'",
4101 Deoptimization::trap_reason_name(reason),
4102 md->trap_count(reason));
4103 return true;
4104 } else {
4105 // Ignore method/bci and see if there have been too many globally.
4106 return too_many_traps(reason, md);
4107 }
4108 }
4109
4110 // Less-accurate variant which does not require a method and bci.
4111 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
4112 ciMethodData* logmd) {
4113 if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
4114 // Too many traps globally.
4115 // Note that we use cumulative trap_count, not just md->trap_count.
4116 if (log()) {
4117 int mcount = (logmd == nullptr)? -1: (int)logmd->trap_count(reason);
4118 log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
4119 Deoptimization::trap_reason_name(reason),
4120 mcount, trap_count(reason));
4121 }
4122 return true;
4123 } else {
4124 // The coast is clear.
4125 return false;
4126 }
4127 }
4128
4129 //--------------------------too_many_recompiles--------------------------------
4130 // Report if there are too many recompiles at the current method and bci.
4131 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
4132 // Is not eager to return true, since this will cause the compiler to use
4133 // Action_none for a trap point, to avoid too many recompilations.
4134 bool Compile::too_many_recompiles(ciMethod* method,
4135 int bci,
4136 Deoptimization::DeoptReason reason) {
4137 ciMethodData* md = method->method_data();
4138 if (md->is_empty()) {
4139 // Assume the trap has not occurred, or that it occurred only
4140 // because of a transient condition during start-up in the interpreter.
4141 return false;
4142 }
4143 // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
4144 uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
4145 uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero
4146 Deoptimization::DeoptReason per_bc_reason
4147 = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
4148 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr;
4149 if ((per_bc_reason == Deoptimization::Reason_none
4150 || md->has_trap_at(bci, m, reason) != 0)
4151 // The trap frequency measure we care about is the recompile count:
4152 && md->trap_recompiled_at(bci, m)
4153 && md->overflow_recompile_count() >= bc_cutoff) {
4154 // Do not emit a trap here if it has already caused recompilations.
4155 // Also, if there are multiple reasons, or if there is no per-BCI record,
4156 // assume the worst.
4157 if (log())
4158 log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
4159 Deoptimization::trap_reason_name(reason),
4160 md->trap_count(reason),
4161 md->overflow_recompile_count());
4162 return true;
4163 } else if (trap_count(reason) != 0
4164 && decompile_count() >= m_cutoff) {
4165 // Too many recompiles globally, and we have seen this sort of trap.
4166 // Use cumulative decompile_count, not just md->decompile_count.
4167 if (log())
4168 log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
4169 Deoptimization::trap_reason_name(reason),
4170 md->trap_count(reason), trap_count(reason),
4171 md->decompile_count(), decompile_count());
4172 return true;
4173 } else {
4174 // The coast is clear.
4175 return false;
4176 }
4177 }
4178
4179 // Compute when not to trap. Used by matching trap based nodes and
4180 // NullCheck optimization.
4181 void Compile::set_allowed_deopt_reasons() {
4182 _allowed_reasons = 0;
4183 if (is_method_compilation()) {
4184 for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
4185 assert(rs < BitsPerInt, "recode bit map");
4186 if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
4187 _allowed_reasons |= nth_bit(rs);
4188 }
4189 }
4190 }
4191 }
4192
4193 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4194 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4195 }
4196
4197 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4198 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4199 }
4200
4201 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4202 if (holder->is_initialized()) {
4203 return false;
4204 }
4205 if (holder->is_being_initialized()) {
4206 if (accessing_method->holder() == holder) {
4207 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4208 // <init>, or a static method. In all those cases, there was an initialization
4209 // barrier on the holder klass passed.
4210 if (accessing_method->is_static_initializer() ||
4211 accessing_method->is_object_initializer() ||
4212 accessing_method->is_static()) {
4213 return false;
4214 }
4215 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4216 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4217 // In case of <init> or a static method, the barrier is on the subclass is not enough:
4218 // child class can become fully initialized while its parent class is still being initialized.
4219 if (accessing_method->is_static_initializer()) {
4220 return false;
4221 }
4222 }
4223 ciMethod* root = method(); // the root method of compilation
4224 if (root != accessing_method) {
4225 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4226 }
4227 }
4228 return true;
4229 }
4230
4231 #ifndef PRODUCT
4232 //------------------------------verify_bidirectional_edges---------------------
4233 // For each input edge to a node (ie - for each Use-Def edge), verify that
4234 // there is a corresponding Def-Use edge.
4235 void Compile::verify_bidirectional_edges(Unique_Node_List &visited) {
4236 // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4237 uint stack_size = live_nodes() >> 4;
4238 Node_List nstack(MAX2(stack_size, (uint)OptoNodeListSize));
4239 nstack.push(_root);
4240
4241 while (nstack.size() > 0) {
4242 Node* n = nstack.pop();
4243 if (visited.member(n)) {
4244 continue;
4245 }
4246 visited.push(n);
4247
4248 // Walk over all input edges, checking for correspondence
4249 uint length = n->len();
4250 for (uint i = 0; i < length; i++) {
4251 Node* in = n->in(i);
4252 if (in != nullptr && !visited.member(in)) {
4253 nstack.push(in); // Put it on stack
4254 }
4255 if (in != nullptr && !in->is_top()) {
4256 // Count instances of `next`
4257 int cnt = 0;
4258 for (uint idx = 0; idx < in->_outcnt; idx++) {
4259 if (in->_out[idx] == n) {
4260 cnt++;
4261 }
4262 }
4263 assert(cnt > 0, "Failed to find Def-Use edge.");
4264 // Check for duplicate edges
4265 // walk the input array downcounting the input edges to n
4266 for (uint j = 0; j < length; j++) {
4267 if (n->in(j) == in) {
4268 cnt--;
4269 }
4270 }
4271 assert(cnt == 0, "Mismatched edge count.");
4272 } else if (in == nullptr) {
4273 assert(i == 0 || i >= n->req() ||
4274 n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4275 (n->is_Unlock() && i == (n->req() - 1)) ||
4276 (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4277 "only region, phi, arraycopy, unlock or membar nodes have null data edges");
4278 } else {
4279 assert(in->is_top(), "sanity");
4280 // Nothing to check.
4281 }
4282 }
4283 }
4284 }
4285
4286 //------------------------------verify_graph_edges---------------------------
4287 // Walk the Graph and verify that there is a one-to-one correspondence
4288 // between Use-Def edges and Def-Use edges in the graph.
4289 void Compile::verify_graph_edges(bool no_dead_code) {
4290 if (VerifyGraphEdges) {
4291 Unique_Node_List visited;
4292
4293 // Call graph walk to check edges
4294 verify_bidirectional_edges(visited);
4295 if (no_dead_code) {
4296 // Now make sure that no visited node is used by an unvisited node.
4297 bool dead_nodes = false;
4298 Unique_Node_List checked;
4299 while (visited.size() > 0) {
4300 Node* n = visited.pop();
4301 checked.push(n);
4302 for (uint i = 0; i < n->outcnt(); i++) {
4303 Node* use = n->raw_out(i);
4304 if (checked.member(use)) continue; // already checked
4305 if (visited.member(use)) continue; // already in the graph
4306 if (use->is_Con()) continue; // a dead ConNode is OK
4307 // At this point, we have found a dead node which is DU-reachable.
4308 if (!dead_nodes) {
4309 tty->print_cr("*** Dead nodes reachable via DU edges:");
4310 dead_nodes = true;
4311 }
4312 use->dump(2);
4313 tty->print_cr("---");
4314 checked.push(use); // No repeats; pretend it is now checked.
4315 }
4316 }
4317 assert(!dead_nodes, "using nodes must be reachable from root");
4318 }
4319 }
4320 }
4321 #endif
4322
4323 // The Compile object keeps track of failure reasons separately from the ciEnv.
4324 // This is required because there is not quite a 1-1 relation between the
4325 // ciEnv and its compilation task and the Compile object. Note that one
4326 // ciEnv might use two Compile objects, if C2Compiler::compile_method decides
4327 // to backtrack and retry without subsuming loads. Other than this backtracking
4328 // behavior, the Compile's failure reason is quietly copied up to the ciEnv
4329 // by the logic in C2Compiler.
4330 void Compile::record_failure(const char* reason) {
4331 if (log() != nullptr) {
4332 log()->elem("failure reason='%s' phase='compile'", reason);
4333 }
4334 if (_failure_reason.get() == nullptr) {
4335 // Record the first failure reason.
4336 _failure_reason.set(reason);
4337 }
4338
4339 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
4340 C->print_method(PHASE_FAILURE, 1);
4341 }
4342 _root = nullptr; // flush the graph, too
4343 }
4344
4345 Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator)
4346 : TraceTime(name, accumulator, CITime, CITimeVerbose),
4347 _phase_name(name), _dolog(CITimeVerbose)
4348 {
4349 if (_dolog) {
4350 C = Compile::current();
4351 _log = C->log();
4352 } else {
4353 C = nullptr;
4354 _log = nullptr;
4355 }
4356 if (_log != nullptr) {
4357 _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4358 _log->stamp();
4359 _log->end_head();
4360 }
4361 }
4362
4363 Compile::TracePhase::~TracePhase() {
4364
4365 C = Compile::current();
4366 if (_dolog) {
4367 _log = C->log();
4368 } else {
4369 _log = nullptr;
4370 }
4371
4372 #ifdef ASSERT
4373 if (PrintIdealNodeCount) {
4374 tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
4375 _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
4376 }
4377
4378 if (VerifyIdealNodeCount) {
4379 Compile::current()->print_missing_nodes();
4380 }
4381 #endif
4382
4383 if (_log != nullptr) {
4384 _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4385 }
4386 }
4387
4388 //----------------------------static_subtype_check-----------------------------
4389 // Shortcut important common cases when superklass is exact:
4390 // (0) superklass is java.lang.Object (can occur in reflective code)
4391 // (1) subklass is already limited to a subtype of superklass => always ok
4392 // (2) subklass does not overlap with superklass => always fail
4393 // (3) superklass has NO subtypes and we can check with a simple compare.
4394 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
4395 if (skip) {
4396 return SSC_full_test; // Let caller generate the general case.
4397 }
4398
4399 if (subk->is_java_subtype_of(superk)) {
4400 return SSC_always_true; // (0) and (1) this test cannot fail
4401 }
4402
4403 if (!subk->maybe_java_subtype_of(superk)) {
4404 return SSC_always_false; // (2) true path dead; no dynamic test needed
4405 }
4406
4407 const Type* superelem = superk;
4408 if (superk->isa_aryklassptr()) {
4409 int ignored;
4410 superelem = superk->is_aryklassptr()->base_element_type(ignored);
4411 }
4412
4413 if (superelem->isa_instklassptr()) {
4414 ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4415 if (!ik->has_subklass()) {
4416 if (!ik->is_final()) {
4417 // Add a dependency if there is a chance of a later subclass.
4418 dependencies()->assert_leaf_type(ik);
4419 }
4420 if (!superk->maybe_java_subtype_of(subk)) {
4421 return SSC_always_false;
4422 }
4423 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4424 }
4425 } else {
4426 // A primitive array type has no subtypes.
4427 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4428 }
4429
4430 return SSC_full_test;
4431 }
4432
4433 Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetype, Node* ctrl) {
4434 #ifdef _LP64
4435 // The scaled index operand to AddP must be a clean 64-bit value.
4436 // Java allows a 32-bit int to be incremented to a negative
4437 // value, which appears in a 64-bit register as a large
4438 // positive number. Using that large positive number as an
4439 // operand in pointer arithmetic has bad consequences.
4440 // On the other hand, 32-bit overflow is rare, and the possibility
4441 // can often be excluded, if we annotate the ConvI2L node with
4442 // a type assertion that its value is known to be a small positive
4443 // number. (The prior range check has ensured this.)
4444 // This assertion is used by ConvI2LNode::Ideal.
4445 int index_max = max_jint - 1; // array size is max_jint, index is one less
4446 if (sizetype != nullptr) index_max = sizetype->_hi - 1;
4447 const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
4448 idx = constrained_convI2L(phase, idx, iidxtype, ctrl);
4449 #endif
4450 return idx;
4451 }
4452
4453 // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
4454 Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) {
4455 if (ctrl != nullptr) {
4456 // Express control dependency by a CastII node with a narrow type.
4457 value = new CastIINode(value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */);
4458 // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
4459 // node from floating above the range check during loop optimizations. Otherwise, the
4460 // ConvI2L node may be eliminated independently of the range check, causing the data path
4461 // to become TOP while the control path is still there (although it's unreachable).
4462 value->set_req(0, ctrl);
4463 value = phase->transform(value);
4464 }
4465 const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
4466 return phase->transform(new ConvI2LNode(value, ltype));
4467 }
4468
4469 // The message about the current inlining is accumulated in
4470 // _print_inlining_stream and transferred into the _print_inlining_list
4471 // once we know whether inlining succeeds or not. For regular
4472 // inlining, messages are appended to the buffer pointed by
4473 // _print_inlining_idx in the _print_inlining_list. For late inlining,
4474 // a new buffer is added after _print_inlining_idx in the list. This
4475 // way we can update the inlining message for late inlining call site
4476 // when the inlining is attempted again.
4477 void Compile::print_inlining_init() {
4478 if (print_inlining() || print_intrinsics()) {
4479 // print_inlining_init is actually called several times.
4480 print_inlining_reset();
4481 _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer*>(comp_arena(), 1, 1, new PrintInliningBuffer());
4482 }
4483 }
4484
4485 void Compile::print_inlining_reinit() {
4486 if (print_inlining() || print_intrinsics()) {
4487 print_inlining_reset();
4488 }
4489 }
4490
4491 void Compile::print_inlining_reset() {
4492 _print_inlining_stream->reset();
4493 }
4494
4495 void Compile::print_inlining_commit() {
4496 assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
4497 // Transfer the message from _print_inlining_stream to the current
4498 // _print_inlining_list buffer and clear _print_inlining_stream.
4499 _print_inlining_list->at(_print_inlining_idx)->ss()->write(_print_inlining_stream->base(), _print_inlining_stream->size());
4500 print_inlining_reset();
4501 }
4502
4503 void Compile::print_inlining_push() {
4504 // Add new buffer to the _print_inlining_list at current position
4505 _print_inlining_idx++;
4506 _print_inlining_list->insert_before(_print_inlining_idx, new PrintInliningBuffer());
4507 }
4508
4509 Compile::PrintInliningBuffer* Compile::print_inlining_current() {
4510 return _print_inlining_list->at(_print_inlining_idx);
4511 }
4512
4513 void Compile::print_inlining_update(CallGenerator* cg) {
4514 if (print_inlining() || print_intrinsics()) {
4515 if (cg->is_late_inline()) {
4516 if (print_inlining_current()->cg() != cg &&
4517 (print_inlining_current()->cg() != nullptr ||
4518 print_inlining_current()->ss()->size() != 0)) {
4519 print_inlining_push();
4520 }
4521 print_inlining_commit();
4522 print_inlining_current()->set_cg(cg);
4523 } else {
4524 if (print_inlining_current()->cg() != nullptr) {
4525 print_inlining_push();
4526 }
4527 print_inlining_commit();
4528 }
4529 }
4530 }
4531
4532 void Compile::print_inlining_move_to(CallGenerator* cg) {
4533 // We resume inlining at a late inlining call site. Locate the
4534 // corresponding inlining buffer so that we can update it.
4535 if (print_inlining() || print_intrinsics()) {
4536 for (int i = 0; i < _print_inlining_list->length(); i++) {
4537 if (_print_inlining_list->at(i)->cg() == cg) {
4538 _print_inlining_idx = i;
4539 return;
4540 }
4541 }
4542 ShouldNotReachHere();
4543 }
4544 }
4545
4546 void Compile::print_inlining_update_delayed(CallGenerator* cg) {
4547 if (print_inlining() || print_intrinsics()) {
4548 assert(_print_inlining_stream->size() > 0, "missing inlining msg");
4549 assert(print_inlining_current()->cg() == cg, "wrong entry");
4550 // replace message with new message
4551 _print_inlining_list->at_put(_print_inlining_idx, new PrintInliningBuffer());
4552 print_inlining_commit();
4553 print_inlining_current()->set_cg(cg);
4554 }
4555 }
4556
4557 void Compile::print_inlining_assert_ready() {
4558 assert(!_print_inlining || _print_inlining_stream->size() == 0, "losing data");
4559 }
4560
4561 void Compile::process_print_inlining() {
4562 assert(_late_inlines.length() == 0, "not drained yet");
4563 if (print_inlining() || print_intrinsics()) {
4564 ResourceMark rm;
4565 stringStream ss;
4566 assert(_print_inlining_list != nullptr, "process_print_inlining should be called only once.");
4567 for (int i = 0; i < _print_inlining_list->length(); i++) {
4568 PrintInliningBuffer* pib = _print_inlining_list->at(i);
4569 ss.print("%s", pib->ss()->freeze());
4570 delete pib;
4571 DEBUG_ONLY(_print_inlining_list->at_put(i, nullptr));
4572 }
4573 // Reset _print_inlining_list, it only contains destructed objects.
4574 // It is on the arena, so it will be freed when the arena is reset.
4575 _print_inlining_list = nullptr;
4576 // _print_inlining_stream won't be used anymore, either.
4577 print_inlining_reset();
4578 size_t end = ss.size();
4579 _print_inlining_output = NEW_ARENA_ARRAY(comp_arena(), char, end+1);
4580 strncpy(_print_inlining_output, ss.freeze(), end+1);
4581 _print_inlining_output[end] = 0;
4582 }
4583 }
4584
4585 void Compile::dump_print_inlining() {
4586 if (_print_inlining_output != nullptr) {
4587 tty->print_raw(_print_inlining_output);
4588 }
4589 }
4590
4591 void Compile::log_late_inline(CallGenerator* cg) {
4592 if (log() != nullptr) {
4593 log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
4594 cg->unique_id());
4595 JVMState* p = cg->call_node()->jvms();
4596 while (p != nullptr) {
4597 log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
4598 p = p->caller();
4599 }
4600 log()->tail("late_inline");
4601 }
4602 }
4603
4604 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
4605 log_late_inline(cg);
4606 if (log() != nullptr) {
4607 log()->inline_fail(msg);
4608 }
4609 }
4610
4611 void Compile::log_inline_id(CallGenerator* cg) {
4612 if (log() != nullptr) {
4613 // The LogCompilation tool needs a unique way to identify late
4614 // inline call sites. This id must be unique for this call site in
4615 // this compilation. Try to have it unique across compilations as
4616 // well because it can be convenient when grepping through the log
4617 // file.
4618 // Distinguish OSR compilations from others in case CICountOSR is
4619 // on.
4620 jlong id = ((jlong)unique()) + (((jlong)compile_id()) << 33) + (CICountOSR && is_osr_compilation() ? ((jlong)1) << 32 : 0);
4621 cg->set_unique_id(id);
4622 log()->elem("inline_id id='" JLONG_FORMAT "'", id);
4623 }
4624 }
4625
4626 void Compile::log_inline_failure(const char* msg) {
4627 if (C->log() != nullptr) {
4628 C->log()->inline_fail(msg);
4629 }
4630 }
4631
4632
4633 // Dump inlining replay data to the stream.
4634 // Don't change thread state and acquire any locks.
4635 void Compile::dump_inline_data(outputStream* out) {
4636 InlineTree* inl_tree = ilt();
4637 if (inl_tree != nullptr) {
4638 out->print(" inline %d", inl_tree->count());
4639 inl_tree->dump_replay_data(out);
4640 }
4641 }
4642
4643 void Compile::dump_inline_data_reduced(outputStream* out) {
4644 assert(ReplayReduce, "");
4645
4646 InlineTree* inl_tree = ilt();
4647 if (inl_tree == nullptr) {
4648 return;
4649 }
4650 // Enable iterative replay file reduction
4651 // Output "compile" lines for depth 1 subtrees,
4652 // simulating that those trees were compiled
4653 // instead of inlined.
4654 for (int i = 0; i < inl_tree->subtrees().length(); ++i) {
4655 InlineTree* sub = inl_tree->subtrees().at(i);
4656 if (sub->inline_level() != 1) {
4657 continue;
4658 }
4659
4660 ciMethod* method = sub->method();
4661 int entry_bci = -1;
4662 int comp_level = env()->task()->comp_level();
4663 out->print("compile ");
4664 method->dump_name_as_ascii(out);
4665 out->print(" %d %d", entry_bci, comp_level);
4666 out->print(" inline %d", sub->count());
4667 sub->dump_replay_data(out, -1);
4668 out->cr();
4669 }
4670 }
4671
4672 int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
4673 if (n1->Opcode() < n2->Opcode()) return -1;
4674 else if (n1->Opcode() > n2->Opcode()) return 1;
4675
4676 assert(n1->req() == n2->req(), "can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req());
4677 for (uint i = 1; i < n1->req(); i++) {
4678 if (n1->in(i) < n2->in(i)) return -1;
4679 else if (n1->in(i) > n2->in(i)) return 1;
4680 }
4681
4682 return 0;
4683 }
4684
4685 int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
4686 Node* n1 = *n1p;
4687 Node* n2 = *n2p;
4688
4689 return cmp_expensive_nodes(n1, n2);
4690 }
4691
4692 void Compile::sort_expensive_nodes() {
4693 if (!expensive_nodes_sorted()) {
4694 _expensive_nodes.sort(cmp_expensive_nodes);
4695 }
4696 }
4697
4698 bool Compile::expensive_nodes_sorted() const {
4699 for (int i = 1; i < _expensive_nodes.length(); i++) {
4700 if (cmp_expensive_nodes(_expensive_nodes.adr_at(i), _expensive_nodes.adr_at(i-1)) < 0) {
4701 return false;
4702 }
4703 }
4704 return true;
4705 }
4706
4707 bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
4708 if (_expensive_nodes.length() == 0) {
4709 return false;
4710 }
4711
4712 assert(OptimizeExpensiveOps, "optimization off?");
4713
4714 // Take this opportunity to remove dead nodes from the list
4715 int j = 0;
4716 for (int i = 0; i < _expensive_nodes.length(); i++) {
4717 Node* n = _expensive_nodes.at(i);
4718 if (!n->is_unreachable(igvn)) {
4719 assert(n->is_expensive(), "should be expensive");
4720 _expensive_nodes.at_put(j, n);
4721 j++;
4722 }
4723 }
4724 _expensive_nodes.trunc_to(j);
4725
4726 // Then sort the list so that similar nodes are next to each other
4727 // and check for at least two nodes of identical kind with same data
4728 // inputs.
4729 sort_expensive_nodes();
4730
4731 for (int i = 0; i < _expensive_nodes.length()-1; i++) {
4732 if (cmp_expensive_nodes(_expensive_nodes.adr_at(i), _expensive_nodes.adr_at(i+1)) == 0) {
4733 return true;
4734 }
4735 }
4736
4737 return false;
4738 }
4739
4740 void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
4741 if (_expensive_nodes.length() == 0) {
4742 return;
4743 }
4744
4745 assert(OptimizeExpensiveOps, "optimization off?");
4746
4747 // Sort to bring similar nodes next to each other and clear the
4748 // control input of nodes for which there's only a single copy.
4749 sort_expensive_nodes();
4750
4751 int j = 0;
4752 int identical = 0;
4753 int i = 0;
4754 bool modified = false;
4755 for (; i < _expensive_nodes.length()-1; i++) {
4756 assert(j <= i, "can't write beyond current index");
4757 if (_expensive_nodes.at(i)->Opcode() == _expensive_nodes.at(i+1)->Opcode()) {
4758 identical++;
4759 _expensive_nodes.at_put(j++, _expensive_nodes.at(i));
4760 continue;
4761 }
4762 if (identical > 0) {
4763 _expensive_nodes.at_put(j++, _expensive_nodes.at(i));
4764 identical = 0;
4765 } else {
4766 Node* n = _expensive_nodes.at(i);
4767 igvn.replace_input_of(n, 0, nullptr);
4768 igvn.hash_insert(n);
4769 modified = true;
4770 }
4771 }
4772 if (identical > 0) {
4773 _expensive_nodes.at_put(j++, _expensive_nodes.at(i));
4774 } else if (_expensive_nodes.length() >= 1) {
4775 Node* n = _expensive_nodes.at(i);
4776 igvn.replace_input_of(n, 0, nullptr);
4777 igvn.hash_insert(n);
4778 modified = true;
4779 }
4780 _expensive_nodes.trunc_to(j);
4781 if (modified) {
4782 igvn.optimize();
4783 }
4784 }
4785
4786 void Compile::add_expensive_node(Node * n) {
4787 assert(!_expensive_nodes.contains(n), "duplicate entry in expensive list");
4788 assert(n->is_expensive(), "expensive nodes with non-null control here only");
4789 assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
4790 if (OptimizeExpensiveOps) {
4791 _expensive_nodes.append(n);
4792 } else {
4793 // Clear control input and let IGVN optimize expensive nodes if
4794 // OptimizeExpensiveOps is off.
4795 n->set_req(0, nullptr);
4796 }
4797 }
4798
4799 /**
4800 * Track coarsened Lock and Unlock nodes.
4801 */
4802
4803 class Lock_List : public Node_List {
4804 uint _origin_cnt;
4805 public:
4806 Lock_List(Arena *a, uint cnt) : Node_List(a), _origin_cnt(cnt) {}
4807 uint origin_cnt() const { return _origin_cnt; }
4808 };
4809
4810 void Compile::add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks) {
4811 int length = locks.length();
4812 if (length > 0) {
4813 // Have to keep this list until locks elimination during Macro nodes elimination.
4814 Lock_List* locks_list = new (comp_arena()) Lock_List(comp_arena(), length);
4815 AbstractLockNode* alock = locks.at(0);
4816 BoxLockNode* box = alock->box_node()->as_BoxLock();
4817 for (int i = 0; i < length; i++) {
4818 AbstractLockNode* lock = locks.at(i);
4819 assert(lock->is_coarsened(), "expecting only coarsened AbstractLock nodes, but got '%s'[%d] node", lock->Name(), lock->_idx);
4820 locks_list->push(lock);
4821 BoxLockNode* this_box = lock->box_node()->as_BoxLock();
4822 if (this_box != box) {
4823 // Locking regions (BoxLock) could be Unbalanced here:
4824 // - its coarsened locks were eliminated in earlier
4825 // macro nodes elimination followed by loop unroll
4826 // - it is OSR locking region (no Lock node)
4827 // Preserve Unbalanced status in such cases.
4828 if (!this_box->is_unbalanced()) {
4829 this_box->set_coarsened();
4830 }
4831 if (!box->is_unbalanced()) {
4832 box->set_coarsened();
4833 }
4834 }
4835 }
4836 _coarsened_locks.append(locks_list);
4837 }
4838 }
4839
4840 void Compile::remove_useless_coarsened_locks(Unique_Node_List& useful) {
4841 int count = coarsened_count();
4842 for (int i = 0; i < count; i++) {
4843 Node_List* locks_list = _coarsened_locks.at(i);
4844 for (uint j = 0; j < locks_list->size(); j++) {
4845 Node* lock = locks_list->at(j);
4846 assert(lock->is_AbstractLock(), "sanity");
4847 if (!useful.member(lock)) {
4848 locks_list->yank(lock);
4849 }
4850 }
4851 }
4852 }
4853
4854 void Compile::remove_coarsened_lock(Node* n) {
4855 if (n->is_AbstractLock()) {
4856 int count = coarsened_count();
4857 for (int i = 0; i < count; i++) {
4858 Node_List* locks_list = _coarsened_locks.at(i);
4859 locks_list->yank(n);
4860 }
4861 }
4862 }
4863
4864 bool Compile::coarsened_locks_consistent() {
4865 int count = coarsened_count();
4866 for (int i = 0; i < count; i++) {
4867 bool unbalanced = false;
4868 bool modified = false; // track locks kind modifications
4869 Lock_List* locks_list = (Lock_List*)_coarsened_locks.at(i);
4870 uint size = locks_list->size();
4871 if (size == 0) {
4872 unbalanced = false; // All locks were eliminated - good
4873 } else if (size != locks_list->origin_cnt()) {
4874 unbalanced = true; // Some locks were removed from list
4875 } else {
4876 for (uint j = 0; j < size; j++) {
4877 Node* lock = locks_list->at(j);
4878 // All nodes in group should have the same state (modified or not)
4879 if (!lock->as_AbstractLock()->is_coarsened()) {
4880 if (j == 0) {
4881 // first on list was modified, the rest should be too for consistency
4882 modified = true;
4883 } else if (!modified) {
4884 // this lock was modified but previous locks on the list were not
4885 unbalanced = true;
4886 break;
4887 }
4888 } else if (modified) {
4889 // previous locks on list were modified but not this lock
4890 unbalanced = true;
4891 break;
4892 }
4893 }
4894 }
4895 if (unbalanced) {
4896 // unbalanced monitor enter/exit - only some [un]lock nodes were removed or modified
4897 #ifdef ASSERT
4898 if (PrintEliminateLocks) {
4899 tty->print_cr("=== unbalanced coarsened locks ===");
4900 for (uint l = 0; l < size; l++) {
4901 locks_list->at(l)->dump();
4902 }
4903 }
4904 #endif
4905 record_failure(C2Compiler::retry_no_locks_coarsening());
4906 return false;
4907 }
4908 }
4909 return true;
4910 }
4911
4912 // Mark locking regions (identified by BoxLockNode) as unbalanced if
4913 // locks coarsening optimization removed Lock/Unlock nodes from them.
4914 // Such regions become unbalanced because coarsening only removes part
4915 // of Lock/Unlock nodes in region. As result we can't execute other
4916 // locks elimination optimizations which assume all code paths have
4917 // corresponding pair of Lock/Unlock nodes - they are balanced.
4918 void Compile::mark_unbalanced_boxes() const {
4919 int count = coarsened_count();
4920 for (int i = 0; i < count; i++) {
4921 Node_List* locks_list = _coarsened_locks.at(i);
4922 uint size = locks_list->size();
4923 if (size > 0) {
4924 AbstractLockNode* alock = locks_list->at(0)->as_AbstractLock();
4925 BoxLockNode* box = alock->box_node()->as_BoxLock();
4926 if (alock->is_coarsened()) {
4927 // coarsened_locks_consistent(), which is called before this method, verifies
4928 // that the rest of Lock/Unlock nodes on locks_list are also coarsened.
4929 assert(!box->is_eliminated(), "regions with coarsened locks should not be marked as eliminated");
4930 for (uint j = 1; j < size; j++) {
4931 assert(locks_list->at(j)->as_AbstractLock()->is_coarsened(), "only coarsened locks are expected here");
4932 BoxLockNode* this_box = locks_list->at(j)->as_AbstractLock()->box_node()->as_BoxLock();
4933 if (box != this_box) {
4934 assert(!this_box->is_eliminated(), "regions with coarsened locks should not be marked as eliminated");
4935 box->set_unbalanced();
4936 this_box->set_unbalanced();
4937 }
4938 }
4939 }
4940 }
4941 }
4942 }
4943
4944 /**
4945 * Remove the speculative part of types and clean up the graph
4946 */
4947 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
4948 if (UseTypeSpeculation) {
4949 Unique_Node_List worklist;
4950 worklist.push(root());
4951 int modified = 0;
4952 // Go over all type nodes that carry a speculative type, drop the
4953 // speculative part of the type and enqueue the node for an igvn
4954 // which may optimize it out.
4955 for (uint next = 0; next < worklist.size(); ++next) {
4956 Node *n = worklist.at(next);
4957 if (n->is_Type()) {
4958 TypeNode* tn = n->as_Type();
4959 const Type* t = tn->type();
4960 const Type* t_no_spec = t->remove_speculative();
4961 if (t_no_spec != t) {
4962 bool in_hash = igvn.hash_delete(n);
4963 #ifdef ASSERT
4964 if (!in_hash) {
4965 tty->print_cr("current graph:");
4966 n->dump_bfs(MaxNodeLimit, nullptr, "S$");
4967 tty->cr();
4968 tty->print_cr("erroneous node:");
4969 n->dump();
4970 assert(false, "node should be in igvn hash table");
4971 }
4972 #endif
4973 tn->set_type(t_no_spec);
4974 igvn.hash_insert(n);
4975 igvn._worklist.push(n); // give it a chance to go away
4976 modified++;
4977 }
4978 }
4979 // Iterate over outs - endless loops is unreachable from below
4980 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4981 Node *m = n->fast_out(i);
4982 if (not_a_node(m)) {
4983 continue;
4984 }
4985 worklist.push(m);
4986 }
4987 }
4988 // Drop the speculative part of all types in the igvn's type table
4989 igvn.remove_speculative_types();
4990 if (modified > 0) {
4991 igvn.optimize();
4992 if (failing()) return;
4993 }
4994 #ifdef ASSERT
4995 // Verify that after the IGVN is over no speculative type has resurfaced
4996 worklist.clear();
4997 worklist.push(root());
4998 for (uint next = 0; next < worklist.size(); ++next) {
4999 Node *n = worklist.at(next);
5000 const Type* t = igvn.type_or_null(n);
5001 assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
5002 if (n->is_Type()) {
5003 t = n->as_Type()->type();
5004 assert(t == t->remove_speculative(), "no more speculative types");
5005 }
5006 // Iterate over outs - endless loops is unreachable from below
5007 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5008 Node *m = n->fast_out(i);
5009 if (not_a_node(m)) {
5010 continue;
5011 }
5012 worklist.push(m);
5013 }
5014 }
5015 igvn.check_no_speculative_types();
5016 #endif
5017 }
5018 }
5019
5020 // Auxiliary methods to support randomized stressing/fuzzing.
5021
5022 int Compile::random() {
5023 _stress_seed = os::next_random(_stress_seed);
5024 return static_cast<int>(_stress_seed);
5025 }
5026
5027 // This method can be called the arbitrary number of times, with current count
5028 // as the argument. The logic allows selecting a single candidate from the
5029 // running list of candidates as follows:
5030 // int count = 0;
5031 // Cand* selected = null;
5032 // while(cand = cand->next()) {
5033 // if (randomized_select(++count)) {
5034 // selected = cand;
5035 // }
5036 // }
5037 //
5038 // Including count equalizes the chances any candidate is "selected".
5039 // This is useful when we don't have the complete list of candidates to choose
5040 // from uniformly. In this case, we need to adjust the randomicity of the
5041 // selection, or else we will end up biasing the selection towards the latter
5042 // candidates.
5043 //
5044 // Quick back-envelope calculation shows that for the list of n candidates
5045 // the equal probability for the candidate to persist as "best" can be
5046 // achieved by replacing it with "next" k-th candidate with the probability
5047 // of 1/k. It can be easily shown that by the end of the run, the
5048 // probability for any candidate is converged to 1/n, thus giving the
5049 // uniform distribution among all the candidates.
5050 //
5051 // We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
5052 #define RANDOMIZED_DOMAIN_POW 29
5053 #define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
5054 #define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
5055 bool Compile::randomized_select(int count) {
5056 assert(count > 0, "only positive");
5057 return (random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
5058 }
5059
5060 CloneMap& Compile::clone_map() { return _clone_map; }
5061 void Compile::set_clone_map(Dict* d) { _clone_map._dict = d; }
5062
5063 void NodeCloneInfo::dump_on(outputStream* st) const {
5064 st->print(" {%d:%d} ", idx(), gen());
5065 }
5066
5067 void CloneMap::clone(Node* old, Node* nnn, int gen) {
5068 uint64_t val = value(old->_idx);
5069 NodeCloneInfo cio(val);
5070 assert(val != 0, "old node should be in the map");
5071 NodeCloneInfo cin(cio.idx(), gen + cio.gen());
5072 insert(nnn->_idx, cin.get());
5073 #ifndef PRODUCT
5074 if (is_debug()) {
5075 tty->print_cr("CloneMap::clone inserted node %d info {%d:%d} into CloneMap", nnn->_idx, cin.idx(), cin.gen());
5076 }
5077 #endif
5078 }
5079
5080 void CloneMap::verify_insert_and_clone(Node* old, Node* nnn, int gen) {
5081 NodeCloneInfo cio(value(old->_idx));
5082 if (cio.get() == 0) {
5083 cio.set(old->_idx, 0);
5084 insert(old->_idx, cio.get());
5085 #ifndef PRODUCT
5086 if (is_debug()) {
5087 tty->print_cr("CloneMap::verify_insert_and_clone inserted node %d info {%d:%d} into CloneMap", old->_idx, cio.idx(), cio.gen());
5088 }
5089 #endif
5090 }
5091 clone(old, nnn, gen);
5092 }
5093
5094 int CloneMap::max_gen() const {
5095 int g = 0;
5096 DictI di(_dict);
5097 for(; di.test(); ++di) {
5098 int t = gen(di._key);
5099 if (g < t) {
5100 g = t;
5101 #ifndef PRODUCT
5102 if (is_debug()) {
5103 tty->print_cr("CloneMap::max_gen() update max=%d from %d", g, _2_node_idx_t(di._key));
5104 }
5105 #endif
5106 }
5107 }
5108 return g;
5109 }
5110
5111 void CloneMap::dump(node_idx_t key, outputStream* st) const {
5112 uint64_t val = value(key);
5113 if (val != 0) {
5114 NodeCloneInfo ni(val);
5115 ni.dump_on(st);
5116 }
5117 }
5118
5119 // Move Allocate nodes to the start of the list
5120 void Compile::sort_macro_nodes() {
5121 int count = macro_count();
5122 int allocates = 0;
5123 for (int i = 0; i < count; i++) {
5124 Node* n = macro_node(i);
5125 if (n->is_Allocate()) {
5126 if (i != allocates) {
5127 Node* tmp = macro_node(allocates);
5128 _macro_nodes.at_put(allocates, n);
5129 _macro_nodes.at_put(i, tmp);
5130 }
5131 allocates++;
5132 }
5133 }
5134 }
5135
5136 void Compile::print_method(CompilerPhaseType cpt, int level, Node* n) {
5137 EventCompilerPhase event;
5138 if (event.should_commit()) {
5139 CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, cpt, C->_compile_id, level);
5140 }
5141 #ifndef PRODUCT
5142 ResourceMark rm;
5143 stringStream ss;
5144 ss.print_raw(CompilerPhaseTypeHelper::to_description(cpt));
5145 if (n != nullptr) {
5146 ss.print(": %d %s ", n->_idx, NodeClassNames[n->Opcode()]);
5147 }
5148
5149 const char* name = ss.as_string();
5150 if (should_print_igv(level)) {
5151 _igv_printer->print_method(name, level);
5152 }
5153 if (should_print_phase(cpt)) {
5154 print_ideal_ir(CompilerPhaseTypeHelper::to_name(cpt));
5155 }
5156 #endif
5157 C->_latest_stage_start_counter.stamp();
5158 }
5159
5160 // Only used from CompileWrapper
5161 void Compile::begin_method() {
5162 #ifndef PRODUCT
5163 if (_method != nullptr && should_print_igv(1)) {
5164 _igv_printer->begin_method();
5165 }
5166 #endif
5167 C->_latest_stage_start_counter.stamp();
5168 }
5169
5170 // Only used from CompileWrapper
5171 void Compile::end_method() {
5172 EventCompilerPhase event;
5173 if (event.should_commit()) {
5174 CompilerEvent::PhaseEvent::post(event, C->_latest_stage_start_counter, PHASE_END, C->_compile_id, 1);
5175 }
5176
5177 #ifndef PRODUCT
5178 if (_method != nullptr && should_print_igv(1)) {
5179 _igv_printer->end_method();
5180 }
5181 #endif
5182 }
5183
5184 bool Compile::should_print_phase(CompilerPhaseType cpt) {
5185 #ifndef PRODUCT
5186 if ((_directive->ideal_phase_mask() & CompilerPhaseTypeHelper::to_bitmask(cpt)) != 0) {
5187 return true;
5188 }
5189 #endif
5190 return false;
5191 }
5192
5193 bool Compile::should_print_igv(int level) {
5194 #ifndef PRODUCT
5195 if (PrintIdealGraphLevel < 0) { // disabled by the user
5196 return false;
5197 }
5198
5199 bool need = directive()->IGVPrintLevelOption >= level;
5200 if (need && !_igv_printer) {
5201 _igv_printer = IdealGraphPrinter::printer();
5202 _igv_printer->set_compile(this);
5203 }
5204 return need;
5205 #else
5206 return false;
5207 #endif
5208 }
5209
5210 #ifndef PRODUCT
5211 IdealGraphPrinter* Compile::_debug_file_printer = nullptr;
5212 IdealGraphPrinter* Compile::_debug_network_printer = nullptr;
5213
5214 // Called from debugger. Prints method to the default file with the default phase name.
5215 // This works regardless of any Ideal Graph Visualizer flags set or not.
5216 void igv_print() {
5217 Compile::current()->igv_print_method_to_file();
5218 }
5219
5220 // Same as igv_print() above but with a specified phase name.
5221 void igv_print(const char* phase_name) {
5222 Compile::current()->igv_print_method_to_file(phase_name);
5223 }
5224
5225 // Called from debugger. Prints method with the default phase name to the default network or the one specified with
5226 // the network flags for the Ideal Graph Visualizer, or to the default file depending on the 'network' argument.
5227 // This works regardless of any Ideal Graph Visualizer flags set or not.
5228 void igv_print(bool network) {
5229 if (network) {
5230 Compile::current()->igv_print_method_to_network();
5231 } else {
5232 Compile::current()->igv_print_method_to_file();
5233 }
5234 }
5235
5236 // Same as igv_print(bool network) above but with a specified phase name.
5237 void igv_print(bool network, const char* phase_name) {
5238 if (network) {
5239 Compile::current()->igv_print_method_to_network(phase_name);
5240 } else {
5241 Compile::current()->igv_print_method_to_file(phase_name);
5242 }
5243 }
5244
5245 // Called from debugger. Normal write to the default _printer. Only works if Ideal Graph Visualizer printing flags are set.
5246 void igv_print_default() {
5247 Compile::current()->print_method(PHASE_DEBUG, 0);
5248 }
5249
5250 // Called from debugger, especially when replaying a trace in which the program state cannot be altered like with rr replay.
5251 // A method is appended to an existing default file with the default phase name. This means that igv_append() must follow
5252 // an earlier igv_print(*) call which sets up the file. This works regardless of any Ideal Graph Visualizer flags set or not.
5253 void igv_append() {
5254 Compile::current()->igv_print_method_to_file("Debug", true);
5255 }
5256
5257 // Same as igv_append() above but with a specified phase name.
5258 void igv_append(const char* phase_name) {
5259 Compile::current()->igv_print_method_to_file(phase_name, true);
5260 }
5261
5262 void Compile::igv_print_method_to_file(const char* phase_name, bool append) {
5263 const char* file_name = "custom_debug.xml";
5264 if (_debug_file_printer == nullptr) {
5265 _debug_file_printer = new IdealGraphPrinter(C, file_name, append);
5266 } else {
5267 _debug_file_printer->update_compiled_method(C->method());
5268 }
5269 tty->print_cr("Method %s to %s", append ? "appended" : "printed", file_name);
5270 _debug_file_printer->print(phase_name, (Node*)C->root());
5271 }
5272
5273 void Compile::igv_print_method_to_network(const char* phase_name) {
5274 if (_debug_network_printer == nullptr) {
5275 _debug_network_printer = new IdealGraphPrinter(C);
5276 } else {
5277 _debug_network_printer->update_compiled_method(C->method());
5278 }
5279 tty->print_cr("Method printed over network stream to IGV");
5280 _debug_network_printer->print(phase_name, (Node*)C->root());
5281 }
5282 #endif
5283
5284 Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res) {
5285 if (type != nullptr && phase->type(value)->higher_equal(type)) {
5286 return value;
5287 }
5288 Node* result = nullptr;
5289 if (bt == T_BYTE) {
5290 result = phase->transform(new LShiftINode(value, phase->intcon(24)));
5291 result = new RShiftINode(result, phase->intcon(24));
5292 } else if (bt == T_BOOLEAN) {
5293 result = new AndINode(value, phase->intcon(0xFF));
5294 } else if (bt == T_CHAR) {
5295 result = new AndINode(value,phase->intcon(0xFFFF));
5296 } else {
5297 assert(bt == T_SHORT, "unexpected narrow type");
5298 result = phase->transform(new LShiftINode(value, phase->intcon(16)));
5299 result = new RShiftINode(result, phase->intcon(16));
5300 }
5301 if (transform_res) {
5302 result = phase->transform(result);
5303 }
5304 return result;
5305 }
5306