/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "code/vmreg.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st); }
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::EMPTY;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on-stack-replacement adapter

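// Note: the OSR entry takes a single incoming parameter, a raw pointer to the
// OSR buffer from which the interpreter's locals and monitors are migrated
// into the compiled frame (see osr_domain() below).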
//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not? No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) { // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) { // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not? Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not? Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method),
  _receiver_info(nullptr) {
  assert(method != nullptr, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  DEBUG_ONLY(_bci = -99);  // random garbage value
  DEBUG_ONLY(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == nullptr ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(nullptr),
  _receiver_info(nullptr) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  DEBUG_ONLY(_map = (SafePointNode*)-1);
  _caller = nullptr;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
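// A sketch of how a JVMState's offsets slice the owning node's debug-info
// edges (innermost frame shown; caller frames precede it):
//
//   locoff              stkoff               monoff           scloff    endoff
//     |<-- max_locals -->|<--  max_stack  -->|<-- monitors -->|<- scalar ->|
//        locals            expression stack    (box,obj pairs)  repl. objs
//
// With no monitors or scalar-replaced objects, monoff == scloff == endoff,
// exactly as the constructors above set things up.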

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)        return false;
    if (p->_method == nullptr)           return true;  // bci is irrelevant
    if (p->_bci != q->_bci)              return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                          return true;
    assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given a register allocator (a Chaitin object) and a Node, decide whether the
// Node carries any defined value. If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == nullptr) { st->print(" null"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf,sizeof(buf));
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#null",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
    case Type::AryKlassPtr:
    case Type::InstKlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//---------------------print_method_with_lineno--------------------------------
void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
  if (show_name) _method->print_short_name(st);

  int lineno = _method->line_number_from_bci(_bci);
  if (lineno != -1) {
    st->print(" @ bci:%d (line %d)", _bci, lineno);
  } else {
    st->print(" @ bci:%d", _bci);
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    print_method_with_lineno(st, true);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
                  regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->cr();
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = nullptr;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != nullptr) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != nullptr) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != nullptr) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != nullptr) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == nullptr)  endcn = strchr(name, '(');
        if (endcn == nullptr)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if (_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != nullptr) caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                   ((caller() == nullptr) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {     // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != nullptr && ex->len() > ex->req()) {
        ex = ex->in(ex->req());          // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != nullptr) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
            depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == nullptr) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  n->set_receiver_info(_receiver_info);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset the map for all callers.
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != nullptr; p = p->_caller) {
    p->set_map(map);
  }
}

// Unlike set_map(), this sets the link in both directions:
// the map is also pointed back at this JVMState.
void JVMState::bind_map(SafePointNode* map) {
  set_map(map);
  _map->set_jvms(this);
}

// Adapt the offsets into the in-array after adding or removing an edge.
// The prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code:
// how much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != nullptr) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}

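// Note: compute_receiver_info() below records the receiver of a compiled
// lambda form only when it is a compile-time constant oop; presumably this
// lets later inlining decisions specialize on the concrete receiver instance.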
// Compute receiver info for a compiled lambda form at a call site.
ciInstance* JVMState::compute_receiver_info(ciMethod* callee) const {
  assert(callee != nullptr && callee->is_compiled_lambda_form(), "");
  if (has_method() && method()->is_compiled_lambda_form()) { // callee is not a MH invoker
    Node* recv = map()->argument(this, 0);
    assert(recv != nullptr, "");
    const TypeOopPtr* recv_toop = recv->bottom_type()->isa_oopptr();
    if (recv_toop != nullptr && recv_toop->const_oop() != nullptr) {
      return recv_toop->const_oop()->as_instance();
    }
  }
  return nullptr;
}

//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) { // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)        tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)      jvms()->dump_spec(st);
}

void AllocateNode::dump_spec(outputStream* st) const {
  st->print(" ");
  if (tf() != nullptr) {
    tf()->dump_on(st);
  }
  if (_cnt != COUNT_UNKNOWN) {
    st->print(" C=%f", _cnt);
  }
  const Node* const klass_node = in(KlassNode);
  if (klass_node != nullptr) {
    const TypeKlassPtr* const klass_ptr = klass_node->bottom_type()->isa_klassptr();

    if (klass_ptr != nullptr && klass_ptr->klass_is_exact()) {
      st->print(" allocationKlass:");
      klass_ptr->exact_klass()->print_name_on(st);
    }
  }
  if (jvms() != nullptr) {
    jvms()->dump_spec(st);
  }
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::EMPTY, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = Opcode() == Op_CallLeafVector
      ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
      : is_CallRuntime()
        ? match->c_return_value(ideal_reg)     // Calls into C runtime
        : match->  return_value(ideal_reg);    // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());

    if (Opcode() == Op_CallLeafVector) {
      // If the return is in a vector, compute the appropriate regmask taking the whole range into account
      if (ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
        if (OptoReg::is_valid(regs.second())) {
          for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
            rm.insert(r);
          }
        }
      }
    }

    if (OptoReg::is_valid(regs.second())) {
      rm.insert(regs.second());
    }
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not? Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify a field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (phase->type(dest)->isa_rawptr()) {
      // May happen for an arraycopy that initializes a newly allocated object; conservatively return true.
      return true;
    }
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as an argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(const Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}

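// A sketch of the projection shape that extract_projections() walks, for a
// call with an exception edge (names match the CallProjections fields):
//
//   call --Control--> fallthrough_proj --> Catch --> fallthrough_catchproj
//                                                \-> catchall_catchproj
//        --I_O------> fallthrough_ioproj / catchall_ioproj (--> CreateEx = exobj)
//        --Memory---> fallthrough_memproj / catchall_memproj
//        --Parms----> resproj (the result, if used)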
void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts, bool allow_handlers) const {
  projs->fallthrough_proj      = nullptr;
  projs->fallthrough_catchproj = nullptr;
  projs->fallthrough_ioproj    = nullptr;
  projs->catchall_ioproj       = nullptr;
  projs->catchall_catchproj    = nullptr;
  projs->fallthrough_memproj   = nullptr;
  projs->catchall_memproj      = nullptr;
  projs->resproj               = nullptr;
  projs->exobj                 = nullptr;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
    {
      // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
      projs->fallthrough_proj = pn;
      const Node* cn = pn->unique_ctrl_out_or_null();
      if (cn != nullptr && cn->is_Catch()) {
        for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
          CatchProjNode* cpn = cn->fast_out(k)->as_CatchProj();
          assert(allow_handlers || !cpn->is_handler_proj(), "not allowed");
          if (cpn->_con == CatchProjNode::fall_through_index) {
            assert(cpn->handler_bci() == CatchProjNode::no_handler_bci, "");
            projs->fallthrough_catchproj = cpn;
          } else if (!cpn->is_handler_proj()) {
            projs->catchall_catchproj = cpn;
          }
        }
      }
      break;
    }
    case TypeFunc::I_O:
      if (pn->_is_io_use) {
        projs->catchall_ioproj = pn;
      } else {
        projs->fallthrough_ioproj = pn;
      }
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->outcnt() > 0) {
          CatchProjNode* ecpn = e->in(0)->isa_CatchProj();
          assert(allow_handlers || ecpn == nullptr || !ecpn->is_handler_proj(), "not allowed");
          if (ecpn != nullptr && ecpn->_con != CatchProjNode::fall_through_index && !ecpn->is_handler_proj()) {
            assert(projs->exobj == nullptr, "only one");
            projs->exobj = e;
          }
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored,
  // and the exception object may not exist if an exception handler
  // swallows the exception, but all the others must exist and be found.
  assert(projs->fallthrough_proj != nullptr, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
  }
}

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

bool CallNode::is_call_to_multianewarray_stub() const {
  if (_name != nullptr &&
      strstr(_name, "multianewarray") != nullptr &&
      strstr(_name, "C2 runtime") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert(new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject nodes could be referenced several times in debug info.
  // Use a Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // The JVMS may be shared, so clone it before we modify it.
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

void CallJavaNode::register_for_late_inline() {
  if (generator() != nullptr) {
    Compile::current()->prepend_late_inline(generator());
    set_generator(nullptr);
  } else {
    assert(false, "repeated inline attempt");
  }
}

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    if (cg->is_mh_late_inline()) {
      assert(IncrementalInlineMH, "required");
      assert(cg->call_node() == this, "mismatch");
      assert(cg->method()->is_method_handle_intrinsic(), "required");

      // Check whether this method handle call becomes a candidate for inlining.
      ciMethod* callee = cg->method();
      vmIntrinsics::ID iid = callee->intrinsic_id();
      if (iid == vmIntrinsics::_invokeBasic) {
        if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
          register_for_late_inline();
        }
      } else if (iid == vmIntrinsics::_linkToNative) {
        // never retry
      } else {
        assert(callee->has_member_arg(), "wrong type of call?");
        if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
          register_for_late_inline();
        }
      }
    } else {
      assert(IncrementalInline, "required");
      assert(!cg->method()->is_method_handle_intrinsic(), "required");
      if (phase->C->print_inlining()) {
        phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
                                           "static call node changed: trying again");
      }
      register_for_late_inline();
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

//----------------------------is_uncommon_trap----------------------------
// Returns true if this is an uncommon trap.
bool CallStaticJavaNode::is_uncommon_trap() const {
  return (_name != nullptr && !strcmp(_name, "uncommon_trap"));
}

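// Note: the integer constant passed in the Parms slot of an uncommon trap is
// a trap request word packed by Deoptimization::make_trap_request(reason,
// action) and pretty-printed via Deoptimization::format_trap_request() below.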
//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    if (cg->is_virtual_late_inline()) {
      assert(IncrementalInlineVirtual, "required");
      assert(cg->call_node() == this, "mismatch");

      if (cg->callee_method() == nullptr) {
        // Recover symbolic info for method resolution.
        ciMethod* caller = jvms()->method();
        ciBytecodeStream iter(caller);
        iter.force_bci(jvms()->bci());

        bool not_used1;
        ciSignature* not_used2;
        ciMethod* orig_callee = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
        ciKlass* holder = iter.get_declared_method_holder();
        if (orig_callee->is_method_handle_intrinsic()) {
          assert(_override_symbolic_info, "required");
          orig_callee = method();
          holder = method()->holder();
        }

        ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

        Node* receiver_node = in(TypeFunc::Parms);
        const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

        int not_used3;
        bool call_does_dispatch;
        ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
                                                           call_does_dispatch, not_used3);  // out-parameters
        if (!call_does_dispatch) {
          cg->set_callee_method(callee);
        }
      }
      if (cg->callee_method() != nullptr) {
        // Register for late inlining.
        register_for_late_inline(); // MH late inlining prepends to the list, so do the same
      }
    } else {
      assert(IncrementalInline, "required");
      if (phase->C->print_inlining()) {
        phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
                                           "dynamic call node changed: trying again");
      }
      register_for_late_inline();
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}

void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}

//=============================================================================


//=============================================================================
bool CallLeafPureNode::is_unused() const {
  return proj_out_or_null(TypeFunc::Parms) == nullptr;
}

bool CallLeafPureNode::is_dead() const {
  return proj_out_or_null(TypeFunc::Control) == nullptr;
}

/* We make a tuple of the global input state + TOP for the output values.
 * We use this to delete a pure function that is not used: by replacing the call with
 * such a tuple, we let output Proj's idealization pick the corresponding input of the
 * pure call, thus jumping over it and effectively removing the call from the graph.
 * This avoids doing the graph surgery manually, and leaves it to IGVN,
 * which is specialized for doing that right. We also need tuple components for the output
 * values of the function to respect the return arity, and in case there is a projection
 * that would pick an output (which shouldn't happen at the moment).
 */
TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(const Compile* C) const {
  // Transparently propagate the input state (everything but the parameters)
  TupleNode* tuple = TupleNode::make(
    tf()->range(),
    in(TypeFunc::Control),
    in(TypeFunc::I_O),
    in(TypeFunc::Memory),
    in(TypeFunc::FramePtr),
    in(TypeFunc::ReturnAdr));

  // And add TOPs for the return values
  for (uint i = TypeFunc::Parms; i < tf()->range()->cnt(); i++) {
    tuple->set_req(i, C->top());
  }

  return tuple;
}

CallLeafPureNode* CallLeafPureNode::inline_call_leaf_pure_node(Node* control) const {
  Node* top = Compile::current()->top();
  if (control == nullptr) {
    control = in(TypeFunc::Control);
  }

  CallLeafPureNode* call = new CallLeafPureNode(tf(), entry_point(), _name);
  call->init_req(TypeFunc::Control, control);
  call->init_req(TypeFunc::I_O, top);
  call->init_req(TypeFunc::Memory, top);
  call->init_req(TypeFunc::ReturnAdr, top);
  call->init_req(TypeFunc::FramePtr, top);
  for (unsigned int i = 0; i < tf()->domain()->cnt() - TypeFunc::Parms; i++) {
    call->init_req(TypeFunc::Parms + i, in(TypeFunc::Parms + i));
  }

  return call;
}

Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (is_dead()) {
    return nullptr;
  }

  // We need to wait until IGVN because, during parsing, uses might still be
  // missing and we would remove the call prematurely.
  if (can_reshape && is_unused()) {
    // The result is not used. We remove the call by replacing it with a tuple, which
    // is later disintegrated by the projections.
    return make_tuple_of_input_state_and_top_return_values(phase->C);
  }

  return CallRuntimeNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(const JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top()) {
    // If the current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed, since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc - 1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

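// Exception states are chained off a SafePoint through a single optional
// precedence edge at slot req(): len() > req() means an exception chain is
// attached. The two accessors below maintain that edge.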
//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != nullptr)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return nullptr;
  } else {
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back-to-back safepoints, remove one
  if (in(TypeFunc::Control)->is_SafePoint()) {
    Node* out_c = unique_ctrl_out_or_null();
    // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
    // outer loop's safepoint could confuse removal of the outer loop.
    if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
      return in(TypeFunc::Control);
    }
  }

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
    // Check whether it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (in(0) == this) {
    return Type::TOP;  // Dead infinite loop
  }
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if (idx < TypeFunc::Parms) {
    return RegMask::EMPTY;
  }
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::EMPTY;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

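// Monitors occupy two consecutive debug edges each -- the BoxLockNode (the
// stack slot of the monitor) followed by the locked object -- stored in the
// [monoff, scloff) range; hence MonitorEdges == 2 below.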
1557 void SafePointNode::push_monitor(const FastLockNode *lock) {
1558 // Add a LockNode, which points to both the original BoxLockNode (the
1559 // stack space for the monitor) and the Object being locked.
1560 const int MonitorEdges = 2;
1561 assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1562 assert(req() == jvms()->endoff(), "correct sizing");
1563 int nextmon = jvms()->scloff();
1564 ins_req(nextmon, lock->box_node());
1565 ins_req(nextmon+1, lock->obj_node());
1566 jvms()->set_scloff(nextmon + MonitorEdges);
1567 jvms()->set_endoff(req());
1568 }

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  DEBUG_ONLY(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff) del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

Node* SafePointNode::peek_operand(uint off) const {
  assert(jvms()->sp() > 0, "must have an operand");
  assert(off < jvms()->sp(), "off is out-of-range");
  return stack(jvms(), jvms()->sp() - off - 1);
}

// Do we Match on this edge index or not? Match only the Parms edge.
uint SafePointNode::match_edge(uint idx) const {
  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}

void SafePointNode::remove_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) {
  assert(non_debug_edges._state == NodeEdgeTempStorage::state_initial, "not processed");
  assert(non_debug_edges.is_empty(), "edges not processed");

  while (req() > jvms()->endoff()) {
    uint last = req() - 1;
    non_debug_edges.push(in(last));
    del_req(last);
  }

  assert(jvms()->endoff() == req(), "no extra edges past debug info allowed");
  DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_populated);
}

void SafePointNode::restore_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) {
  assert(non_debug_edges._state == NodeEdgeTempStorage::state_populated, "not populated");
  assert(jvms()->endoff() == req(), "no extra edges past debug info allowed");

  while (!non_debug_edges.is_empty()) {
    Node* non_debug_edge = non_debug_edges.pop();
    add_req(non_debug_edge);
  }

  assert(non_debug_edges.is_empty(), "edges not processed");
  DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_processed);
}
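
// Because remove_non_debug_edges() pushes edges from last to first and the
// storage pops LIFO, restore_non_debug_edges() re-appends them in their
// original order. A typical (illustrative) pairing:
//
//   NodeEdgeTempStorage tmp;
//   sfpt->remove_non_debug_edges(tmp);   // strip edges past jvms()->endoff()
//   ... transform the safepoint's debug info ...
//   sfpt->restore_non_debug_edges(tmp);  // put them back, same order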

//============== SafePointScalarObjectNode ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  if (!alloc->is_Allocate() && alloc->Opcode() != Op_VectorBox) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::EMPTY;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarObjectNode*)cached;
  }
  new_node = true;
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1);
}
#endif

//============== SafePointScalarMergeNode ==============

SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
  _merge_pointer_idx(merge_pointer_idx)
{
  init_class_id(Class_SafePointScalarMerge);
}

// Do not allow value-numbering for SafePointScalarMerge node.
uint SafePointScalarMergeNode::hash() const { return NO_HASH; }
bool SafePointScalarMergeNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarMergeNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarMergeNode::out_RegMask() const {
  return RegMask::EMPTY;
}

uint SafePointScalarMergeNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarMergeNode*
SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarMergeNode*)cached;
  }
  new_node = true;
  SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr && initializer->is_object_initializer(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

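  // If the receiver does not escape the initializer -- it is at most kept on
  // the callee's stack or in its locals -- no other thread can observe the
  // object while it is under construction, so the allocation's trailing
  // MemBar is redundant.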
  // The newly-allocated object is the first argument ('this') of its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}
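
// Produce the ideal mark word for a freshly allocated object. With compact
// object headers the prototype mark (which encodes the klass) must be loaded
// from the Klass; otherwise the static prototype constant suffices.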
Node *AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(AddPNode::make_off_heap(klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, phase->type(proto_adr)->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    // For now only enable fast locking for non-array types
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
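// For example (illustrative), if the array type bounds the size to at most
// 100 elements while the incoming length is only known to be 'int', the
// narrowed type is [0..100] and the returned CastII pins that range below
// the array's InitializeNode.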
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != nullptr, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != nullptr && length_type != nullptr) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - both are constants and narrow_length <= length
      //   - the narrow_length range is contained in the length range
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&
              (narrow_length_type->_hi <= length_type->_lo)) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return null if new nodes are not allowed
      if (!allow_new_nodes) {
        return nullptr;
      }
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      InitializeNode* init = initialization();
      if (init != nullptr) {
        length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type);
      }
    }
  }

  return length;
}

//=============================================================================
const TypeFunc* LockNode::_lock_type_Type = nullptr;

uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant, meaning that we don't introduce new lock operations on
// some paths just to be able to eliminate them on others, a la PRE.
// This would probably require some more extensive graph manipulation
// to guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//     f();
//     s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting an unlock on the else path and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and a lock
// before giving up eliminating the locks.  The current code disallows
// any conditional control flow between these operations.  A
// formulation similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//                | \\------+
//                |  \       \
//                |  BoxLock   \
//                |  |   |      \
//                |  |    \      \
//                |  |    FastLock
//                |  |   /
//                |  |  /
//                |  |  |
//
//               Lock
//                |
//            Proj #0
//                |
//            MembarAcquire
//                |
//            Proj #0
//
//            MembarRelease
//                |
//            Proj #0
//                |
//              Unlock
//                |
//            Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate they are marked as eliminatable which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == nullptr)
    return nullptr;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == nullptr)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock
// operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
  if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != nullptr && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = nullptr;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != nullptr, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.
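//
// Sketch of the shape being matched (node names illustrative):
//
//        Unlock          <- found by find_matching_unlock() above the If
//          |
//          If
//         /  \
//   IfTrue    IfFalse
//      |         |
//   (this)     Lock       <- the opposite projection must feed a matching
//                            Lock on the same object and box slot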

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = nullptr;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != nullptr && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // Check each control merging at this point for a matching unlock.
  // in(0) is the self edge, so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != nullptr) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

// Check that all locks/unlocks associated with the object come from balanced regions.
bool AbstractLockNode::is_balanced() {
  Node* obj = obj_node();
  for (uint j = 0; j < obj->outcnt(); j++) {
    Node* n = obj->raw_out(j);
    if (n->is_AbstractLock() &&
        n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
      BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
      if (n_box->is_unbalanced()) {
        return false;
      }
    }
  }
  return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char* AbstractLockNode::kind_as_string() const {
  return _kind_names[_kind];
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return nullptr;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking a non-escaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could have been marked eliminated by lock coarsening
      // code during the first IGVN before EA.  Mark it as non-escaping
      // instead so that all associated locks/unlocks are eliminated.
#ifdef ASSERT
      this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    if (!phase->C->do_locks_coarsening()) {
      return result; // Compiling without locks coarsening
    }
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != nullptr && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if: the path
        // leading into the if performs an unlock and the other half of
        // the if performs a matching lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          if (Verbose) {
            tty->print_cr("=== Locks coarsening ===");
            tty->print("Obj: ");
            obj_node()->dump();
          }
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              tty->print("Box %d: ", i);
              box_node()->dump();
              tty->print(" %d: ", i);
              lock->dump();
            }
          }
          tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
        // Record this coarsened group.
        phase->C->add_coarsened_locks(lock_ops);
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(nullptr);
}

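// A lock is "nested" when an enclosing scope already holds a lock on the
// same object, e.g. (illustrative Java):
//
//   synchronized (obj) {     // outer lock: lower (earlier) stack slot
//     synchronized (obj) {   // this lock: provably nested, eliminable
//       ...
//     }
//   }
//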
// c is used for access to the compilation log; no logging if null
bool LockNode::is_nested_lock_region(Compile* c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock, or the box is not a BoxLock (e.g. a Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = nullptr;
  Node* bad_lock = nullptr;
  if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
    if (PrintEliminateLocks && Verbose) {
      tty->print_cr("=============== unique_lock != this ============");
      tty->print(" this: ");
      this->dump();
      tty->print(" box: ");
      box->dump();
      tty->print(" obj: ");
      obj->dump();
      if (unique_lock != nullptr) {
        tty->print(" unique_lock: ");
        unique_lock->dump();
      }
      if (bad_lock != nullptr) {
        tty->print(" bad_lock: ");
        bad_lock->dump();
      }
      tty->print_cr("===============");
    }
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        box->set_nested();
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return nullptr;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could have been marked eliminated by lock coarsening
      // code during the first IGVN before EA.  Mark it as non-escaping
      // instead so that all associated locks/unlocks are eliminated.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

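// Emit a compilation-log record for a lock optimization decision. The
// (illustrative) shape of the output is
//
//   <tag compile_id='...' lock_id='...' class='lock' kind='Coarsened'
//        box_id='...' obj_id='...' bad_id='-1' stamp='...'>
//     <jvms bci='...' method='...'/>   (one per caller frame)
//   </tag>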
void AbstractLockNode::log_lock_optimization(Compile *C, const char* tag, Node* bad_lock) const {
  if (C == nullptr) {
    return;
  }
  CompileLog* log = C->log();
  if (log != nullptr) {
    Node* box = box_node();
    Node* obj = obj_node();
    int box_id = box != nullptr ? box->_idx : -1;
    int obj_id = obj != nullptr ? obj->_idx : -1;

    log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
          tag, C->compile_id(), this->_idx,
          is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
          kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}

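// Decide whether this call (a clone or an arraycopy) may modify the memory
// slice described by t_oop, based on what the destination type dest_t can
// alias:
//   - two known instances alias only if they are the same instance
//   - an instance clone never touches array memory, and vice versa
//   - otherwise fall back to subtype checks and alias-index comparison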
bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array but we don't know what elements are
      return true;
    }

    dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}

PowDNode::PowDNode(Compile* C, Node* base, Node* exp)
    : CallLeafPureNode(
          OptoRuntime::Math_DD_D_Type(),
          StubRoutines::dpow() != nullptr ? StubRoutines::dpow() : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
          "pow") {
  add_flag(Flag_is_macro);
  C->add_macro_node(this);

  init_req(TypeFunc::Parms + 0, base);
  init_req(TypeFunc::Parms + 1, C->top()); // double slot padding
  init_req(TypeFunc::Parms + 2, exp);
  init_req(TypeFunc::Parms + 3, C->top()); // double slot padding
}

const Type* PowDNode::Value(PhaseGVN* phase) const {
  const Type* t_base = phase->type(base());
  const Type* t_exp = phase->type(exp());

  if (t_base == Type::TOP || t_exp == Type::TOP) {
    return Type::TOP;
  }

  const TypeD* base_con = t_base->isa_double_constant();
  const TypeD* exp_con = t_exp->isa_double_constant();
  const TypeD* result_t = nullptr;

  // constant folding: both inputs are constants
  if (base_con != nullptr && exp_con != nullptr) {
    result_t = TypeD::make(SharedRuntime::dpow(base_con->getd(), exp_con->getd()));
  }
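  // For example, base == 2.0 and exp == 10.0 fold to TypeD::make(1024.0),
  // which is then wrapped into the tuple type built below.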

  // Special cases when only the exponent is known:
  if (exp_con != nullptr) {
    double e = exp_con->getd();

    // If the second argument is positive or negative zero, then the result is 1.0.
    // i.e., pow(x, +/-0.0D) => 1.0
    if (e == 0.0) { // true for both -0.0 and +0.0
      result_t = TypeD::ONE;
    }

    // If the second argument is NaN, then the result is NaN.
    // i.e., pow(x, NaN) => NaN
    if (g_isnan(e)) {
      result_t = TypeD::make(NAN);
    }
  }

  if (result_t != nullptr) {
    // We can't simply return a TypeD here, it must be a tuple type to be compatible with call nodes.
    const Type** fields = TypeTuple::fields(2);
    fields[TypeFunc::Parms + 0] = result_t;
    fields[TypeFunc::Parms + 1] = Type::HALF;
    return TypeTuple::make(TypeFunc::Parms + 2, fields);
  }

  return tf()->range();
}

Node* PowDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (!can_reshape) {
    return nullptr; // wait for igvn
  }

  PhaseIterGVN* igvn = phase->is_IterGVN();
  Node* base = this->base();
  Node* exp = this->exp();

  const Type* t_exp = phase->type(exp);
  const TypeD* exp_con = t_exp->isa_double_constant();

  // Special cases when only the exponent is known:
  if (exp_con != nullptr) {
    double e = exp_con->getd();

    // If the second argument is 1.0, then the result is the same as the first argument.
    // i.e., pow(x, 1.0) => x
    if (e == 1.0) {
      return make_tuple_of_input_state_and_result(igvn, base);
    }

    // If the second argument is 2.0, then strength reduce to a multiplication.
    // i.e., pow(x, 2.0) => x * x
    if (e == 2.0) {
      Node* mul = igvn->transform(new MulDNode(base, base));
      return make_tuple_of_input_state_and_result(igvn, mul);
    }

    // If the second argument is 0.5, then strength reduce to a square root.
    // i.e., pow(x, 0.5) => sqrt(x) iff x > 0
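    //
    // The transformed graph is a diamond (sketch; projections elided):
    //
    //          CmpD(base, 0.0) -> Bool(le) -> If
    //                                        /    \
    //                          IfTrue (x <= 0)    IfFalse (x > 0)
    //                                  |                |
    //                          slow: pow(x, 0.5)   fast: SqrtD(x)
    //                                   \              /
    //                                    Region -- Phi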
    if (e == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) {
      Node* ctrl = in(TypeFunc::Control);
      Node* zero = igvn->zerocon(T_DOUBLE);

      // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0.
      // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0).
      // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0.
      Node* cmp = igvn->register_new_node_with_optimizer(new CmpDNode(base, zero));
      Node* test = igvn->register_new_node_with_optimizer(new BoolNode(cmp, BoolTest::le));

      IfNode* iff = new IfNode(ctrl, test, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
      igvn->register_new_node_with_optimizer(iff);
      Node* if_slow = igvn->register_new_node_with_optimizer(new IfTrueNode(iff));  // x <= 0
      Node* if_fast = igvn->register_new_node_with_optimizer(new IfFalseNode(iff)); // x > 0

      // slow path: call pow(x, 0.5)
      Node* call = igvn->register_new_node_with_optimizer(inline_call_leaf_pure_node(if_slow));
      Node* call_ctrl = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Control));
      Node* call_result = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Parms + 0));

      // fast path: sqrt(x)
      Node* sqrt = igvn->register_new_node_with_optimizer(new SqrtDNode(igvn->C, if_fast, base));

      // merge paths
      RegionNode* region = new RegionNode(3);
      igvn->register_new_node_with_optimizer(region);
      region->init_req(1, call_ctrl); // slow path
      region->init_req(2, if_fast);   // fast path

      PhiNode* phi = new PhiNode(region, Type::DOUBLE);
      igvn->register_new_node_with_optimizer(phi);
      phi->init_req(1, call_result); // slow: pow() result
      phi->init_req(2, sqrt);        // fast: sqrt() result

      igvn->C->set_has_split_ifs(true); // Has chance for split-if optimization

      return make_tuple_of_input_state_and_result(igvn, phi, region);
    }
  }

  return CallLeafPureNode::Ideal(phase, can_reshape);
}

// We can't simply have Ideal() return a Con or Mul node since users still
// expect a Call node, but we can produce a tuple that follows the same
// pattern so users can still project control, I/O, memory, etc.
TupleNode* PowDNode::make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control) {
  if (control == nullptr) {
    control = in(TypeFunc::Control);
  }

  Compile* C = phase->C;
  C->remove_macro_node(this);
  TupleNode* tuple = TupleNode::make(
      tf()->range(),
      control,
      in(TypeFunc::I_O),
      in(TypeFunc::Memory),
      in(TypeFunc::FramePtr),
      in(TypeFunc::ReturnAdr),
      result,
      C->top());
  return tuple;
}