/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "code/vmreg.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::EMPTY;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}
#endif

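// Map a ParmNode onto its ideal register class: the control, I/O and memory
// projections occupy no register, the frame pointer and return address are
// machine pointers, and real parameters take their register class from the
// start node's signature.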
uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
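// A JVMState describes the Java bytecode state (locals, expression stack,
// monitors and scalar-replaced objects) at a particular call or safepoint.
// The debug edges of the owning map are laid out in that order and delimited
// by the _locoff, _stkoff, _monoff, _scloff and _endoff offsets set up below.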
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method),
  _receiver_info(nullptr) {
  assert(method != nullptr, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  DEBUG_ONLY(_bci = -99);  // random garbage value
  DEBUG_ONLY(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == nullptr ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(nullptr),
  _receiver_info(nullptr) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  DEBUG_ONLY(_map = (SafePointNode*)-1);
  _caller = nullptr;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == nullptr)       return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == nullptr) { st->print(" null"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf,sizeof(buf));
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#null",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
    case Type::AryKlassPtr:
    case Type::InstKlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//---------------------print_method_with_lineno--------------------------------
void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
  if (show_name) _method->print_short_name(st);

  int lineno = _method->line_number_from_bci(_bci);
  if (lineno != -1) {
    st->print(" @ bci:%d (line %d)", _bci, lineno);
  } else {
    st->print(" @ bci:%d", _bci);
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    print_method_with_lineno(st, true);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
                  regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->cr();
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = nullptr;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != nullptr) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != nullptr) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != nullptr) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != nullptr) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == nullptr)  endcn = strchr(name, '(');
        if (endcn == nullptr)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != nullptr) caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                   ((caller() == nullptr) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != nullptr && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != nullptr) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
            depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == nullptr) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  n->set_receiver_info(_receiver_info);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != nullptr; p = p->_caller) {
    p->set_map(map);
  }
}

// Unlike set_map(), this is a two-way setting.
void JVMState::bind_map(SafePointNode* map) {
  set_map(map);
  _map->set_jvms(this);
}

// Adapt offsets in the input array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != nullptr) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}

// Compute receiver info for a compiled lambda form at a call site.
ciInstance* JVMState::compute_receiver_info(ciMethod* callee) const {
  assert(callee != nullptr && callee->is_compiled_lambda_form(), "");
  if (has_method() && method()->is_compiled_lambda_form()) { // callee is not a MH invoker
    Node* recv = map()->argument(this, 0);
    assert(recv != nullptr, "");
    const TypeOopPtr* recv_toop = recv->bottom_type()->isa_oopptr();
    if (recv_toop != nullptr && recv_toop->const_oop() != nullptr) {
      return recv_toop->const_oop()->as_instance();
    }
  }
  return nullptr;
}

//=============================================================================
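// Two calls are equivalent for value numbering only if they agree on both the
// signature (_tf) and the attached JVMState.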
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}

void AllocateNode::dump_spec(outputStream* st) const {
  st->print(" ");
  if (tf() != nullptr) {
    tf()->dump_on(st);
  }
  if (_cnt != COUNT_UNKNOWN) {
    st->print(" C=%f", _cnt);
  }
  const Node* const klass_node = in(KlassNode);
  if (klass_node != nullptr) {
    const TypeKlassPtr* const klass_ptr = klass_node->bottom_type()->isa_klassptr();

    if (klass_ptr != nullptr && klass_ptr->klass_is_exact()) {
      st->print(" allocationKlass:");
      klass_ptr->exact_klass()->print_name_on(st);
    }
  }
  if (jvms() != nullptr) {
    jvms()->dump_spec(st);
  }
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::EMPTY, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = Opcode() == Op_CallLeafVector
      ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
      : is_CallRuntime()
        ? match->c_return_value(ideal_reg)  // Calls into C runtime
        : match->  return_value(ideal_reg); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());

    if (Opcode() == Op_CallLeafVector) {
      // If the return value is a vector, compute the appropriate regmask, taking the whole range into account
      if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
        if(OptoReg::is_valid(regs.second())) {
          for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
            rm.insert(r);
          }
        }
      }
    }

    if( OptoReg::is_valid(regs.second()) )
      rm.insert(regs.second());
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify a field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}

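// Collect the control, I/O, memory and result projections hanging off this
// call, separating the fall-through paths from the exceptional (catch-all)
// paths. Missing projections are left as null; the asserts at the end state
// which ones are required to exist.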
void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) const {
  projs->fallthrough_proj      = nullptr;
  projs->fallthrough_catchproj = nullptr;
  projs->fallthrough_ioproj    = nullptr;
  projs->catchall_ioproj       = nullptr;
  projs->catchall_catchproj    = nullptr;
  projs->fallthrough_memproj   = nullptr;
  projs->catchall_memproj      = nullptr;
  projs->resproj               = nullptr;
  projs->exobj                 = nullptr;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
    {
      // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
      projs->fallthrough_proj = pn;
      const Node* cn = pn->unique_ctrl_out_or_null();
      if (cn != nullptr && cn->is_Catch()) {
        ProjNode *cpn = nullptr;
        for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
          cpn = cn->fast_out(k)->as_Proj();
          assert(cpn->is_CatchProj(), "must be a CatchProjNode");
          if (cpn->_con == CatchProjNode::fall_through_index)
            projs->fallthrough_catchproj = cpn;
          else {
            assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
            projs->catchall_catchproj = cpn;
          }
        }
      }
      break;
    }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == nullptr, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored,
  // and the exception object may not exist if an exception handler
  // swallows the exception, but all the others must exist and be found.
  assert(projs->fallthrough_proj != nullptr, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
  }
}

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

bool CallNode::is_call_to_multianewarray_stub() const {
  if (_name != nullptr &&
      strstr(_name, "multianewarray") != nullptr &&
      strstr(_name, "C2 runtime") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

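// Hand the attached CallGenerator back to the compiler so the call is
// reconsidered during incremental inlining. The generator is cleared so the
// same call cannot be registered twice.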
void CallJavaNode::register_for_late_inline() {
  if (generator() != nullptr) {
    Compile::current()->prepend_late_inline(generator());
    set_generator(nullptr);
  } else {
    assert(false, "repeated inline attempt");
  }
}

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    if (cg->is_mh_late_inline()) {
      assert(IncrementalInlineMH, "required");
      assert(cg->call_node() == this, "mismatch");
      assert(cg->method()->is_method_handle_intrinsic(), "required");

      // Check whether this MH call becomes a candidate for inlining.
      ciMethod* callee = cg->method();
      vmIntrinsics::ID iid = callee->intrinsic_id();
      if (iid == vmIntrinsics::_invokeBasic) {
        if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
          register_for_late_inline();
        }
      } else if (iid == vmIntrinsics::_linkToNative) {
        // never retry
      } else {
        assert(callee->has_member_arg(), "wrong type of call?");
        if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
          register_for_late_inline();
        }
      }
    } else {
      assert(IncrementalInline, "required");
      assert(!cg->method()->is_method_handle_intrinsic(), "required");
      if (phase->C->print_inlining()) {
        phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
                                           "static call node changed: trying again");
      }
      register_for_late_inline();
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

//----------------------------is_uncommon_trap----------------------------
// Returns true if this is an uncommon trap.
bool CallStaticJavaNode::is_uncommon_trap() const {
  return (_name != nullptr && !strcmp(_name, "uncommon_trap"));
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    if (cg->is_virtual_late_inline()) {
      assert(IncrementalInlineVirtual, "required");
      assert(cg->call_node() == this, "mismatch");

      if (cg->callee_method() == nullptr) {
        // Recover symbolic info for method resolution.
        ciMethod* caller = jvms()->method();
        ciBytecodeStream iter(caller);
        iter.force_bci(jvms()->bci());

        bool             not_used1;
        ciSignature*     not_used2;
        ciMethod*        orig_callee = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
        ciKlass*         holder      = iter.get_declared_method_holder();
        if (orig_callee->is_method_handle_intrinsic()) {
          assert(_override_symbolic_info, "required");
          orig_callee = method();
          holder = method()->holder();
        }

        ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

        Node* receiver_node = in(TypeFunc::Parms);
        const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

        int  not_used3;
        bool call_does_dispatch;
        ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
                                                           call_does_dispatch, not_used3);  // out-parameters
        if (!call_does_dispatch) {
          cg->set_callee_method(callee);
        }
      }
      if (cg->callee_method() != nullptr) {
        // Register for late inlining.
        register_for_late_inline(); // MH late inlining prepends to the list, so do the same
      }
    } else {
      assert(IncrementalInline, "required");
      if (phase->C->print_inlining()) {
        phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
                                           "dynamic call node changed: trying again");
      }
      register_for_late_inline();
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}

void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
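// A pure leaf call has no side effects, so it is kept alive only by its
// return value: it is unused when the result projection is gone and dead when
// the control projection is gone.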
bool CallLeafPureNode::is_unused() const {
  return proj_out_or_null(TypeFunc::Parms) == nullptr;
}

bool CallLeafPureNode::is_dead() const {
  return proj_out_or_null(TypeFunc::Control) == nullptr;
}

/* We make a tuple of the global input state + TOP for the output values.
 * We use this to delete a pure function that is not used: by replacing the call with
 * such a tuple, we let the output Projs' idealization pick the corresponding input of
 * the pure call, jumping over it and effectively removing the call from the graph.
 * This avoids doing the graph surgery manually and instead leaves it to IGVN,
 * which is specialized for doing that right. We also need tuple components for the
 * output values of the function to respect the return arity, and in case there is a
 * projection that would pick an output (which shouldn't happen at the moment).
 */
TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(const Compile* C) const {
  // Transparently propagate the input state, but not the parameters.
  TupleNode* tuple = TupleNode::make(
    tf()->range(),
    in(TypeFunc::Control),
    in(TypeFunc::I_O),
    in(TypeFunc::Memory),
    in(TypeFunc::FramePtr),
    in(TypeFunc::ReturnAdr));

  // And add TOPs for the return values
  for (uint i = TypeFunc::Parms; i < tf()->range()->cnt(); i++) {
    tuple->set_req(i, C->top());
  }

  return tuple;
}

Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (is_dead()) {
    return nullptr;
  }

  // We need to wait until IGVN because during parsing, usages might still be missing
  // and we would remove the call immediately.
  if (can_reshape && is_unused()) {
    // The result is not used. We remove the call by replacing it with a tuple, that
    // is later disintegrated by the projections.
    return make_tuple_of_input_state_and_top_return_values(phase->C);
  }

  return CallRuntimeNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(const JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc - 1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
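// Exception states are chained off a SafePoint through a single extra
// precedence edge (slot req()), forming a singly-linked list that
// next_exception() below walks.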
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != nullptr)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return nullptr;
  } else {
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if (in(TypeFunc::Control)->is_SafePoint()) {
    Node* out_c = unique_ctrl_out_or_null();
    // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
    // outer loop's safepoint could confuse removal of the outer loop.
    if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
      return in(TypeFunc::Control);
    }
  }

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (in(0) == this) {
    return Type::TOP; // Dead infinite loop
  }
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if (idx < TypeFunc::Parms) {
    return RegMask::EMPTY;
  }
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::EMPTY;
}

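// Insert grow_by extra stack slots (initialized to top) just below the monitor
// section, shifting the monitor, scalar-object and end offsets up accordingly.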
1514 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
1515 assert((int)grow_by > 0, "sanity");
1516 int monoff = jvms->monoff();
1517 int scloff = jvms->scloff();
1518 int endoff = jvms->endoff();
1519 assert(endoff == (int)req(), "no other states or debug info after me");
1520 Node* top = Compile::current()->top();
1521 for (uint i = 0; i < grow_by; i++) {
1522 ins_req(monoff, top);
1523 }
1524 jvms->set_monoff(monoff + grow_by);
1525 jvms->set_scloff(scloff + grow_by);
1526 jvms->set_endoff(endoff + grow_by);
1527 }

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  ins_req(nextmon, lock->box_node());
  ins_req(nextmon+1, lock->obj_node());
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  DEBUG_ONLY(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff) del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

Node* SafePointNode::peek_operand(uint off) const {
  assert(jvms()->sp() > 0, "must have an operand");
  assert(off < jvms()->sp(), "off is out-of-range");
  return stack(jvms(), jvms()->sp() - off - 1);
}

// Do we Match on this edge index or not? Match only the Parms edge,
// which carries the safepoint polling address.
uint SafePointNode::match_edge(uint idx) const {
  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}

//============== SafePointScalarObjectNode ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  if (!alloc->is_Allocate() && alloc->Opcode() != Op_VectorBox) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::EMPTY;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarObjectNode*)cached;
  }
  new_node = true;
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1);
}
#endif

//============== SafePointScalarMergeNode ==============

SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
  _merge_pointer_idx(merge_pointer_idx)
{
  init_class_id(Class_SafePointScalarMerge);
}

// Do not allow value-numbering for SafePointScalarMerge node.
uint SafePointScalarMergeNode::hash() const { return NO_HASH; }
bool SafePointScalarMergeNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarMergeNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarMergeNode::out_RegMask() const {
  return RegMask::EMPTY;
}

uint SafePointScalarMergeNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarMergeNode*
SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarMergeNode*)cached;
  }
  new_node = true;
  SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr && initializer->is_object_initializer(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

  // The allocated object is passed as the first parameter (receiver) to its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}
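
// With compact object headers the mark word prototype is per-klass (the
// mark encodes the klass), so it must be loaded from the Klass; otherwise
// the prototype is a compile-time constant.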
Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    // For now only enable fast locking for non-array types
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != nullptr, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != nullptr && length_type != nullptr) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&
              (narrow_length_type->_hi <= length_type->_lo)) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return null if new nodes are not allowed
      if (!allow_new_nodes) {
        return nullptr;
      }
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be non-negative.
      InitializeNode* init = initialization();
      if (init != nullptr) {
        length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type);
      }
    }
  }

  return length;
}
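
// For illustration: if oop_type is byte[] and the incoming length is only
// known to be 'int', narrow_size_type() clamps the type to the valid
// byte[] size range, and the CastII created above pins that fact below the
// initialization so later uses of the length observe the tighter,
// non-negative range.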

//=============================================================================
const TypeFunc* LockNode::_lock_type_Type = nullptr;

uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable. In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements. Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow. We also require that the operations be fully
// redundant, meaning that we don't introduce new lock operations on
// some paths just to be able to eliminate them on others, as in PRE;
// that would probably require more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting an unlock on the else path and
// eliminating the first unlock and the lock in the then side.
// Alternatively, the unlock could be moved out of the then side so that
// it sits after the merge, allowing the first unlock and the second
// lock to be eliminated. The latter might require less manipulation of
// the memory state to get correct.
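//
// For illustration only (neither transformation is performed here), the
// first alternative would reshape the code above into:
//
//   lock; s-body;        // unlock of the first s() eliminated
//   if (p) {
//     s-body; unlock;    // lock of the nested s() eliminated
//   } else {
//     unlock;            // compensating unlock inserted
//   }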
//
// Additionally we might allow work between an unlock and a lock before
// giving up eliminating the locks. The current code disallows any
// conditional control flow between these operations. A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between. I'm not sure how often I really
// think that would occur though. Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//       <obj>
//         | \\------+
//         |  \       \
//         |  BoxLock   \
//         |  |   |      \
//         |  |    \      \
//         |  |    FastLock
//         |  |   /
//         |  |  /
//         |  |  |
//
//       Lock
//        |
//       Proj #0
//        |
//       MembarAcquire
//        |
//       Proj #0
//
//       MembarRelease
//        |
//       Proj #0
//        |
//       Unlock
//        |
//       Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through their control inputs for the proper code
// patterns. Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminable, which causes the expansion
// of the Lock and Unlock macro nodes to turn the operations into NOPs.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes. Nodes skipped are:
//   - copy regions. (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == nullptr)
    return nullptr;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == nullptr)
        break; // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it is the control projection of an Unlock
// operating on the same object as the given lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
  if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != nullptr && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock. Returns null if a safepoint
// or complicated control is encountered first.
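//
// While searching back, a simple control diamond is tolerated and
// skipped over; anything more complicated ends the search:
//
//            If
//           /  \
//      IfTrue  IfFalse
//           \  /
//          Region
//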
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = nullptr;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != nullptr, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break; // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break; // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern. Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0)); // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

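// The shape being matched, with 'node' one projection of the If:
//
//      Unlock            (matching unlock found above the If)
//        |
//        If
//       /  \
//    node   other proj
//              |
//            Lock        (single use of the other projection)
//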
bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = nullptr;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != nullptr && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // Check each control merging at this point for a matching unlock.
  // in(0) is the self edge, so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != nullptr) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

// Check that all locks/unlocks associated with the object come from balanced regions.
bool AbstractLockNode::is_balanced() {
  Node* obj = obj_node();
  for (uint j = 0; j < obj->outcnt(); j++) {
    Node* n = obj->raw_out(j);
    if (n->is_AbstractLock() &&
        n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
      BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
      if (n_box->is_unbalanced()) {
        return false;
      }
    }
  }
  return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char * AbstractLockNode::kind_as_string() const {
  return _kind_names[_kind];
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return nullptr;

  // Now see if we can optimize away this lock. We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock. Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking a non-escaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // with the non-escaping-object status to eliminate all
      // associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    if (!phase->C->do_locks_coarsening()) {
      return result; // Compiling without locks coarsening
    }
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != nullptr && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock. This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if, where the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          if (Verbose) {
            tty->print_cr("=== Locks coarsening ===");
            tty->print("Obj: ");
            obj_node()->dump();
          }
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              tty->print("Box %d: ", i);
              box_node()->dump();
              tty->print(" %d: ", i);
              lock->dump();
            }
          }
          tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
        // Record this coarsened group.
        phase->C->add_coarsened_locks(lock_ops);
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(nullptr);
}

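// The typical nested shape: an enclosing lock on the same object, with a
// lower BoxLock stack slot, is still held (appears in the JVM state) at
// the point of this lock:
//
//   synchronized (obj) {      // outer lock (lower stack slot)
//     ...
//     synchronized (obj) {    // this lock: provably nested, eliminable
//       ...
//     }
//   }
//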
// c is used for access to the compilation log; no logging if null
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock or it is not Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = nullptr;
  Node* bad_lock = nullptr;
  if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
    if (PrintEliminateLocks && Verbose) {
      tty->print_cr("=============== unique_lock != this ============");
      tty->print(" this: ");
      this->dump();
      tty->print(" box: ");
      box->dump();
      tty->print(" obj: ");
      obj->dump();
      if (unique_lock != nullptr) {
        tty->print(" unique_lock: ");
        unique_lock->dump();
      }
      if (bad_lock != nullptr) {
        tty->print(" bad_lock: ");
        bad_lock->dump();
      }
      tty->print_cr("===============");
    }
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        box->set_nested();
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return nullptr;

  // Now see if we can optimize away this unlock. We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock. Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is only defined after the parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // with the non-escaping-object status to eliminate all
      // associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
  if (C == nullptr) {
    return;
  }
  CompileLog* log = C->log();
  if (log != nullptr) {
    Node* box = box_node();
    Node* obj = obj_node();
    int box_id = box != nullptr ? box->_idx : -1;
    int obj_id = obj != nullptr ? obj->_idx : -1;

    log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
                    tag, C->compile_id(), this->_idx,
                    is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
                    kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}

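// Conservatively decide whether a call writing through dest_t (the
// destination of an arraycopy or clone) may modify memory of type t_oop;
// returns false only when the two types provably refer to disjoint memory.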
bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array but we don't know what elements are
      return true;
    }

    dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}