1 /*
2 * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/bcEscapeAnalyzer.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "libadt/vectset.hpp"
30 #include "memory/allocation.hpp"
31 #include "memory/metaspace.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "opto/arraycopynode.hpp"
34 #include "opto/c2compiler.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/compile.hpp"
39 #include "opto/escape.hpp"
40 #include "opto/inlinetypenode.hpp"
41 #include "opto/locknode.hpp"
42 #include "opto/macro.hpp"
43 #include "opto/movenode.hpp"
44 #include "opto/narrowptrnode.hpp"
45 #include "opto/phaseX.hpp"
46 #include "opto/rootnode.hpp"
47 #include "utilities/macros.hpp"
48
49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
50 // If ReduceAllocationMerges is enabled we might call split_through_phi during
51 // split_unique_types and that will create additional nodes that need to be
52 // pushed to the ConnectionGraph. The code below bumps the initial capacity of
53 // _nodes by 10% to account for these additional nodes. If capacity is exceeded
54 // the array will be reallocated.
55 _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
56 _in_worklist(C->comp_arena()),
57 _next_pidx(0),
58 _collecting(true),
59 _verify(false),
60 _compile(C),
61 _igvn(igvn),
62 _invocation(invocation),
63 _build_iterations(0),
64 _build_time(0.),
65 _node_map(C->comp_arena()) {
66 // Add unknown java object.
67 add_java_object(C->top(), PointsToNode::GlobalEscape);
68 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
69 set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
70 // Add ConP and ConN null oop nodes
71 Node* oop_null = igvn->zerocon(T_OBJECT);
72 assert(oop_null->_idx < nodes_size(), "should be created already");
73 add_java_object(oop_null, PointsToNode::NoEscape);
74 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
75 set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
76 if (UseCompressedOops) {
77 Node* noop_null = igvn->zerocon(T_NARROWOOP);
78 assert(noop_null->_idx < nodes_size(), "should be created already");
79 map_ideal_node(noop_null, null_obj);
80 }
81 }
82
83 bool ConnectionGraph::has_candidates(Compile *C) {
84 // EA brings benefits only when the code has allocations and/or locks which
85 // are represented by ideal Macro nodes.
86 int cnt = C->macro_count();
87 for (int i = 0; i < cnt; i++) {
88 Node *n = C->macro_node(i);
89 if (n->is_Allocate()) {
90 return true;
91 }
92 if (n->is_Lock()) {
93 Node* obj = n->as_Lock()->obj_node()->uncast();
94 if (!(obj->is_Parm() || obj->is_Con())) {
95 return true;
96 }
97 }
98 if (n->is_CallStaticJava() &&
99 n->as_CallStaticJava()->is_boxing_method()) {
100 return true;
101 }
102 }
103 return false;
104 }
105
106 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
107 Compile::TracePhase tp(Phase::_t_escapeAnalysis);
108 ResourceMark rm;
109
110 // Add ConP and ConN null oop nodes before ConnectionGraph construction
111 // to create space for them in ConnectionGraph::_nodes[].
112 Node* oop_null = igvn->zerocon(T_OBJECT);
113 Node* noop_null = igvn->zerocon(T_NARROWOOP);
114 int invocation = 0;
115 if (C->congraph() != nullptr) {
116 invocation = C->congraph()->_invocation + 1;
117 }
118 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
119 NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(congraph);)
120 // Perform escape analysis
121 if (congraph->compute_escape()) {
122 // There are non escaping objects.
123 C->set_congraph(congraph);
124 }
125 NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(nullptr);)
126 // Cleanup.
127 if (oop_null->outcnt() == 0) {
128 igvn->hash_delete(oop_null);
129 }
130 if (noop_null->outcnt() == 0) {
131 igvn->hash_delete(noop_null);
132 }
133
134 C->print_method(PHASE_AFTER_EA, 2);
135 }
136
137 bool ConnectionGraph::compute_escape() {
138 Compile* C = _compile;
139 PhaseGVN* igvn = _igvn;
140
141 // Worklists used by EA.
142 Unique_Node_List delayed_worklist;
143 Unique_Node_List reducible_merges;
144 GrowableArray<Node*> alloc_worklist;
145 GrowableArray<Node*> ptr_cmp_worklist;
146 GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
147 GrowableArray<ArrayCopyNode*> arraycopy_worklist;
148 GrowableArray<PointsToNode*> ptnodes_worklist;
149 GrowableArray<JavaObjectNode*> java_objects_worklist;
150 GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
151 GrowableArray<FieldNode*> oop_fields_worklist;
152 GrowableArray<SafePointNode*> sfn_worklist;
153 GrowableArray<MergeMemNode*> mergemem_worklist;
154 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
155
156 { Compile::TracePhase tp(Phase::_t_connectionGraph);
157
158 // 1. Populate Connection Graph (CG) with PointsTo nodes.
159 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
160 // Initialize worklist
161 if (C->root() != nullptr) {
162 ideal_nodes.push(C->root());
163 }
164 // Processed ideal nodes are unique on ideal_nodes list
165 // but several ideal nodes are mapped to the phantom_obj.
166 // To avoid duplicated entries on the following worklists
167 // add the phantom_obj only once to them.
168 ptnodes_worklist.append(phantom_obj);
169 java_objects_worklist.append(phantom_obj);
170 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
171 Node* n = ideal_nodes.at(next);
172 if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
173 !n->in(MemNode::Address)->is_AddP() &&
174 _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
175 // Load/Store at mark work address is at offset 0 so has no AddP which confuses EA
176 Node* addp = AddPNode::make_with_base(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
177 _igvn->register_new_node_with_optimizer(addp);
178 _igvn->replace_input_of(n, MemNode::Address, addp);
179 ideal_nodes.push(addp);
180 _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
181 }
182 // Create PointsTo nodes and add them to Connection Graph. Called
183 // only once per ideal node since ideal_nodes is Unique_Node list.
184 add_node_to_connection_graph(n, &delayed_worklist);
185 PointsToNode* ptn = ptnode_adr(n->_idx);
186 if (ptn != nullptr && ptn != phantom_obj) {
187 ptnodes_worklist.append(ptn);
188 if (ptn->is_JavaObject()) {
189 java_objects_worklist.append(ptn->as_JavaObject());
190 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
191 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
192 // Only allocations and java static calls results are interesting.
193 non_escaped_allocs_worklist.append(ptn->as_JavaObject());
194 }
195 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
196 oop_fields_worklist.append(ptn->as_Field());
197 }
198 }
199 // Collect some interesting nodes for further use.
200 switch (n->Opcode()) {
201 case Op_MergeMem:
202 // Collect all MergeMem nodes to add memory slices for
203 // scalar replaceable objects in split_unique_types().
204 mergemem_worklist.append(n->as_MergeMem());
205 break;
206 case Op_CmpP:
207 case Op_CmpN:
208 // Collect compare pointers nodes.
209 if (OptimizePtrCompare) {
210 ptr_cmp_worklist.append(n);
211 }
212 break;
213 case Op_MemBarStoreStore:
214 // Collect all MemBarStoreStore nodes so that depending on the
215 // escape status of the associated Allocate node some of them
216 // may be eliminated.
217 if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
218 storestore_worklist.append(n->as_MemBarStoreStore());
219 }
220 // If MemBarStoreStore has a precedent edge add it to the worklist (like MemBarRelease)
221 case Op_MemBarRelease:
222 if (n->req() > MemBarNode::Precedent) {
223 record_for_optimizer(n);
224 }
225 break;
226 #ifdef ASSERT
227 case Op_AddP:
228 // Collect address nodes for graph verification.
229 addp_worklist.append(n);
230 break;
231 #endif
232 case Op_ArrayCopy:
233 // Keep a list of ArrayCopy nodes so if one of its input is non
234 // escaping, we can record a unique type
235 arraycopy_worklist.append(n->as_ArrayCopy());
236 break;
237 default:
238 // not interested now, ignore...
239 break;
240 }
241 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
242 Node* m = n->fast_out(i); // Get user
243 ideal_nodes.push(m);
244 }
245 if (n->is_SafePoint()) {
246 sfn_worklist.append(n->as_SafePoint());
247 }
248 }
249
250 #ifndef PRODUCT
251 if (_compile->directive()->TraceEscapeAnalysisOption) {
252 tty->print("+++++ Initial worklist for ");
253 _compile->method()->print_name();
254 tty->print_cr(" (ea_inv=%d)", _invocation);
255 for (int i = 0; i < ptnodes_worklist.length(); i++) {
256 PointsToNode* ptn = ptnodes_worklist.at(i);
257 ptn->dump();
258 }
259 tty->print_cr("+++++ Calculating escape states and scalar replaceability");
260 }
261 #endif
262
263 if (non_escaped_allocs_worklist.length() == 0) {
264 _collecting = false;
265 NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
266 return false; // Nothing to do.
267 }
268 // Add final simple edges to graph.
269 while(delayed_worklist.size() > 0) {
270 Node* n = delayed_worklist.pop();
271 add_final_edges(n);
272 }
273
274 #ifdef ASSERT
275 if (VerifyConnectionGraph) {
276 // Verify that no new simple edges could be created and all
277 // local vars has edges.
278 _verify = true;
279 int ptnodes_length = ptnodes_worklist.length();
280 for (int next = 0; next < ptnodes_length; ++next) {
281 PointsToNode* ptn = ptnodes_worklist.at(next);
282 add_final_edges(ptn->ideal_node());
283 if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
284 ptn->dump();
285 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
286 }
287 }
288 _verify = false;
289 }
290 #endif
291 // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
292 // processing, calls to CI to resolve symbols (types, fields, methods)
293 // referenced in bytecode. During symbol resolution VM may throw
294 // an exception which CI cleans and converts to compilation failure.
295 if (C->failing()) {
296 NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
297 return false;
298 }
299
300 _compile->print_method(PHASE_EA_AFTER_INITIAL_CONGRAPH, 4);
301
302 // 2. Finish Graph construction by propagating references to all
303 // java objects through graph.
304 if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
305 java_objects_worklist, oop_fields_worklist)) {
306 // All objects escaped or hit time or iterations limits.
307 _collecting = false;
308 NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
309 return false;
310 }
311
312 _compile->print_method(PHASE_EA_AFTER_COMPLETE_CONGRAPH, 4);
313
314 // 3. Adjust scalar_replaceable state of nonescaping objects and push
315 // scalar replaceable allocations on alloc_worklist for processing
316 // in split_unique_types().
317 GrowableArray<JavaObjectNode*> jobj_worklist;
318 int non_escaped_length = non_escaped_allocs_worklist.length();
319 bool found_nsr_alloc = false;
320 for (int next = 0; next < non_escaped_length; next++) {
321 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
322 bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
323 Node* n = ptn->ideal_node();
324 if (n->is_Allocate()) {
325 n->as_Allocate()->_is_non_escaping = noescape;
326 }
327 if (noescape && ptn->scalar_replaceable()) {
328 adjust_scalar_replaceable_state(ptn, reducible_merges);
329 if (ptn->scalar_replaceable()) {
330 jobj_worklist.push(ptn);
331 } else {
332 found_nsr_alloc = true;
333 }
334 }
335 _compile->print_method(PHASE_EA_ADJUST_SCALAR_REPLACEABLE_ITER, 6, n);
336 }
337
338 // Propagate NSR (Not Scalar Replaceable) state.
339 if (found_nsr_alloc) {
340 find_scalar_replaceable_allocs(jobj_worklist, reducible_merges);
341 }
342
343 // alloc_worklist will be processed in reverse push order.
344 // Therefore the reducible Phis will be processed for last and that's what we
345 // want because by then the scalarizable inputs of the merge will already have
346 // an unique instance type.
347 for (uint i = 0; i < reducible_merges.size(); i++ ) {
348 Node* n = reducible_merges.at(i);
349 alloc_worklist.append(n);
350 }
351
352 for (int next = 0; next < jobj_worklist.length(); ++next) {
353 JavaObjectNode* jobj = jobj_worklist.at(next);
354 if (jobj->scalar_replaceable()) {
355 alloc_worklist.append(jobj->ideal_node());
356 }
357 }
358
359 #ifdef ASSERT
360 if (VerifyConnectionGraph) {
361 // Verify that graph is complete - no new edges could be added or needed.
362 verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
363 java_objects_worklist, addp_worklist);
364 }
365 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
366 assert(null_obj->escape_state() == PointsToNode::NoEscape &&
367 null_obj->edge_count() == 0 &&
368 !null_obj->arraycopy_src() &&
369 !null_obj->arraycopy_dst(), "sanity");
370 #endif
371
372 _collecting = false;
373
374 _compile->print_method(PHASE_EA_AFTER_PROPAGATE_NSR, 4);
375 } // TracePhase t3("connectionGraph")
376
377 // 4. Optimize ideal graph based on EA information.
378 bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
379 if (has_non_escaping_obj) {
380 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
381 }
382
383 #ifndef PRODUCT
384 if (PrintEscapeAnalysis) {
385 dump(ptnodes_worklist); // Dump ConnectionGraph
386 }
387 #endif
388
389 #ifdef ASSERT
390 if (VerifyConnectionGraph) {
391 int alloc_length = alloc_worklist.length();
392 for (int next = 0; next < alloc_length; ++next) {
393 Node* n = alloc_worklist.at(next);
394 PointsToNode* ptn = ptnode_adr(n->_idx);
395 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
396 }
397 }
398
399 if (VerifyReduceAllocationMerges) {
400 for (uint i = 0; i < reducible_merges.size(); i++ ) {
401 Node* n = reducible_merges.at(i);
402 if (!can_reduce_phi(n->as_Phi())) {
403 TraceReduceAllocationMerges = true;
404 n->dump(2);
405 n->dump(-2);
406 assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
407 }
408 }
409 }
410 #endif
411
412 _compile->print_method(PHASE_EA_AFTER_GRAPH_OPTIMIZATION, 4);
413
414 // 5. Separate memory graph for scalar replaceable allcations.
415 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
416 if (has_scalar_replaceable_candidates && EliminateAllocations) {
417 assert(C->do_aliasing(), "Aliasing should be enabled");
418 // Now use the escape information to create unique types for
419 // scalar replaceable objects.
420 split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
421 if (C->failing()) {
422 NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
423 return false;
424 }
425
426 #ifdef ASSERT
427 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
428 tty->print("=== No allocations eliminated for ");
429 C->method()->print_short_name();
430 if (!EliminateAllocations) {
431 tty->print(" since EliminateAllocations is off ===");
432 } else if(!has_scalar_replaceable_candidates) {
433 tty->print(" since there are no scalar replaceable candidates ===");
434 }
435 tty->cr();
436 #endif
437 }
438
439 // 6. Expand flat accesses if the object does not escape. This adds nodes to
440 // the graph, so it has to be after split_unique_types. This expands atomic
441 // mismatched accesses (though encapsulated in LoadFlats and StoreFlats) into
442 // non-mismatched accesses, so it is better before reduce allocation merges.
443 if (has_non_escaping_obj) {
444 optimize_flat_accesses(sfn_worklist);
445 }
446
447 _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);
448
449 // 7. Reduce allocation merges used as debug information. This is done after
450 // split_unique_types because the methods used to create SafePointScalarObject
451 // need to traverse the memory graph to find values for object fields. We also
452 // set to null the scalarized inputs of reducible Phis so that the Allocate
453 // that they point can be later scalar replaced.
454 bool delay = _igvn->delay_transform();
455 _igvn->set_delay_transform(true);
456 for (uint i = 0; i < reducible_merges.size(); i++) {
457 Node* n = reducible_merges.at(i);
458 if (n->outcnt() > 0) {
459 if (!reduce_phi_on_safepoints(n->as_Phi())) {
460 NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
461 C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
462 return false;
463 }
464
465 // Now we set the scalar replaceable inputs of ophi to null, which is
466 // the last piece that would prevent it from being scalar replaceable.
467 reset_scalar_replaceable_entries(n->as_Phi());
468 }
469 }
470 _igvn->set_delay_transform(delay);
471
472 // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
473 // java calls if they pass ArgEscape objects as parameters.
474 if (has_non_escaping_obj &&
475 (C->env()->should_retain_local_variables() ||
476 C->env()->jvmti_can_get_owned_monitor_info() ||
477 C->env()->jvmti_can_walk_any_space() ||
478 DeoptimizeObjectsALot)) {
479 int sfn_length = sfn_worklist.length();
480 for (int next = 0; next < sfn_length; next++) {
481 SafePointNode* sfn = sfn_worklist.at(next);
482 sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
483 if (sfn->is_CallJava()) {
484 CallJavaNode* call = sfn->as_CallJava();
485 call->set_arg_escape(has_arg_escape(call));
486 }
487 }
488 }
489
490 _compile->print_method(PHASE_EA_AFTER_REDUCE_PHI_ON_SAFEPOINTS, 4);
491
492 NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
493 return has_non_escaping_obj;
494 }
495
496 // Check if it's profitable to reduce the Phi passed as parameter. Returns true
497 // if at least one scalar replaceable allocation participates in the merge.
498 bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
499 bool found_sr_allocate = false;
500
501 for (uint i = 1; i < ophi->req(); i++) {
502 JavaObjectNode* ptn = unique_java_object(ophi->in(i));
503 if (ptn != nullptr && ptn->scalar_replaceable()) {
504 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
505
506 // Don't handle arrays.
507 if (alloc->Opcode() != Op_Allocate) {
508 assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
509 continue;
510 }
511
512 if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
513 found_sr_allocate = true;
514 } else {
515 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
516 ptn->set_scalar_replaceable(false);
517 }
518 }
519 }
520
521 NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
522 return found_sr_allocate;
523 }
524
525 // We can reduce the Cmp if it's a comparison between the Phi and a constant.
526 // I require the 'other' input to be a constant so that I can move the Cmp
527 // around safely.
528 bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
529 assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
530 Node* left = cmp->in(1);
531 Node* right = cmp->in(2);
532
533 return (left == n || right == n) &&
534 (left->is_Con() || right->is_Con()) &&
535 cmp->outcnt() == 1;
536 }
537
538 // We are going to check if any of the SafePointScalarMerge entries
539 // in the SafePoint reference the Phi that we are checking.
540 bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
541 JVMState *jvms = sfpt->jvms();
542
543 for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
544 Node* sfpt_in = sfpt->in(i);
545 if (sfpt_in->is_SafePointScalarMerge()) {
546 SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
547 Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
548 if (nsr_ptr == n) {
549 return true;
550 }
551 }
552 }
553
554 return false;
555 }
556
557 // Check if we are able to untangle the merge. The following patterns are
558 // supported:
559 // - Phi -> SafePoints
560 // - Phi -> CmpP/N
561 // - Phi -> AddP -> Load
562 // - Phi -> CastPP -> SafePoints
563 // - Phi -> CastPP -> AddP -> Load
564 bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
565 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
566 Node* use = n->fast_out(i);
567
568 if (use->is_SafePoint()) {
569 if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
570 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
571 return false;
572 } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
573 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
574 return false;
575 }
576 } else if (use->is_AddP()) {
577 Node* addp = use;
578 for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
579 Node* use_use = addp->fast_out(j);
580 const Type* load_type = _igvn->type(use_use);
581
582 if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
583 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
584 return false;
585 } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
586 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
587 return false;
588 }
589 }
590 } else if (nesting > 0) {
591 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
592 return false;
593 } else if (use->is_CastPP()) {
594 const Type* cast_t = _igvn->type(use);
595 if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
596 #ifndef PRODUCT
597 if (TraceReduceAllocationMerges) {
598 tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
599 use->dump();
600 }
601 #endif
602 return false;
603 }
604
605 bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
606 if (!is_trivial_control) {
607 // If it's not a trivial control then we check if we can reduce the
608 // CmpP/N used by the If controlling the cast.
609 if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
610 Node* iff = use->in(0)->in(0);
611 // We may have an OpaqueConstantBool node between If and Bool nodes. But we could also have a sub class of IfNode,
612 // for example, an OuterStripMinedLoopEnd or a Parse Predicate. Bail out in all these cases.
613 bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
614 if (can_reduce) {
615 Node* iff_cmp = iff->in(1)->in(1);
616 int opc = iff_cmp->Opcode();
617 can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
618 }
619 if (!can_reduce) {
620 #ifndef PRODUCT
621 if (TraceReduceAllocationMerges) {
622 tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
623 n->dump(5);
624 }
625 #endif
626 return false;
627 }
628 }
629 }
630
631 if (!can_reduce_check_users(use, nesting+1)) {
632 return false;
633 }
634 } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
635 if (!can_reduce_cmp(n, use)) {
636 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
637 return false;
638 }
639 } else {
640 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
641 return false;
642 }
643 }
644
645 return true;
646 }
647
648 // Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
649 // only used in some certain code shapes. Check comments in
650 // 'can_reduce_phi_inputs' and 'can_reduce_phi_users' for more
651 // details.
652 bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
653 // If there was an error attempting to reduce allocation merges for this
654 // method we might have disabled the compilation and be retrying with RAM
655 // disabled.
656 if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
657 return false;
658 }
659
660 const Type* phi_t = _igvn->type(ophi);
661 if (phi_t == nullptr ||
662 phi_t->make_ptr() == nullptr ||
663 phi_t->make_ptr()->isa_aryptr() != nullptr) {
664 return false;
665 }
666
667 if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
668 return false;
669 }
670
671 NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
672 return true;
673 }
674
675 // This method will return a CmpP/N that we need to use on the If controlling a
676 // CastPP after it was split. This method is only called on bases that are
677 // nullable therefore we always need a controlling if for the splitted CastPP.
678 //
679 // 'curr_ctrl' is the control of the CastPP that we want to split through phi.
680 // If the CastPP currently doesn't have a control then the CmpP/N will be
681 // against the null constant, otherwise it will be against the constant input of
682 // the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the later
683 // case because we have constraints on it and because the CastPP has a control
684 // input.
685 Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
686 const Type* t = base->bottom_type();
687 Node* con = nullptr;
688
689 if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
690 con = _igvn->zerocon(t->basic_type());
691 } else {
692 // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
693 assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
694 Node* bol = curr_ctrl->in(0)->in(1);
695 assert(bol->is_Bool(), "unexpected node %s", bol->Name());
696 Node* curr_cmp = bol->in(1);
697 assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
698 con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
699 }
700
701 return CmpNode::make(base, con, t->basic_type());
702 }
703
704 // This method 'specializes' the CastPP passed as parameter to the base passed
705 // as parameter. Note that the existing CastPP input is a Phi. "Specialize"
706 // means that the CastPP now will be specific for a given base instead of a Phi.
707 // An If-Then-Else-Region block is inserted to control the CastPP. The control
708 // of the CastPP is a copy of the current one (if there is one) or a check
709 // against null.
710 //
711 // Before:
712 //
713 // C1 C2 ... Cn
714 // \ | /
715 // \ | /
716 // \ | /
717 // \ | /
718 // \ | /
719 // \ | /
720 // \|/
721 // Region B1 B2 ... Bn
722 // | \ | /
723 // | \ | /
724 // | \ | /
725 // | \ | /
726 // | \ | /
727 // | \ | /
728 // ---------------> Phi
729 // |
730 // X |
731 // | |
732 // | |
733 // ------> CastPP
734 //
735 // After (only partial illustration; base = B2, current_control = C2):
736 //
737 // C2
738 // |
739 // If
740 // / \
741 // / \
742 // T F
743 // /\ /
744 // / \ /
745 // / \ /
746 // C1 CastPP Reg Cn
747 // | | |
748 // | | |
749 // | | |
750 // -------------- | ----------
751 // | | |
752 // Region
753 //
754 Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) {
755 Node* control_successor = current_control->unique_ctrl_out();
756 Node* cmp = _igvn->transform(specialize_cmp(base, castpp->in(0)));
757 Node* bol = _igvn->transform(new BoolNode(cmp, BoolTest::ne));
758 IfNode* if_ne = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If();
759 Node* not_eq_control = _igvn->transform(new IfTrueNode(if_ne));
760 Node* yes_eq_control = _igvn->transform(new IfFalseNode(if_ne));
761 Node* end_region = _igvn->transform(new RegionNode(3));
762
763 // Insert the new if-else-region block into the graph
764 end_region->set_req(1, not_eq_control);
765 end_region->set_req(2, yes_eq_control);
766 control_successor->replace_edge(current_control, end_region, _igvn);
767
768 _igvn->_worklist.push(current_control);
769 _igvn->_worklist.push(control_successor);
770
771 return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr));
772 }
773
774 Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) {
775 const Type* load_type = _igvn->type(curr_load);
776 Node* nsr_value = _igvn->zerocon(load_type->basic_type());
777 Node* memory = curr_load->in(MemNode::Memory);
778
779 // The data_phi merging the loads needs to be nullable if
780 // we are loading pointers.
781 if (load_type->make_ptr() != nullptr) {
782 if (load_type->isa_narrowoop()) {
783 load_type = load_type->meet(TypeNarrowOop::NULL_PTR);
784 } else if (load_type->isa_ptr()) {
785 load_type = load_type->meet(TypePtr::NULL_PTR);
786 } else {
787 assert(false, "Unexpected load ptr type.");
788 }
789 }
790
791 Node* data_phi = PhiNode::make(region, nsr_value, load_type);
792
793 for (int i = 1; i < bases_for_loads->length(); i++) {
794 Node* base = bases_for_loads->at(i);
795 Node* cmp_region = nullptr;
796 if (base != nullptr) {
797 if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node
798 cmp_region = base->unique_ctrl_out_or_null();
799 assert(cmp_region != nullptr, "There should be.");
800 base = base->find_out_with(Op_CastPP);
801 }
802
803 Node* addr = _igvn->transform(AddPNode::make_with_base(base, curr_addp->in(AddPNode::Offset)));
804 Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory;
805 Node* load = curr_load->clone();
806 load->set_req(0, nullptr);
807 load->set_req(1, mem);
808 load->set_req(2, addr);
809
810 if (cmp_region != nullptr) { // see comment on previous if
811 Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
812 intermediate_phi->set_req(1, _igvn->transform(load));
813 load = intermediate_phi;
814 }
815
816 data_phi->set_req(i, _igvn->transform(load));
817 } else {
818 // Just use the default, which is already in phi
819 }
820 }
821
822 // Takes care of updating CG and split_unique_types worklists due
823 // to cloned AddP->Load.
824 updates_after_load_split(data_phi, curr_load, alloc_worklist);
825
826 return _igvn->transform(data_phi);
827 }
828
829 // This method only reduces CastPP fields loads; SafePoints are handled
830 // separately. The idea here is basically to clone the CastPP and place copies
831 // on each input of the Phi, including non-scalar replaceable inputs.
832 // Experimentation shows that the resulting IR graph is simpler that way than if
833 // we just split the cast through scalar-replaceable inputs.
834 //
835 // The reduction process requires that CastPP's control be one of:
836 // 1) no control,
837 // 2) the same region as Ophi, or
838 // 3) an IfTrue/IfFalse coming from an CmpP/N between Ophi and a constant.
839 //
840 // After splitting the CastPP we'll put it under an If-Then-Else-Region control
841 // flow. If the CastPP originally had an IfTrue/False control input then we'll
842 // use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
843 // juse use a CmpP/N against the null constant.
844 //
845 // The If-Then-Else-Region isn't always needed. For instance, if input to
846 // splitted cast was not nullable (or if it was the null constant) then we don't
847 // need (shouldn't) use a CastPP at all.
848 //
849 // After the casts are splitted we'll split the AddP->Loads through the Phi and
850 // connect them to the just split CastPPs.
851 //
852 // Before (CastPP control is same as Phi):
853 //
854 // Region Allocate Null Call
855 // | \ | /
856 // | \ | /
857 // | \ | /
858 // | \ | /
859 // | \ | /
860 // | \ | /
861 // ------------------> Phi # Oop Phi
862 // | |
863 // | |
864 // | |
865 // | |
866 // ----------------> CastPP
867 // |
868 // AddP
869 // |
870 // Load
871 //
872 // After (Very much simplified):
873 //
874 // Call Null
875 // \ /
876 // CmpP
877 // |
878 // Bool#NE
879 // |
880 // If
881 // / \
882 // T F
883 // / \ /
884 // / R
885 // CastPP |
886 // | |
887 // AddP |
888 // | |
889 // Load |
890 // \ | 0
891 // Allocate \ | /
892 // \ \ | /
893 // AddP Phi
894 // \ /
895 // Load /
896 // \ 0 /
897 // \ | /
898 // \|/
899 // Phi # "Field" Phi
900 //
901 void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node*> &alloc_worklist) {
902 Node* ophi = curr_castpp->in(1);
903 assert(ophi->is_Phi(), "Expected this to be a Phi node.");
904
905 // Identify which base should be used for AddP->Load later when spliting the
906 // CastPP->Loads through ophi. Three kind of values may be stored in this
907 // array, depending on the nullability status of the corresponding input in
908 // ophi.
909 //
910 // - nullptr: Meaning that the base is actually the null constant and therefore
911 // we won't try to load from it.
912 //
913 // - CFG Node: Meaning that the base is a CastPP that was specialized for
914 // this input of Ophi. I.e., we added an If->Then->Else-Region
915 // that will 'activate' the CastPp only when the input is not Null.
916 //
917 // - Other Node: Meaning that the base is not nullable and therefore we'll try
918 // to load directly from it.
919 GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);
920
921 for (uint i = 1; i < ophi->req(); i++) {
922 Node* base = ophi->in(i);
923 const Type* base_t = _igvn->type(base);
924
925 if (base_t->maybe_null()) {
926 if (base->is_Con()) {
927 // Nothing todo as bases_for_loads[i] is already null
928 } else {
929 Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
930 bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
931 }
932 } else {
933 bases_for_loads.at_put(i, base);
934 }
935 }
936
937 // Now let's split the CastPP->Loads through the Phi
938 for (int i = curr_castpp->outcnt()-1; i >= 0;) {
939 Node* use = curr_castpp->raw_out(i);
940 if (use->is_AddP()) {
941 for (int j = use->outcnt()-1; j >= 0;) {
942 Node* use_use = use->raw_out(j);
943 assert(use_use->is_Load(), "Expected this to be a Load node.");
944
945 // We can't make an unconditional load from a nullable input. The
946 // 'split_castpp_load_through_phi` method will add an
947 // 'If-Then-Else-Region` around nullable bases and only load from them
948 // when the input is not null.
949 Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
950 _igvn->replace_node(use_use, phi);
951
952 --j;
953 j = MIN2(j, (int)use->outcnt()-1);
954 }
955
956 _igvn->remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph);
957 }
958 --i;
959 i = MIN2(i, (int)curr_castpp->outcnt()-1);
960 }
961 }
962
963 // This method split a given CmpP/N through the Phi used in one of its inputs.
964 // As a result we convert a comparison with a pointer to a comparison with an
965 // integer.
966 // The only requirement is that one of the inputs of the CmpP/N must be a Phi
967 // while the other must be a constant.
968 // The splitting process is basically just cloning the CmpP/N above the input
969 // Phi. However, some (most) of the cloned CmpP/Ns won't be requred because we
970 // can prove at compile time the result of the comparison.
971 //
972 // Before:
973 //
974 // in1 in2 ... inN
975 // \ | /
976 // \ | /
977 // \ | /
978 // \ | /
979 // \ | /
980 // \ | /
981 // Phi
982 // | Other
983 // | /
984 // | /
985 // | /
986 // CmpP/N
987 //
988 // After:
989 //
990 // in1 Other in2 Other inN Other
991 // | | | | | |
992 // \ | | | | |
993 // \ / | / | /
994 // CmpP/N CmpP/N CmpP/N
995 // Bool Bool Bool
996 // \ | /
997 // \ | /
998 // \ | /
999 // \ | /
1000 // \ | /
1001 // \ | /
1002 // \ | /
1003 // \ | /
1004 // Phi
1005 // |
1006 // | Zero
1007 // | /
1008 // | /
1009 // | /
1010 // CmpI
1011 //
1012 //
1013 void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
1014 Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
1015 assert(ophi->is_Phi(), "Expected this to be a Phi node.");
1016
1017 Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
1018 Node* zero = _igvn->intcon(0);
1019 Node* one = _igvn->intcon(1);
1020 BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;
1021
1022 // This Phi will merge the result of the Cmps split through the Phi
1023 Node* res_phi = PhiNode::make(ophi->in(0), zero, TypeInt::INT);
1024
1025 for (uint i=1; i<ophi->req(); i++) {
1026 Node* ophi_input = ophi->in(i);
1027 Node* res_phi_input = nullptr;
1028
1029 const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
1030 if (tcmp->singleton()) {
1031 if ((mask == BoolTest::mask::eq && tcmp == TypeInt::CC_EQ) ||
1032 (mask == BoolTest::mask::ne && tcmp == TypeInt::CC_GT)) {
1033 res_phi_input = one;
1034 } else {
1035 res_phi_input = zero;
1036 }
1037 } else {
1038 Node* ncmp = _igvn->transform(cmp->clone());
1039 ncmp->set_req(1, ophi_input);
1040 ncmp->set_req(2, other);
1041 Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
1042 res_phi_input = bol->as_Bool()->as_int_value(_igvn);
1043 }
1044
1045 res_phi->set_req(i, res_phi_input);
1046 }
1047
1048 // This CMP always compares whether the output of "res_phi" is TRUE as far as the "mask".
1049 Node* new_cmp = _igvn->transform(new CmpINode(_igvn->transform(res_phi), (mask == BoolTest::mask::eq) ? one : zero));
1050 _igvn->replace_node(cmp, new_cmp);
1051 }
1052
1053 // Push the newly created AddP on alloc_worklist and patch
1054 // the connection graph. Note that the changes in the CG below
1055 // won't affect the ES of objects since the new nodes have the
1056 // same status as the old ones.
1057 void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist) {
1058 assert(data_phi != nullptr, "Output of split_through_phi is null.");
1059 assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
1060 assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");
1061
1062 if (data_phi == nullptr || !data_phi->is_Phi()) {
1063 // Make this a retry?
1064 return ;
1065 }
1066
1067 Node* previous_addp = previous_load->in(MemNode::Address);
1068 FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
1069 for (uint i = 1; i < data_phi->req(); i++) {
1070 Node* new_load = data_phi->in(i);
1071
1072 if (new_load->is_Phi()) {
1073 // new_load is currently the "intermediate_phi" from an specialized
1074 // CastPP.
1075 new_load = new_load->in(1);
1076 }
1077
1078 // "new_load" might actually be a constant, parameter, etc.
1079 if (new_load->is_Load()) {
1080 Node* new_addp = new_load->in(MemNode::Address);
1081
1082 // If new_load is a Load but not from an AddP, it means that the load is folded into another
1083 // load. And since this load is not from a field, we cannot create a unique type for it.
1084 // For example:
1085 //
1086 // if (b) {
1087 // Holder h1 = new Holder();
1088 // Object o = ...;
1089 // h.o = o.getClass();
1090 // } else {
1091 // Holder h2 = ...;
1092 // }
1093 // Holder h = Phi(h1, h2);
1094 // Object r = h.o;
1095 //
1096 // Then, splitting r through the merge point results in:
1097 //
1098 // if (b) {
1099 // Holder h1 = new Holder();
1100 // Object o = ...;
1101 // h.o = o.getClass();
1102 // Object o1 = h.o;
1103 // } else {
1104 // Holder h2 = ...;
1105 // Object o2 = h2.o;
1106 // }
1107 // Object r = Phi(o1, o2);
1108 //
1109 // In this case, o1 is folded to o.getClass() which is a Load but not from an AddP, but from
1110 // an OopHandle that is loaded from the Klass of o.
1111 if (!new_addp->is_AddP()) {
1112 continue;
1113 }
1114 Node* base = get_addp_base(new_addp);
1115
1116 // The base might not be something that we can create an unique
1117 // type for. If that's the case we are done with that input.
1118 PointsToNode* jobj_ptn = unique_java_object(base);
1119 if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
1120 continue;
1121 }
1122
1123 // Push to alloc_worklist since the base has an unique_type
1124 alloc_worklist.append_if_missing(new_addp);
1125
1126 // Now let's add the node to the connection graph
1127 _nodes.at_grow(new_addp->_idx, nullptr);
1128 add_field(new_addp, fn->escape_state(), fn->offset());
1129 add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));
1130
1131 // If the load doesn't load an object then it won't be
1132 // part of the connection graph
1133 PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
1134 if (curr_load_ptn != nullptr) {
1135 _nodes.at_grow(new_load->_idx, nullptr);
1136 add_local_var(new_load, curr_load_ptn->escape_state());
1137 add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
1138 }
1139 }
1140 }
1141 }
1142
1143 void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist) {
1144 // We'll pass this to 'split_through_phi' so that it'll do the split even
1145 // though the load doesn't have an unique instance type.
1146 bool ignore_missing_instance_id = true;
1147
1148 // All AddPs are present in the connection graph
1149 FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
1150
1151 // Iterate over AddP looking for a Load
1152 for (int k = previous_addp->outcnt()-1; k >= 0;) {
1153 Node* previous_load = previous_addp->raw_out(k);
1154 if (previous_load->is_Load()) {
1155 Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);
1156
1157 // Takes care of updating CG and split_unique_types worklists due to cloned
1158 // AddP->Load.
1159 updates_after_load_split(data_phi, previous_load, alloc_worklist);
1160
1161 _igvn->replace_node(previous_load, data_phi);
1162 }
1163 --k;
1164 k = MIN2(k, (int)previous_addp->outcnt()-1);
1165 }
1166
1167 // Remove the old AddP from the processing list because it's dead now
1168 assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
1169 alloc_worklist.remove_if_existing(previous_addp);
1170 }
1171
1172 // Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
1173 // selector is:
1174 // -> a '-1' constant, the i'th input of the original Phi is NSR.
1175 // -> a 'x' constant >=0, the i'th input of of original Phi will be SR and
1176 // the info about the scalarized object will be at index x of ObjectMergeValue::possible_objects
1177 PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
1178 Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
1179 Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
1180 uint number_of_sr_objects = 0;
1181 for (uint i = 1; i < ophi->req(); i++) {
1182 Node* base = ophi->in(i);
1183 JavaObjectNode* ptn = unique_java_object(base);
1184
1185 if (ptn != nullptr && ptn->scalar_replaceable()) {
1186 Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
1187 selector->set_req(i, sr_obj_idx);
1188 number_of_sr_objects++;
1189 }
1190 }
1191
1192 return selector->as_Phi();
1193 }
1194
1195 // Returns true if the AddP node 'n' has at least one base that is a reducible
1196 // merge. If the base is a CastPP/CheckCastPP then the input of the cast is
1197 // checked instead.
1198 bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
1199 PointsToNode* ptn = ptnode_adr(n->_idx);
1200 if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
1201 return false;
1202 }
1203
1204 for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
1205 Node* base = i.get()->ideal_node();
1206
1207 if (reducible_merges.member(base)) {
1208 return true;
1209 }
1210
1211 if (base->is_CastPP() || base->is_CheckCastPP()) {
1212 base = base->in(1);
1213 if (reducible_merges.member(base)) {
1214 return true;
1215 }
1216 }
1217 }
1218
1219 return false;
1220 }
1221
1222 // This method will call its helper method to reduce SafePoint nodes that use
1223 // 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
1224 // "version" of Phi use the same debug information (regarding the Phi).
1225 // Therefore, I collect all safepoints and patch them all at once.
1226 //
1227 // The safepoints using the Phi node have to be processed before safepoints of
1228 // CastPP nodes. The reason is, when reducing a CastPP we add a reference (the
1229 // NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
1230 // safepoint. If we process CastPP's safepoints before Phi's safepoints the
1231 // algorithm that process Phi's safepoints will think that the added Phi
1232 // reference is a regular reference.
1233 bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
1234 PhiNode* selector = create_selector(ophi);
1235 Unique_Node_List safepoints;
1236 Unique_Node_List casts;
1237
1238 // Just collect the users of the Phis for later processing
1239 // in the needed order.
1240 for (uint i = 0; i < ophi->outcnt(); i++) {
1241 Node* use = ophi->raw_out(i);
1242 if (use->is_SafePoint()) {
1243 safepoints.push(use);
1244 } else if (use->is_CastPP()) {
1245 casts.push(use);
1246 } else {
1247 assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
1248 }
1249 }
1250
1251 // Need to process safepoints using the Phi first
1252 if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
1253 return false;
1254 }
1255
1256 // Now process CastPP->safepoints
1257 for (uint i = 0; i < casts.size(); i++) {
1258 Node* cast = casts.at(i);
1259 Unique_Node_List cast_sfpts;
1260
1261 for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
1262 Node* use_use = cast->fast_out(j);
1263 if (use_use->is_SafePoint()) {
1264 cast_sfpts.push(use_use);
1265 } else {
1266 assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
1267 }
1268 }
1269
1270 if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
1271 return false;
1272 }
1273 }
1274
1275 return true;
1276 }
1277
1278 // This method will create a SafePointScalarMERGEnode for each SafePoint in
1279 // 'safepoints'. It then will iterate on the inputs of 'ophi' and create a
1280 // SafePointScalarObjectNode for each scalar replaceable input. Each
1281 // SafePointScalarMergeNode may describe multiple scalar replaced objects -
1282 // check detailed description in SafePointScalarMergeNode class header.
1283 bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
1284 PhaseMacroExpand mexp(*_igvn);
1285 Node* original_sfpt_parent = cast != nullptr ? cast : ophi;
1286 const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();
1287
1288 Node* nsr_merge_pointer = ophi;
1289 if (cast != nullptr) {
1290 const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
1291 nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::DependencyType::FloatingNarrowing, nullptr));
1292 }
1293
1294 for (uint spi = 0; spi < safepoints.size(); spi++) {
1295 SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
1296
1297 SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(*_igvn);
1298
1299 // All sfpt inputs are implicitly included into debug info during the scalarization process below.
1300 // Keep non-debug inputs separately, so they stay non-debug.
1301 sfpt->remove_non_debug_edges(non_debug_edges_worklist);
1302
1303 JVMState* jvms = sfpt->jvms();
1304 uint merge_idx = (sfpt->req() - jvms->scloff());
1305 int debug_start = jvms->debug_start();
1306
1307 SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
1308 smerge->init_req(0, _compile->root());
1309 _igvn->register_new_node_with_optimizer(smerge);
1310
1311 assert(sfpt->jvms()->endoff() == sfpt->req(), "no extra edges past debug info allowed");
1312
1313 // The next two inputs are:
1314 // (1) A copy of the original pointer to NSR objects.
1315 // (2) A selector, used to decide if we need to rematerialize an object
1316 // or use the pointer to a NSR object.
1317 // See more details of these fields in the declaration of SafePointScalarMergeNode.
1318 // It is safe to include them into debug info straight away since create_scalarized_object_description()
1319 // will include all newly added inputs into debug info anyway.
1320 sfpt->add_req(nsr_merge_pointer);
1321 sfpt->add_req(selector);
1322 sfpt->jvms()->set_endoff(sfpt->req());
1323
1324 for (uint i = 1; i < ophi->req(); i++) {
1325 Node* base = ophi->in(i);
1326 JavaObjectNode* ptn = unique_java_object(base);
1327
1328 // If the base is not scalar replaceable we don't need to register information about
1329 // it at this time.
1330 if (ptn == nullptr || !ptn->scalar_replaceable()) {
1331 continue;
1332 }
1333
1334 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1335 Unique_Node_List value_worklist;
1336 #ifdef ASSERT
1337 const Type* res_type = alloc->result_cast()->bottom_type();
1338 if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1339 assert(!ophi->as_Phi()->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1340 }
1341 #endif
1342 SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1343 if (sobj == nullptr) {
1344 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1345 sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1346 return false; // non-recoverable failure; recompile
1347 }
1348
1349 // Now make a pass over the debug information replacing any references
1350 // to the allocated object with "sobj"
1351 Node* ccpp = alloc->result_cast();
1352 sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1353 non_debug_edges_worklist.remove_edge_if_present(ccpp); // drop scalarized input from non-debug info
1354
1355 // Register the scalarized object as a candidate for reallocation
1356 smerge->add_req(sobj);
1357
1358 // Scalarize inline types that were added to the safepoint.
1359 // Don't allow linking a constant oop (if available) for flat array elements
1360 // because Deoptimization::reassign_flat_array_elements needs field values.
1361 const bool allow_oop = !merge_t->is_flat();
1362 for (uint j = 0; j < value_worklist.size(); ++j) {
1363 InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1364 vt->make_scalar_in_safepoints(_igvn, allow_oop);
1365 }
1366 }
1367
1368 // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1369 sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1370 non_debug_edges_worklist.remove_edge_if_present(original_sfpt_parent); // drop scalarized input from non-debug info
1371
1372 // The call to 'replace_edges_in_range' above might have removed the
1373 // reference to ophi that we need at _merge_pointer_idx. The line below make
1374 // sure the reference is maintained.
1375 sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1376
1377 sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1378
1379 _igvn->_worklist.push(sfpt);
1380 }
1381
1382 return true;
1383 }
1384
1385 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {
1386 bool delay = _igvn->delay_transform();
1387 _igvn->set_delay_transform(true);
1388 _igvn->hash_delete(ophi);
1389
1390 // Copying all users first because some will be removed and others won't.
1391 // Ophi also may acquire some new users as part of Cast reduction.
1392 // CastPPs also need to be processed before CmpPs.
1393 Unique_Node_List castpps;
1394 Unique_Node_List others;
1395 for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
1396 Node* use = ophi->fast_out(i);
1397
1398 if (use->is_CastPP()) {
1399 castpps.push(use);
1400 } else if (use->is_AddP() || use->is_Cmp()) {
1401 others.push(use);
1402 } else {
1403 // Safepoints to be processed later; other users aren't expected here
1404 assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
1405 }
1406 }
1407
1408 _compile->print_method(PHASE_EA_BEFORE_PHI_REDUCTION, 5, ophi);
1409
1410 // CastPPs need to be processed before Cmps because during the process of
1411 // splitting CastPPs we make reference to the inputs of the Cmp that is used
1412 // by the If controlling the CastPP.
1413 for (uint i = 0; i < castpps.size(); i++) {
1414 reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist);
1415 _compile->print_method(PHASE_EA_AFTER_PHI_CASTPP_REDUCTION, 6, castpps.at(i));
1416 }
1417
1418 for (uint i = 0; i < others.size(); i++) {
1419 Node* use = others.at(i);
1420
1421 if (use->is_AddP()) {
1422 reduce_phi_on_field_access(use, alloc_worklist);
1423 _compile->print_method(PHASE_EA_AFTER_PHI_ADDP_REDUCTION, 6, use);
1424 } else if(use->is_Cmp()) {
1425 reduce_phi_on_cmp(use);
1426 _compile->print_method(PHASE_EA_AFTER_PHI_CMP_REDUCTION, 6, use);
1427 }
1428 }
1429
1430 _igvn->set_delay_transform(delay);
1431 }
1432
1433 void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
1434 Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
1435 const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
1436 const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
1437 Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
1438
1439 for (uint i = 1; i < ophi->req(); i++) {
1440 Node* base = ophi->in(i);
1441 JavaObjectNode* ptn = unique_java_object(base);
1442
1443 if (ptn != nullptr && ptn->scalar_replaceable()) {
1444 new_phi->set_req(i, null_ptr);
1445 } else {
1446 new_phi->set_req(i, ophi->in(i));
1447 }
1448 }
1449
1450 for (int i = ophi->outcnt()-1; i >= 0;) {
1451 Node* out = ophi->raw_out(i);
1452
1453 if (out->is_ConstraintCast()) {
1454 const Type* out_t = _igvn->type(out)->make_ptr();
1455 const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
1456 bool change = out_new_t != out_t;
1457
1458 for (int j = out->outcnt()-1; change && j >= 0; --j) {
1459 Node* out2 = out->raw_out(j);
1460 if (!out2->is_SafePoint()) {
1461 change = false;
1462 break;
1463 }
1464 }
1465
1466 if (change) {
1467 Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::DependencyType::NonFloatingNarrowing, nullptr);
1468 _igvn->replace_node(out, new_cast);
1469 _igvn->register_new_node_with_optimizer(new_cast);
1470 }
1471 }
1472
1473 --i;
1474 i = MIN2(i, (int)ophi->outcnt()-1);
1475 }
1476
1477 _igvn->replace_node(ophi, new_phi);
1478 }
1479
1480 void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
1481 if (!C->do_reduce_allocation_merges()) return;
1482
1483 Unique_Node_List ideal_nodes;
1484 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
1485 ideal_nodes.push(root);
1486
1487 for (uint next = 0; next < ideal_nodes.size(); ++next) {
1488 Node* n = ideal_nodes.at(next);
1489
1490 if (n->is_SafePointScalarMerge()) {
1491 SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();
1492
1493 // Validate inputs of merge
1494 for (uint i = 1; i < merge->req(); i++) {
1495 if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
1496 assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
1497 C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1498 }
1499 }
1500
1501 // Validate users of merge
1502 for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
1503 Node* sfpt = merge->fast_out(i);
1504 if (sfpt->is_SafePoint()) {
1505 int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());
1506
1507 if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
1508 assert(false, "SafePointScalarMerge nodes can't be nested.");
1509 C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1510 }
1511 } else {
1512 assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
1513 C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1514 }
1515 }
1516 }
1517
1518 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1519 Node* m = n->fast_out(i);
1520 ideal_nodes.push(m);
1521 }
1522 }
1523 }
1524
1525 // Returns true if there is an object in the scope of sfn that does not escape globally.
1526 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
1527 Compile* C = _compile;
1528 for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1529 if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
1530 DeoptimizeObjectsALot) {
1531 // Jvmti agents can access locals. Must provide info about local objects at runtime.
1532 int num_locs = jvms->loc_size();
1533 for (int idx = 0; idx < num_locs; idx++) {
1534 Node* l = sfn->local(jvms, idx);
1535 if (not_global_escape(l)) {
1536 return true;
1537 }
1538 }
1539 }
1540 if (C->env()->jvmti_can_get_owned_monitor_info() ||
1541 C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
1542 // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
1543 int num_mon = jvms->nof_monitors();
1544 for (int idx = 0; idx < num_mon; idx++) {
1545 Node* m = sfn->monitor_obj(jvms, idx);
1546 if (m != nullptr && not_global_escape(m)) {
1547 return true;
1548 }
1549 }
1550 }
1551 }
1552 return false;
1553 }
1554
1555 // Returns true if at least one of the arguments to the call is an object
1556 // that does not escape globally.
1557 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1558 if (call->method() != nullptr) {
1559 uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1560 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1561 Node* p = call->in(idx);
1562 if (not_global_escape(p)) {
1563 return true;
1564 }
1565 }
1566 } else {
1567 const char* name = call->as_CallStaticJava()->_name;
1568 assert(name != nullptr, "no name");
1569 // no arg escapes through uncommon traps
1570 if (strcmp(name, "uncommon_trap") != 0) {
1571 // process_call_arguments() assumes that all arguments escape globally
1572 const TypeTuple* d = call->tf()->domain_sig();
1573 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1574 const Type* at = d->field_at(i);
1575 if (at->isa_oopptr() != nullptr) {
1576 return true;
1577 }
1578 }
1579 }
1580 }
1581 return false;
1582 }
1583
1584
1585
1586 // Utility function for nodes that load an object
1587 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1588 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1589 // ThreadLocal has RawPtr type.
1590 const Type* t = _igvn->type(n);
1591 if (t->make_ptr() != nullptr) {
1592 Node* adr = n->in(MemNode::Address);
1593 #ifdef ASSERT
1594 if (!adr->is_AddP()) {
1595 assert(_igvn->type(adr)->isa_rawptr(), "sanity");
1596 } else {
1597 assert((ptnode_adr(adr->_idx) == nullptr ||
1598 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1599 }
1600 #endif
1601 add_local_var_and_edge(n, PointsToNode::NoEscape,
1602 adr, delayed_worklist);
1603 }
1604 }
1605
1606 void ConnectionGraph::add_proj(Node* n, Unique_Node_List* delayed_worklist) {
1607 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && n->in(0)->as_Call()->returns_pointer()) {
1608 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1609 } else if (n->in(0)->is_LoadFlat()) {
1610 // Treat LoadFlat outputs similar to a call return value
1611 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1612 } else if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() && n->bottom_type()->isa_ptr()) {
1613 CallNode* call = n->in(0)->as_Call();
1614 assert(call->tf()->returns_inline_type_as_fields(), "");
1615 if (n->as_Proj()->_con == TypeFunc::Parms || !returns_an_argument(call)) {
1616 // either:
1617 // - not an argument returned
1618 // - the returned buffer for a returned scalarized argument
1619 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1620 } else {
1621 add_local_var(n, PointsToNode::NoEscape);
1622 }
1623 }
1624 }
1625
1626 // Populate Connection Graph with PointsTo nodes and create simple
1627 // connection graph edges.
1628 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1629 assert(!_verify, "this method should not be called for verification");
1630 PhaseGVN* igvn = _igvn;
1631 uint n_idx = n->_idx;
1632 PointsToNode* n_ptn = ptnode_adr(n_idx);
1633 if (n_ptn != nullptr) {
1634 return; // No need to redefine PointsTo node during first iteration.
1635 }
1636 int opcode = n->Opcode();
1637 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1638 if (gc_handled) {
1639 return; // Ignore node if already handled by GC.
1640 }
1641
1642 if (n->is_Call()) {
1643 // Arguments to allocation and locking don't escape.
1644 if (n->is_AbstractLock()) {
1645 // Put Lock and Unlock nodes on IGVN worklist to process them during
1646 // first IGVN optimization when escape information is still available.
1647 record_for_optimizer(n);
1648 } else if (n->is_Allocate()) {
1649 add_call_node(n->as_Call());
1650 record_for_optimizer(n);
1651 } else {
1652 if (n->is_CallStaticJava()) {
1653 const char* name = n->as_CallStaticJava()->_name;
1654 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1655 return; // Skip uncommon traps
1656 }
1657 }
1658 // Don't mark as processed since call's arguments have to be processed.
1659 delayed_worklist->push(n);
1660 // Check if a call returns an object.
1661 if ((n->as_Call()->returns_pointer() &&
1662 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1663 (n->is_CallStaticJava() &&
1664 n->as_CallStaticJava()->is_boxing_method())) {
1665 add_call_node(n->as_Call());
1666 } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1667 bool returns_oop = false;
1668 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1669 ProjNode* pn = n->fast_out(i)->as_Proj();
1670 if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1671 returns_oop = true;
1672 }
1673 }
1674 if (returns_oop) {
1675 add_call_node(n->as_Call());
1676 }
1677 }
1678 }
1679 return;
1680 }
1681 // Put this check here to process call arguments since some call nodes
1682 // point to phantom_obj.
1683 if (n_ptn == phantom_obj || n_ptn == null_obj) {
1684 return; // Skip predefined nodes.
1685 }
1686 switch (opcode) {
1687 case Op_AddP: {
1688 Node* base = get_addp_base(n);
1689 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1690 // Field nodes are created for all field types. They are used in
1691 // adjust_scalar_replaceable_state() and split_unique_types().
1692 // Note, non-oop fields will have only base edges in Connection
1693 // Graph because such fields are not used for oop loads and stores.
1694 int offset = address_offset(n, igvn);
1695 add_field(n, PointsToNode::NoEscape, offset);
1696 if (ptn_base == nullptr) {
1697 delayed_worklist->push(n); // Process it later.
1698 } else {
1699 n_ptn = ptnode_adr(n_idx);
1700 add_base(n_ptn->as_Field(), ptn_base);
1701 }
1702 break;
1703 }
1704 case Op_CastX2P:
1705 case Op_CastI2N: {
1706 map_ideal_node(n, phantom_obj);
1707 break;
1708 }
1709 case Op_InlineType:
1710 case Op_CastPP:
1711 case Op_CheckCastPP:
1712 case Op_EncodeP:
1713 case Op_DecodeN:
1714 case Op_EncodePKlass:
1715 case Op_DecodeNKlass: {
1716 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1717 break;
1718 }
1719 case Op_CMoveP: {
1720 add_local_var(n, PointsToNode::NoEscape);
1721 // Do not add edges during first iteration because some could be
1722 // not defined yet.
1723 delayed_worklist->push(n);
1724 break;
1725 }
1726 case Op_ConP:
1727 case Op_ConN:
1728 case Op_ConNKlass: {
1729 // assume all oop constants globally escape except for null
1730 PointsToNode::EscapeState es;
1731 const Type* t = igvn->type(n);
1732 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1733 es = PointsToNode::NoEscape;
1734 } else {
1735 es = PointsToNode::GlobalEscape;
1736 }
1737 PointsToNode* ptn_con = add_java_object(n, es);
1738 set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1739 break;
1740 }
1741 case Op_CreateEx: {
1742 // assume that all exception objects globally escape
1743 map_ideal_node(n, phantom_obj);
1744 break;
1745 }
1746 case Op_LoadKlass:
1747 case Op_LoadNKlass: {
1748 // Unknown class is loaded
1749 map_ideal_node(n, phantom_obj);
1750 break;
1751 }
1752 case Op_LoadP:
1753 case Op_LoadN: {
1754 add_objload_to_connection_graph(n, delayed_worklist);
1755 break;
1756 }
1757 case Op_Parm: {
1758 map_ideal_node(n, phantom_obj);
1759 break;
1760 }
1761 case Op_PartialSubtypeCheck: {
1762 // Produces Null or notNull and is used in only in CmpP so
1763 // phantom_obj could be used.
1764 map_ideal_node(n, phantom_obj); // Result is unknown
1765 break;
1766 }
1767 case Op_Phi: {
1768 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1769 // ThreadLocal has RawPtr type.
1770 const Type* t = n->as_Phi()->type();
1771 if (t->make_ptr() != nullptr) {
1772 add_local_var(n, PointsToNode::NoEscape);
1773 // Do not add edges during first iteration because some could be
1774 // not defined yet.
1775 delayed_worklist->push(n);
1776 }
1777 break;
1778 }
1779 case Op_LoadFlat:
1780 // Treat LoadFlat similar to an unknown call that receives nothing and produces its results
1781 map_ideal_node(n, phantom_obj);
1782 break;
1783 case Op_StoreFlat:
1784 // Treat StoreFlat similar to a call that escapes the stored flattened fields
1785 delayed_worklist->push(n);
1786 break;
1787 case Op_Proj: {
1788 // we are only interested in the oop result projection from a call
1789 add_proj(n, delayed_worklist);
1790 break;
1791 }
1792 case Op_Rethrow: // Exception object escapes
1793 case Op_Return: {
1794 if (n->req() > TypeFunc::Parms &&
1795 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1796 // Treat Return value as LocalVar with GlobalEscape escape state.
1797 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1798 }
1799 break;
1800 }
1801 case Op_CompareAndExchangeP:
1802 case Op_CompareAndExchangeN:
1803 case Op_GetAndSetP:
1804 case Op_GetAndSetN: {
1805 add_objload_to_connection_graph(n, delayed_worklist);
1806 // fall-through
1807 }
1808 case Op_StoreP:
1809 case Op_StoreN:
1810 case Op_StoreNKlass:
1811 case Op_WeakCompareAndSwapP:
1812 case Op_WeakCompareAndSwapN:
1813 case Op_CompareAndSwapP:
1814 case Op_CompareAndSwapN: {
1815 add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
1816 break;
1817 }
1818 case Op_AryEq:
1819 case Op_CountPositives:
1820 case Op_StrComp:
1821 case Op_StrEquals:
1822 case Op_StrIndexOf:
1823 case Op_StrIndexOfChar:
1824 case Op_StrInflatedCopy:
1825 case Op_StrCompressedCopy:
1826 case Op_VectorizedHashCode:
1827 case Op_EncodeISOArray: {
1828 add_local_var(n, PointsToNode::ArgEscape);
1829 delayed_worklist->push(n); // Process it later.
1830 break;
1831 }
1832 case Op_ThreadLocal: {
1833 PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
1834 set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
1835 break;
1836 }
1837 case Op_Blackhole: {
1838 // All blackhole pointer arguments are globally escaping.
1839 // Only do this if there is at least one pointer argument.
1840 // Do not add edges during first iteration because some could be
1841 // not defined yet, defer to final step.
1842 for (uint i = 0; i < n->req(); i++) {
1843 Node* in = n->in(i);
1844 if (in != nullptr) {
1845 const Type* at = _igvn->type(in);
1846 if (!at->isa_ptr()) continue;
1847
1848 add_local_var(n, PointsToNode::GlobalEscape);
1849 delayed_worklist->push(n);
1850 break;
1851 }
1852 }
1853 break;
1854 }
1855 default:
1856 ; // Do nothing for nodes not related to EA.
1857 }
1858 return;
1859 }
1860
1861 // Add final simple edges to graph.
1862 void ConnectionGraph::add_final_edges(Node *n) {
1863 PointsToNode* n_ptn = ptnode_adr(n->_idx);
1864 #ifdef ASSERT
1865 if (_verify && n_ptn->is_JavaObject())
1866 return; // This method does not change graph for JavaObject.
1867 #endif
1868
1869 if (n->is_Call()) {
1870 process_call_arguments(n->as_Call());
1871 return;
1872 }
1873 assert(n->is_Store() || n->is_LoadStore() || n->is_StoreFlat() ||
1874 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1875 "node should be registered already");
1876 int opcode = n->Opcode();
1877 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1878 if (gc_handled) {
1879 return; // Ignore node if already handled by GC.
1880 }
1881 switch (opcode) {
1882 case Op_AddP: {
1883 Node* base = get_addp_base(n);
1884 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1885 assert(ptn_base != nullptr, "field's base should be registered");
1886 add_base(n_ptn->as_Field(), ptn_base);
1887 break;
1888 }
1889 case Op_InlineType:
1890 case Op_CastPP:
1891 case Op_CheckCastPP:
1892 case Op_EncodeP:
1893 case Op_DecodeN:
1894 case Op_EncodePKlass:
1895 case Op_DecodeNKlass: {
1896 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1897 break;
1898 }
1899 case Op_CMoveP: {
1900 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1901 Node* in = n->in(i);
1902 if (in == nullptr) {
1903 continue; // ignore null
1904 }
1905 Node* uncast_in = in->uncast();
1906 if (uncast_in->is_top() || uncast_in == n) {
1907 continue; // ignore top or inputs which go back this node
1908 }
1909 PointsToNode* ptn = ptnode_adr(in->_idx);
1910 assert(ptn != nullptr, "node should be registered");
1911 add_edge(n_ptn, ptn);
1912 }
1913 break;
1914 }
1915 case Op_LoadP:
1916 case Op_LoadN: {
1917 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1918 // ThreadLocal has RawPtr type.
1919 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1920 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1921 break;
1922 }
1923 case Op_Phi: {
1924 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1925 // ThreadLocal has RawPtr type.
1926 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1927 for (uint i = 1; i < n->req(); i++) {
1928 Node* in = n->in(i);
1929 if (in == nullptr) {
1930 continue; // ignore null
1931 }
1932 Node* uncast_in = in->uncast();
1933 if (uncast_in->is_top() || uncast_in == n) {
1934 continue; // ignore top or inputs which go back this node
1935 }
1936 PointsToNode* ptn = ptnode_adr(in->_idx);
1937 assert(ptn != nullptr, "node should be registered");
1938 add_edge(n_ptn, ptn);
1939 }
1940 break;
1941 }
1942 case Op_StoreFlat: {
1943 // StoreFlat globally escapes its stored flattened fields
1944 InlineTypeNode* value = n->as_StoreFlat()->value();
1945 ciInlineKlass* vk = _igvn->type(value)->inline_klass();
1946 for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1947 ciField* field = vk->nonstatic_field_at(i);
1948 if (field->type()->is_primitive_type()) {
1949 continue;
1950 }
1951
1952 Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1953 PointsToNode* field_value_ptn = ptnode_adr(field_value->_idx);
1954 set_escape_state(field_value_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "store into a flat field"));
1955 }
1956 break;
1957 }
1958 case Op_Proj: {
1959 add_proj(n, nullptr);
1960 break;
1961 }
1962 case Op_Rethrow: // Exception object escapes
1963 case Op_Return: {
1964 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1965 "Unexpected node type");
1966 // Treat Return value as LocalVar with GlobalEscape escape state.
1967 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1968 break;
1969 }
1970 case Op_CompareAndExchangeP:
1971 case Op_CompareAndExchangeN:
1972 case Op_GetAndSetP:
1973 case Op_GetAndSetN:{
1974 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1975 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1976 // fall-through
1977 }
1978 case Op_CompareAndSwapP:
1979 case Op_CompareAndSwapN:
1980 case Op_WeakCompareAndSwapP:
1981 case Op_WeakCompareAndSwapN:
1982 case Op_StoreP:
1983 case Op_StoreN:
1984 case Op_StoreNKlass:{
1985 add_final_edges_unsafe_access(n, opcode);
1986 break;
1987 }
1988 case Op_VectorizedHashCode:
1989 case Op_AryEq:
1990 case Op_CountPositives:
1991 case Op_StrComp:
1992 case Op_StrEquals:
1993 case Op_StrIndexOf:
1994 case Op_StrIndexOfChar:
1995 case Op_StrInflatedCopy:
1996 case Op_StrCompressedCopy:
1997 case Op_EncodeISOArray: {
1998 // char[]/byte[] arrays passed to string intrinsic do not escape but
1999 // they are not scalar replaceable. Adjust escape state for them.
2000 // Start from in(2) edge since in(1) is memory edge.
2001 for (uint i = 2; i < n->req(); i++) {
2002 Node* adr = n->in(i);
2003 const Type* at = _igvn->type(adr);
2004 if (!adr->is_top() && at->isa_ptr()) {
2005 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
2006 at->isa_ptr() != nullptr, "expecting a pointer");
2007 if (adr->is_AddP()) {
2008 adr = get_addp_base(adr);
2009 }
2010 PointsToNode* ptn = ptnode_adr(adr->_idx);
2011 assert(ptn != nullptr, "node should be registered");
2012 add_edge(n_ptn, ptn);
2013 }
2014 }
2015 break;
2016 }
2017 case Op_Blackhole: {
2018 // All blackhole pointer arguments are globally escaping.
2019 for (uint i = 0; i < n->req(); i++) {
2020 Node* in = n->in(i);
2021 if (in != nullptr) {
2022 const Type* at = _igvn->type(in);
2023 if (!at->isa_ptr()) continue;
2024
2025 if (in->is_AddP()) {
2026 in = get_addp_base(in);
2027 }
2028
2029 PointsToNode* ptn = ptnode_adr(in->_idx);
2030 assert(ptn != nullptr, "should be defined already");
2031 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
2032 add_edge(n_ptn, ptn);
2033 }
2034 }
2035 break;
2036 }
2037 default: {
2038 // This method should be called only for EA specific nodes which may
2039 // miss some edges when they were created.
2040 #ifdef ASSERT
2041 n->dump(1);
2042 #endif
2043 guarantee(false, "unknown node");
2044 }
2045 }
2046 return;
2047 }
2048
2049 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
2050 Node* adr = n->in(MemNode::Address);
2051 const Type* adr_type = _igvn->type(adr);
2052 adr_type = adr_type->make_ptr();
2053 if (adr_type == nullptr) {
2054 return; // skip dead nodes
2055 }
2056 if (adr_type->isa_oopptr()
2057 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
2058 && adr_type == TypeRawPtr::NOTNULL
2059 && is_captured_store_address(adr))) {
2060 delayed_worklist->push(n); // Process it later.
2061 #ifdef ASSERT
2062 assert (adr->is_AddP(), "expecting an AddP");
2063 if (adr_type == TypeRawPtr::NOTNULL) {
2064 // Verify a raw address for a store captured by Initialize node.
2065 int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2066 assert(offs != Type::OffsetBot, "offset must be a constant");
2067 }
2068 #endif
2069 } else {
2070 // Ignore copy the displaced header to the BoxNode (OSR compilation).
2071 if (adr->is_BoxLock()) {
2072 return;
2073 }
2074 // Stored value escapes in unsafe access.
2075 if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
2076 delayed_worklist->push(n); // Process unsafe access later.
2077 return;
2078 }
2079 #ifdef ASSERT
2080 n->dump(1);
2081 assert(false, "not unsafe");
2082 #endif
2083 }
2084 }
2085
2086 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
2087 Node* adr = n->in(MemNode::Address);
2088 const Type *adr_type = _igvn->type(adr);
2089 adr_type = adr_type->make_ptr();
2090 #ifdef ASSERT
2091 if (adr_type == nullptr) {
2092 n->dump(1);
2093 assert(adr_type != nullptr, "dead node should not be on list");
2094 return true;
2095 }
2096 #endif
2097
2098 if (adr_type->isa_oopptr()
2099 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
2100 && adr_type == TypeRawPtr::NOTNULL
2101 && is_captured_store_address(adr))) {
2102 // Point Address to Value
2103 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2104 assert(adr_ptn != nullptr &&
2105 adr_ptn->as_Field()->is_oop(), "node should be registered");
2106 Node* val = n->in(MemNode::ValueIn);
2107 PointsToNode* ptn = ptnode_adr(val->_idx);
2108 assert(ptn != nullptr, "node should be registered");
2109 add_edge(adr_ptn, ptn);
2110 return true;
2111 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
2112 // Stored value escapes in unsafe access.
2113 Node* val = n->in(MemNode::ValueIn);
2114 PointsToNode* ptn = ptnode_adr(val->_idx);
2115 assert(ptn != nullptr, "node should be registered");
2116 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
2117 // Add edge to object for unsafe access with offset.
2118 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2119 assert(adr_ptn != nullptr, "node should be registered");
2120 if (adr_ptn->is_Field()) {
2121 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2122 add_edge(adr_ptn, ptn);
2123 }
2124 return true;
2125 }
2126 #ifdef ASSERT
2127 n->dump(1);
2128 assert(false, "not unsafe");
2129 #endif
2130 return false;
2131 }
2132
2133 // Iterate over the domains for the scalarized and non scalarized calling conventions: Only move to the next element
2134 // in the non scalarized calling convention once all elements of the scalarized calling convention for that parameter
2135 // have been iterated over. So (ignoring hidden arguments such as the null marker) iterating over:
2136 // value class MyValue {
2137 // int f1;
2138 // float f2;
2139 // }
2140 // void m(Object o, MyValue v, int i)
2141 // produces the pairs:
2142 // (Object, Object), (Myvalue, int), (MyValue, float), (int, int)
2143 class DomainIterator : public StackObj {
2144 private:
2145 const TypeTuple* _domain;
2146 const TypeTuple* _domain_cc;
2147 const GrowableArray<SigEntry>* _sig_cc;
2148
2149 uint _i_domain;
2150 uint _i_domain_cc;
2151 int _i_sig_cc;
2152 uint _depth;
2153 uint _first_field_pos;
2154 const bool _is_static;
2155
2156 void next_helper() {
2157 if (_sig_cc == nullptr) {
2158 return;
2159 }
2160 BasicType prev_bt = _i_sig_cc > 0 ? _sig_cc->at(_i_sig_cc-1)._bt : T_ILLEGAL;
2161 BasicType prev_prev_bt = _i_sig_cc > 1 ? _sig_cc->at(_i_sig_cc-2)._bt : T_ILLEGAL;
2162 while (_i_sig_cc < _sig_cc->length()) {
2163 BasicType bt = _sig_cc->at(_i_sig_cc)._bt;
2164 assert(bt != T_VOID || _sig_cc->at(_i_sig_cc-1)._bt == prev_bt, "incorrect prev bt");
2165 if (bt == T_METADATA) {
2166 if (_depth == 0) {
2167 _first_field_pos = _i_domain_cc;
2168 }
2169 _depth++;
2170 } else if (bt == T_VOID && (prev_bt != T_LONG && prev_bt != T_DOUBLE)) {
2171 _depth--;
2172 if (_depth == 0) {
2173 _i_domain++;
2174 }
2175 } else if (bt == T_OBJECT && prev_bt == T_METADATA && (_is_static || _i_domain > 0) && _sig_cc->at(_i_sig_cc)._offset == 0) {
2176 assert(_sig_cc->at(_i_sig_cc)._vt_oop, "buffer expected right after T_METADATA");
2177 assert(_depth == 1, "only root value has buffer");
2178 _i_domain_cc++;
2179 _first_field_pos = _i_domain_cc;
2180 } else if (bt == T_BOOLEAN && prev_prev_bt == T_METADATA && (_is_static || _i_domain > 0) && _sig_cc->at(_i_sig_cc)._offset == -1) {
2181 assert(_sig_cc->at(_i_sig_cc)._null_marker, "null marker expected right after T_METADATA");
2182 assert(_depth == 1, "only root value null marker");
2183 _i_domain_cc++;
2184 _first_field_pos = _i_domain_cc;
2185 } else {
2186 return;
2187 }
2188 prev_prev_bt = prev_bt;
2189 prev_bt = bt;
2190 _i_sig_cc++;
2191 }
2192 }
2193
2194 public:
2195
2196 DomainIterator(CallJavaNode* call) :
2197 _domain(call->tf()->domain_sig()),
2198 _domain_cc(call->tf()->domain_cc()),
2199 _sig_cc(call->method()->get_sig_cc()),
2200 _i_domain(TypeFunc::Parms),
2201 _i_domain_cc(TypeFunc::Parms),
2202 _i_sig_cc(0),
2203 _depth(0),
2204 _first_field_pos(0),
2205 _is_static(call->method()->is_static()) {
2206 next_helper();
2207 }
2208
2209 bool has_next() const {
2210 assert(_sig_cc == nullptr || (_i_sig_cc < _sig_cc->length()) == (_i_domain < _domain->cnt()), "should reach end in sync");
2211 assert((_i_domain < _domain->cnt()) == (_i_domain_cc < _domain_cc->cnt()), "should reach end in sync");
2212 return _i_domain < _domain->cnt();
2213 }
2214
2215 void next() {
2216 assert(_depth != 0 || _domain->field_at(_i_domain) == _domain_cc->field_at(_i_domain_cc), "should produce same non scalarized elements");
2217 _i_sig_cc++;
2218 if (_depth == 0) {
2219 _i_domain++;
2220 }
2221 _i_domain_cc++;
2222 next_helper();
2223 }
2224
2225 uint i_domain() const {
2226 return _i_domain;
2227 }
2228
2229 uint i_domain_cc() const {
2230 return _i_domain_cc;
2231 }
2232
2233 const Type* current_domain() const {
2234 return _domain->field_at(_i_domain);
2235 }
2236
2237 const Type* current_domain_cc() const {
2238 return _domain_cc->field_at(_i_domain_cc);
2239 }
2240
2241 uint first_field_pos() const {
2242 assert(_first_field_pos >= TypeFunc::Parms, "not yet updated?");
2243 return _first_field_pos;
2244 }
2245 };
2246
2247 // Determine whether any arguments are returned.
2248 bool ConnectionGraph::returns_an_argument(CallNode* call) {
2249 ciMethod* meth = call->as_CallJava()->method();
2250 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2251 if (call_analyzer == nullptr) {
2252 return false;
2253 }
2254
2255 const TypeTuple* d = call->tf()->domain_sig();
2256 bool ret_arg = false;
2257 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2258 if (d->field_at(i)->isa_ptr() != nullptr &&
2259 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2260 if (meth->is_scalarized_arg(i - TypeFunc::Parms) && !compatible_return(call->as_CallJava(), i)) {
2261 return false;
2262 }
2263 if (call->tf()->returns_inline_type_as_fields() != meth->is_scalarized_arg(i - TypeFunc::Parms)) {
2264 return false;
2265 }
2266 ret_arg = true;
2267 }
2268 }
2269 return ret_arg;
2270 }
2271
2272 void ConnectionGraph::add_call_node(CallNode* call) {
2273 assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
2274 uint call_idx = call->_idx;
2275 if (call->is_Allocate()) {
2276 Node* k = call->in(AllocateNode::KlassNode);
2277 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
2278 assert(kt != nullptr, "TypeKlassPtr required.");
2279 PointsToNode::EscapeState es = PointsToNode::NoEscape;
2280 bool scalar_replaceable = true;
2281 NOT_PRODUCT(const char* nsr_reason = "");
2282 if (call->is_AllocateArray()) {
2283 if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2284 es = PointsToNode::GlobalEscape;
2285 } else {
2286 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2287 if (length < 0) {
2288 // Not scalar replaceable if the length is not constant.
2289 scalar_replaceable = false;
2290 NOT_PRODUCT(nsr_reason = "has a non-constant length");
2291 } else if (length > EliminateAllocationArraySizeLimit) {
2292 // Not scalar replaceable if the length is too big.
2293 scalar_replaceable = false;
2294 NOT_PRODUCT(nsr_reason = "has a length that is too big");
2295 }
2296 }
2297 } else { // Allocate instance
2298 if (!kt->isa_instklassptr()) { // StressReflectiveCode
2299 es = PointsToNode::GlobalEscape;
2300 } else {
2301 const TypeInstKlassPtr* ikt = kt->is_instklassptr();
2302 ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
2303 if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2304 ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2305 !ik->can_be_instantiated() ||
2306 ik->has_finalizer()) {
2307 es = PointsToNode::GlobalEscape;
2308 } else {
2309 int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
2310 if (nfields > EliminateAllocationFieldsLimit) {
2311 // Not scalar replaceable if there are too many fields.
2312 scalar_replaceable = false;
2313 NOT_PRODUCT(nsr_reason = "has too many fields");
2314 }
2315 }
2316 }
2317 }
2318 add_java_object(call, es);
2319 PointsToNode* ptn = ptnode_adr(call_idx);
2320 if (!scalar_replaceable && ptn->scalar_replaceable()) {
2321 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2322 }
2323 } else if (call->is_CallStaticJava()) {
2324 // Call nodes could be different types:
2325 //
2326 // 1. CallDynamicJavaNode (what happened during call is unknown):
2327 //
2328 // - mapped to GlobalEscape JavaObject node if oop is returned;
2329 //
2330 // - all oop arguments are escaping globally;
2331 //
2332 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2333 //
2334 // - the same as CallDynamicJavaNode if can't do bytecode analysis;
2335 //
2336 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2337 // - mapped to NoEscape JavaObject node if non-escaping object allocated
2338 // during call is returned;
2339 // - mapped to ArgEscape LocalVar node pointed to object arguments
2340 // which are returned and does not escape during call;
2341 //
2342 // - oop arguments escaping status is defined by bytecode analysis;
2343 //
2344 // For a static call, we know exactly what method is being called.
2345 // Use bytecode estimator to record whether the call's return value escapes.
2346 ciMethod* meth = call->as_CallJava()->method();
2347 if (meth == nullptr) {
2348 const char* name = call->as_CallStaticJava()->_name;
2349 assert(call->as_CallStaticJava()->is_call_to_multianewarray_stub() ||
2350 strncmp(name, "load_unknown_inline", 19) == 0 ||
2351 strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "TODO: add failed case check");
2352 // Returns a newly allocated non-escaped object.
2353 add_java_object(call, PointsToNode::NoEscape);
2354 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2355 } else if (meth->is_boxing_method()) {
2356 // Returns boxing object
2357 PointsToNode::EscapeState es;
2358 vmIntrinsics::ID intr = meth->intrinsic_id();
2359 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2360 // It does not escape if object is always allocated.
2361 es = PointsToNode::NoEscape;
2362 } else {
2363 // It escapes globally if object could be loaded from cache.
2364 es = PointsToNode::GlobalEscape;
2365 }
2366 add_java_object(call, es);
2367 if (es == PointsToNode::GlobalEscape) {
2368 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2369 }
2370 } else {
2371 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2372 call_analyzer->copy_dependencies(_compile->dependencies());
2373 if (call_analyzer->is_return_allocated()) {
2374 // Returns a newly allocated non-escaped object, simply
2375 // update dependency information.
2376 // Mark it as NoEscape so that objects referenced by
2377 // it's fields will be marked as NoEscape at least.
2378 add_java_object(call, PointsToNode::NoEscape);
2379 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2380 } else {
2381 // For non scalarized argument/return: add_proj() adds an edge between the return projection and the call,
2382 // process_call_arguments() adds an edge between the call and the argument
2383 // For scalarized argument/return: process_call_arguments() adds an edge between a call projection for a field
2384 // and the argument input to the call for that field. An edge is added between the projection for the returned
2385 // buffer and the call.
2386 if (returns_an_argument(call) && !call->tf()->returns_inline_type_as_fields()) {
2387 // returns non scalarized argument
2388 add_local_var(call, PointsToNode::ArgEscape);
2389 } else {
2390 // Returns unknown object or scalarized argument being returned
2391 map_ideal_node(call, phantom_obj);
2392 }
2393 }
2394 }
2395 } else {
2396 // An other type of call, assume the worst case:
2397 // returned value is unknown and globally escapes.
2398 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2399 map_ideal_node(call, phantom_obj);
2400 }
2401 }
2402
2403 // Check that the return type is compatible with the type of the argument being returned i.e. that there's no cast that
2404 // fails in the method
2405 bool ConnectionGraph::compatible_return(CallJavaNode* call, uint k) {
2406 return call->tf()->domain_sig()->field_at(k)->is_instptr()->instance_klass() == call->tf()->range_sig()->field_at(TypeFunc::Parms)->is_instptr()->instance_klass();
2407 }
2408
2409 void ConnectionGraph::process_call_arguments(CallNode *call) {
2410 bool is_arraycopy = false;
2411 switch (call->Opcode()) {
2412 #ifdef ASSERT
2413 case Op_Allocate:
2414 case Op_AllocateArray:
2415 case Op_Lock:
2416 case Op_Unlock:
2417 assert(false, "should be done already");
2418 break;
2419 #endif
2420 case Op_ArrayCopy:
2421 case Op_CallLeafNoFP:
2422 // Most array copies are ArrayCopy nodes at this point but there
2423 // are still a few direct calls to the copy subroutines (See
2424 // PhaseStringOpts::copy_string())
2425 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2426 call->as_CallLeaf()->is_call_to_arraycopystub();
2427 // fall through
2428 case Op_CallLeafVector:
2429 case Op_CallLeaf: {
2430 // Stub calls, objects do not escape but they are not scale replaceable.
2431 // Adjust escape state for outgoing arguments.
2432 const TypeTuple * d = call->tf()->domain_sig();
2433 bool src_has_oops = false;
2434 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2435 const Type* at = d->field_at(i);
2436 Node *arg = call->in(i);
2437 if (arg == nullptr) {
2438 continue;
2439 }
2440 const Type *aat = _igvn->type(arg);
2441 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2442 continue;
2443 }
2444 if (arg->is_AddP()) {
2445 //
2446 // The inline_native_clone() case when the arraycopy stub is called
2447 // after the allocation before Initialize and CheckCastPP nodes.
2448 // Or normal arraycopy for object arrays case.
2449 //
2450 // Set AddP's base (Allocate) as not scalar replaceable since
2451 // pointer to the base (with offset) is passed as argument.
2452 //
2453 arg = get_addp_base(arg);
2454 }
2455 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2456 assert(arg_ptn != nullptr, "should be registered");
2457 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2458 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2459 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2460 aat->isa_ptr() != nullptr, "expecting an Ptr");
2461 bool arg_has_oops = aat->isa_oopptr() &&
2462 (aat->isa_instptr() ||
2463 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2464 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2465 aat->isa_aryptr()->is_flat() &&
2466 aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2467 if (i == TypeFunc::Parms) {
2468 src_has_oops = arg_has_oops;
2469 }
2470 //
2471 // src or dst could be j.l.Object when other is basic type array:
2472 //
2473 // arraycopy(char[],0,Object*,0,size);
2474 // arraycopy(Object*,0,char[],0,size);
2475 //
2476 // Don't add edges in such cases.
2477 //
2478 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2479 arg_has_oops && (i > TypeFunc::Parms);
2480 #ifdef ASSERT
2481 if (!(is_arraycopy ||
2482 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2483 (call->as_CallLeaf()->_name != nullptr &&
2484 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2485 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2486 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2487 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2488 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2489 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2490 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2491 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2492 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2493 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2494 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2495 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2496 strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2497 strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2498 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2499 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2500 strcmp(call->as_CallLeaf()->_name, "kyberNtt") == 0 ||
2501 strcmp(call->as_CallLeaf()->_name, "kyberInverseNtt") == 0 ||
2502 strcmp(call->as_CallLeaf()->_name, "kyberNttMult") == 0 ||
2503 strcmp(call->as_CallLeaf()->_name, "kyberAddPoly_2") == 0 ||
2504 strcmp(call->as_CallLeaf()->_name, "kyberAddPoly_3") == 0 ||
2505 strcmp(call->as_CallLeaf()->_name, "kyber12To16") == 0 ||
2506 strcmp(call->as_CallLeaf()->_name, "kyberBarrettReduce") == 0 ||
2507 strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostNtt") == 0 ||
2508 strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostInverseNtt") == 0 ||
2509 strcmp(call->as_CallLeaf()->_name, "dilithiumNttMult") == 0 ||
2510 strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2511 strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2512 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2513 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2514 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2515 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2516 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2517 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2518 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2519 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2520 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2521 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2522 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2523 strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2524 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2525 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2526 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2527 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2528 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2529 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2530 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2531 strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2532 strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2533 strcmp(call->as_CallLeaf()->_name, "store_inline_type_fields_to_buf") == 0 ||
2534 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2535 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2536 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2537 strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2538 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2539 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2540 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2541 strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2542 ))) {
2543 call->dump();
2544 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2545 }
2546 #endif
2547 // Always process arraycopy's destination object since
2548 // we need to add all possible edges to references in
2549 // source object.
2550 if (arg_esc >= PointsToNode::ArgEscape &&
2551 !arg_is_arraycopy_dest) {
2552 continue;
2553 }
2554 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
2555 if (call->is_ArrayCopy()) {
2556 ArrayCopyNode* ac = call->as_ArrayCopy();
2557 if (ac->is_clonebasic() ||
2558 ac->is_arraycopy_validated() ||
2559 ac->is_copyof_validated() ||
2560 ac->is_copyofrange_validated()) {
2561 es = PointsToNode::NoEscape;
2562 }
2563 }
2564 set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2565 if (arg_is_arraycopy_dest) {
2566 Node* src = call->in(TypeFunc::Parms);
2567 if (src->is_AddP()) {
2568 src = get_addp_base(src);
2569 }
2570 PointsToNode* src_ptn = ptnode_adr(src->_idx);
2571 assert(src_ptn != nullptr, "should be registered");
2572 // Special arraycopy edge:
2573 // Only escape state of destination object's fields affects
2574 // escape state of fields in source object.
2575 add_arraycopy(call, es, src_ptn, arg_ptn);
2576 }
2577 }
2578 }
2579 break;
2580 }
2581 case Op_CallStaticJava: {
2582 // For a static call, we know exactly what method is being called.
2583 // Use bytecode estimator to record the call's escape affects
2584 #ifdef ASSERT
2585 const char* name = call->as_CallStaticJava()->_name;
2586 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2587 #endif
2588 ciMethod* meth = call->as_CallJava()->method();
2589 if ((meth != nullptr) && meth->is_boxing_method()) {
2590 break; // Boxing methods do not modify any oops.
2591 }
2592 BCEscapeAnalyzer* call_analyzer = (meth !=nullptr) ? meth->get_bcea() : nullptr;
2593 // fall-through if not a Java method or no analyzer information
2594 if (call_analyzer != nullptr) {
2595 PointsToNode* call_ptn = ptnode_adr(call->_idx);
2596 bool ret_arg = returns_an_argument(call);
2597 for (DomainIterator di(call->as_CallJava()); di.has_next(); di.next()) {
2598 int k = di.i_domain() - TypeFunc::Parms;
2599 const Type* at = di.current_domain_cc();
2600 Node* arg = call->in(di.i_domain_cc());
2601 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2602 assert(!call_analyzer->is_arg_returned(k) || !meth->is_scalarized_arg(k) ||
2603 !compatible_return(call->as_CallJava(), di.i_domain()) ||
2604 call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1) == nullptr ||
2605 _igvn->type(call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1)) == at,
2606 "scalarized return and scalarized argument should match");
2607 if (at->isa_ptr() != nullptr && call_analyzer->is_arg_returned(k) && ret_arg) {
2608 // The call returns arguments.
2609 if (meth->is_scalarized_arg(k)) {
2610 ProjNode* res_proj = call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1);
2611 if (res_proj != nullptr) {
2612 assert(_igvn->type(res_proj)->isa_ptr(), "scalarized return and scalarized argument should match");
2613 if (res_proj->_con != TypeFunc::Parms) {
2614 // add an edge between the result projection for a field and the argument projection for the same argument field
2615 PointsToNode* proj_ptn = ptnode_adr(res_proj->_idx);
2616 add_edge(proj_ptn, arg_ptn);
2617 if (!call_analyzer->is_return_local()) {
2618 add_edge(proj_ptn, phantom_obj);
2619 }
2620 }
2621 }
2622 } else if (call_ptn != nullptr) { // Is call's result used?
2623 assert(call_ptn->is_LocalVar(), "node should be registered");
2624 assert(arg_ptn != nullptr, "node should be registered");
2625 add_edge(call_ptn, arg_ptn);
2626 }
2627 }
2628 if (at->isa_oopptr() != nullptr &&
2629 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2630 if (!call_analyzer->is_arg_stack(k)) {
2631 // The argument global escapes
2632 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2633 } else {
2634 set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2635 if (!call_analyzer->is_arg_local(k)) {
2636 // The argument itself doesn't escape, but any fields might
2637 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2638 }
2639 }
2640 }
2641 }
2642 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2643 // The call returns arguments.
2644 assert(call_ptn->edge_count() > 0, "sanity");
2645 if (!call_analyzer->is_return_local()) {
2646 // Returns also unknown object.
2647 add_edge(call_ptn, phantom_obj);
2648 }
2649 }
2650 break;
2651 }
2652 }
2653 default: {
2654 // Fall-through here if not a Java method or no analyzer information
2655 // or some other type of call, assume the worst case: all arguments
2656 // globally escape.
2657 const TypeTuple* d = call->tf()->domain_cc();
2658 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2659 const Type* at = d->field_at(i);
2660 if (at->isa_oopptr() != nullptr) {
2661 Node* arg = call->in(i);
2662 if (arg->is_AddP()) {
2663 arg = get_addp_base(arg);
2664 }
2665 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2666 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2667 }
2668 }
2669 }
2670 }
2671 }
2672
2673
2674 // Finish Graph construction.
2675 bool ConnectionGraph::complete_connection_graph(
2676 GrowableArray<PointsToNode*>& ptnodes_worklist,
2677 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2678 GrowableArray<JavaObjectNode*>& java_objects_worklist,
2679 GrowableArray<FieldNode*>& oop_fields_worklist) {
2680 // Normally only 1-3 passes needed to build Connection Graph depending
2681 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
2682 // Set limit to 20 to catch situation when something did go wrong and
2683 // bailout Escape Analysis.
2684 // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
2685 #define GRAPH_BUILD_ITER_LIMIT 20
2686
2687 // Propagate GlobalEscape and ArgEscape escape states and check that
2688 // we still have non-escaping objects. The method pushs on _worklist
2689 // Field nodes which reference phantom_object.
2690 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2691 return false; // Nothing to do.
2692 }
2693 // Now propagate references to all JavaObject nodes.
2694 int java_objects_length = java_objects_worklist.length();
2695 elapsedTimer build_time;
2696 build_time.start();
2697 elapsedTimer time;
2698 bool timeout = false;
2699 int new_edges = 1;
2700 int iterations = 0;
2701 do {
2702 while ((new_edges > 0) &&
2703 (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2704 double start_time = time.seconds();
2705 time.start();
2706 new_edges = 0;
2707 // Propagate references to phantom_object for nodes pushed on _worklist
2708 // by find_non_escaped_objects() and find_field_value().
2709 new_edges += add_java_object_edges(phantom_obj, false);
2710 for (int next = 0; next < java_objects_length; ++next) {
2711 JavaObjectNode* ptn = java_objects_worklist.at(next);
2712 new_edges += add_java_object_edges(ptn, true);
2713
2714 #define SAMPLE_SIZE 4
2715 if ((next % SAMPLE_SIZE) == 0) {
2716 // Each 4 iterations calculate how much time it will take
2717 // to complete graph construction.
2718 time.stop();
2719 // Poll for requests from shutdown mechanism to quiesce compiler
2720 // because Connection graph construction may take long time.
2721 CompileBroker::maybe_block();
2722 double stop_time = time.seconds();
2723 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2724 double time_until_end = time_per_iter * (double)(java_objects_length - next);
2725 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2726 timeout = true;
2727 break; // Timeout
2728 }
2729 start_time = stop_time;
2730 time.start();
2731 }
2732 #undef SAMPLE_SIZE
2733
2734 }
2735 if (timeout) break;
2736 if (new_edges > 0) {
2737 // Update escape states on each iteration if graph was updated.
2738 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2739 return false; // Nothing to do.
2740 }
2741 }
2742 time.stop();
2743 if (time.seconds() >= EscapeAnalysisTimeout) {
2744 timeout = true;
2745 break;
2746 }
2747 _compile->print_method(PHASE_EA_COMPLETE_CONNECTION_GRAPH_ITER, 5);
2748 }
2749 if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2750 time.start();
2751 // Find fields which have unknown value.
2752 int fields_length = oop_fields_worklist.length();
2753 for (int next = 0; next < fields_length; next++) {
2754 FieldNode* field = oop_fields_worklist.at(next);
2755 if (field->edge_count() == 0) {
2756 new_edges += find_field_value(field);
2757 // This code may added new edges to phantom_object.
2758 // Need an other cycle to propagate references to phantom_object.
2759 }
2760 }
2761 time.stop();
2762 if (time.seconds() >= EscapeAnalysisTimeout) {
2763 timeout = true;
2764 break;
2765 }
2766 } else {
2767 new_edges = 0; // Bailout
2768 }
2769 } while (new_edges > 0);
2770
2771 build_time.stop();
2772 _build_time = build_time.seconds();
2773 _build_iterations = iterations;
2774
2775 // Bailout if passed limits.
2776 if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2777 Compile* C = _compile;
2778 if (C->log() != nullptr) {
2779 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2780 C->log()->text("%s", timeout ? "time" : "iterations");
2781 C->log()->end_elem(" limit'");
2782 }
2783 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2784 _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2785 // Possible infinite build_connection_graph loop,
2786 // bailout (no changes to ideal graph were made).
2787 return false;
2788 }
2789
2790 #undef GRAPH_BUILD_ITER_LIMIT
2791
2792 // Find fields initialized by null for non-escaping Allocations.
2793 int non_escaped_length = non_escaped_allocs_worklist.length();
2794 for (int next = 0; next < non_escaped_length; next++) {
2795 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2796 PointsToNode::EscapeState es = ptn->escape_state();
2797 assert(es <= PointsToNode::ArgEscape, "sanity");
2798 if (es == PointsToNode::NoEscape) {
2799 if (find_init_values_null(ptn, _igvn) > 0) {
2800 // Adding references to null object does not change escape states
2801 // since it does not escape. Also no fields are added to null object.
2802 add_java_object_edges(null_obj, false);
2803 }
2804 }
2805 Node* n = ptn->ideal_node();
2806 if (n->is_Allocate()) {
2807 // The object allocated by this Allocate node will never be
2808 // seen by an other thread. Mark it so that when it is
2809 // expanded no MemBarStoreStore is added.
2810 InitializeNode* ini = n->as_Allocate()->initialization();
2811 if (ini != nullptr)
2812 ini->set_does_not_escape();
2813 }
2814 }
2815 return true; // Finished graph construction.
2816 }
2817
2818 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2819 // and check that we still have non-escaping java objects.
2820 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2821 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2822 bool print_method) {
2823 GrowableArray<PointsToNode*> escape_worklist;
2824 // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
2825 int ptnodes_length = ptnodes_worklist.length();
2826 for (int next = 0; next < ptnodes_length; ++next) {
2827 PointsToNode* ptn = ptnodes_worklist.at(next);
2828 if (ptn->escape_state() >= PointsToNode::ArgEscape ||
2829 ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
2830 escape_worklist.push(ptn);
2831 }
2832 }
2833 // Set escape states to referenced nodes (edges list).
2834 while (escape_worklist.length() > 0) {
2835 PointsToNode* ptn = escape_worklist.pop();
2836 PointsToNode::EscapeState es = ptn->escape_state();
2837 PointsToNode::EscapeState field_es = ptn->fields_escape_state();
2838 if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
2839 es >= PointsToNode::ArgEscape) {
2840 // GlobalEscape or ArgEscape state of field means it has unknown value.
2841 if (add_edge(ptn, phantom_obj)) {
2842 // New edge was added
2843 add_field_uses_to_worklist(ptn->as_Field());
2844 }
2845 }
2846 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2847 PointsToNode* e = i.get();
2848 if (e->is_Arraycopy()) {
2849 assert(ptn->arraycopy_dst(), "sanity");
2850 // Propagate only fields escape state through arraycopy edge.
2851 if (e->fields_escape_state() < field_es) {
2852 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2853 escape_worklist.push(e);
2854 }
2855 } else if (es >= field_es) {
2856 // fields_escape_state is also set to 'es' if it is less than 'es'.
2857 if (e->escape_state() < es) {
2858 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2859 escape_worklist.push(e);
2860 }
2861 } else {
2862 // Propagate field escape state.
2863 bool es_changed = false;
2864 if (e->fields_escape_state() < field_es) {
2865 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2866 es_changed = true;
2867 }
2868 if ((e->escape_state() < field_es) &&
2869 e->is_Field() && ptn->is_JavaObject() &&
2870 e->as_Field()->is_oop()) {
2871 // Change escape state of referenced fields.
2872 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2873 es_changed = true;
2874 } else if (e->escape_state() < es) {
2875 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2876 es_changed = true;
2877 }
2878 if (es_changed) {
2879 escape_worklist.push(e);
2880 }
2881 }
2882 if (print_method) {
2883 _compile->print_method(PHASE_EA_CONNECTION_GRAPH_PROPAGATE_ITER, 6, e->ideal_node());
2884 }
2885 }
2886 }
2887 // Remove escaped objects from non_escaped list.
2888 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
2889 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2890 if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
2891 non_escaped_allocs_worklist.delete_at(next);
2892 }
2893 if (ptn->escape_state() == PointsToNode::NoEscape) {
2894 // Find fields in non-escaped allocations which have unknown value.
2895 find_init_values_phantom(ptn);
2896 }
2897 }
2898 return (non_escaped_allocs_worklist.length() > 0);
2899 }
2900
2901 // Add all references to JavaObject node by walking over all uses.
2902 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
2903 int new_edges = 0;
2904 if (populate_worklist) {
2905 // Populate _worklist by uses of jobj's uses.
2906 for (UseIterator i(jobj); i.has_next(); i.next()) {
2907 PointsToNode* use = i.get();
2908 if (use->is_Arraycopy()) {
2909 continue;
2910 }
2911 add_uses_to_worklist(use);
2912 if (use->is_Field() && use->as_Field()->is_oop()) {
2913 // Put on worklist all field's uses (loads) and
2914 // related field nodes (same base and offset).
2915 add_field_uses_to_worklist(use->as_Field());
2916 }
2917 }
2918 }
2919 for (int l = 0; l < _worklist.length(); l++) {
2920 PointsToNode* use = _worklist.at(l);
2921 if (PointsToNode::is_base_use(use)) {
2922 // Add reference from jobj to field and from field to jobj (field's base).
2923 use = PointsToNode::get_use_node(use)->as_Field();
2924 if (add_base(use->as_Field(), jobj)) {
2925 new_edges++;
2926 }
2927 continue;
2928 }
2929 assert(!use->is_JavaObject(), "sanity");
2930 if (use->is_Arraycopy()) {
2931 if (jobj == null_obj) { // null object does not have field edges
2932 continue;
2933 }
2934 // Added edge from Arraycopy node to arraycopy's source java object
2935 if (add_edge(use, jobj)) {
2936 jobj->set_arraycopy_src();
2937 new_edges++;
2938 }
2939 // and stop here.
2940 continue;
2941 }
2942 if (!add_edge(use, jobj)) {
2943 continue; // No new edge added, there was such edge already.
2944 }
2945 new_edges++;
2946 if (use->is_LocalVar()) {
2947 add_uses_to_worklist(use);
2948 if (use->arraycopy_dst()) {
2949 for (EdgeIterator i(use); i.has_next(); i.next()) {
2950 PointsToNode* e = i.get();
2951 if (e->is_Arraycopy()) {
2952 if (jobj == null_obj) { // null object does not have field edges
2953 continue;
2954 }
2955 // Add edge from arraycopy's destination java object to Arraycopy node.
2956 if (add_edge(jobj, e)) {
2957 new_edges++;
2958 jobj->set_arraycopy_dst();
2959 }
2960 }
2961 }
2962 }
2963 } else {
2964 // Added new edge to stored in field values.
2965 // Put on worklist all field's uses (loads) and
2966 // related field nodes (same base and offset).
2967 add_field_uses_to_worklist(use->as_Field());
2968 }
2969 }
2970 _worklist.clear();
2971 _in_worklist.reset();
2972 return new_edges;
2973 }
2974
2975 // Put on worklist all related field nodes.
2976 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2977 assert(field->is_oop(), "sanity");
2978 int offset = field->offset();
2979 add_uses_to_worklist(field);
2980 // Loop over all bases of this field and push on worklist Field nodes
2981 // with the same offset and base (since they may reference the same field).
2982 for (BaseIterator i(field); i.has_next(); i.next()) {
2983 PointsToNode* base = i.get();
2984 add_fields_to_worklist(field, base);
2985 // Check if the base was source object of arraycopy and go over arraycopy's
2986 // destination objects since values stored to a field of source object are
2987 // accessible by uses (loads) of fields of destination objects.
2988 if (base->arraycopy_src()) {
2989 for (UseIterator j(base); j.has_next(); j.next()) {
2990 PointsToNode* arycp = j.get();
2991 if (arycp->is_Arraycopy()) {
2992 for (UseIterator k(arycp); k.has_next(); k.next()) {
2993 PointsToNode* abase = k.get();
2994 if (abase->arraycopy_dst() && abase != base) {
2995 // Look for the same arraycopy reference.
2996 add_fields_to_worklist(field, abase);
2997 }
2998 }
2999 }
3000 }
3001 }
3002 }
3003 }
3004
3005 // Put on worklist all related field nodes.
3006 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
3007 int offset = field->offset();
3008 if (base->is_LocalVar()) {
3009 for (UseIterator j(base); j.has_next(); j.next()) {
3010 PointsToNode* f = j.get();
3011 if (PointsToNode::is_base_use(f)) { // Field
3012 f = PointsToNode::get_use_node(f);
3013 if (f == field || !f->as_Field()->is_oop()) {
3014 continue;
3015 }
3016 int offs = f->as_Field()->offset();
3017 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
3018 add_to_worklist(f);
3019 }
3020 }
3021 }
3022 } else {
3023 assert(base->is_JavaObject(), "sanity");
3024 if (// Skip phantom_object since it is only used to indicate that
3025 // this field's content globally escapes.
3026 (base != phantom_obj) &&
3027 // null object node does not have fields.
3028 (base != null_obj)) {
3029 for (EdgeIterator i(base); i.has_next(); i.next()) {
3030 PointsToNode* f = i.get();
3031 // Skip arraycopy edge since store to destination object field
3032 // does not update value in source object field.
3033 if (f->is_Arraycopy()) {
3034 assert(base->arraycopy_dst(), "sanity");
3035 continue;
3036 }
3037 if (f == field || !f->as_Field()->is_oop()) {
3038 continue;
3039 }
3040 int offs = f->as_Field()->offset();
3041 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
3042 add_to_worklist(f);
3043 }
3044 }
3045 }
3046 }
3047 }
3048
3049 // Find fields which have unknown value.
3050 int ConnectionGraph::find_field_value(FieldNode* field) {
3051 // Escaped fields should have init value already.
3052 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
3053 int new_edges = 0;
3054 for (BaseIterator i(field); i.has_next(); i.next()) {
3055 PointsToNode* base = i.get();
3056 if (base->is_JavaObject()) {
3057 // Skip Allocate's fields which will be processed later.
3058 if (base->ideal_node()->is_Allocate()) {
3059 return 0;
3060 }
3061 assert(base == null_obj, "only null ptr base expected here");
3062 }
3063 }
3064 if (add_edge(field, phantom_obj)) {
3065 // New edge was added
3066 new_edges++;
3067 add_field_uses_to_worklist(field);
3068 }
3069 return new_edges;
3070 }
3071
3072 // Find fields initializing values for allocations.
3073 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
3074 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
3075 PointsToNode* init_val = phantom_obj;
3076 Node* alloc = pta->ideal_node();
3077
3078 // Do nothing for Allocate nodes since its fields values are
3079 // "known" unless they are initialized by arraycopy/clone.
3080 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
3081 if (alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
3082 // Null-free inline type arrays are initialized with an init value instead of null
3083 init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::InitValue)->_idx);
3084 assert(init_val != nullptr, "init value should be registered");
3085 } else {
3086 return 0;
3087 }
3088 }
3089 // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
3090 assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
3091 #ifdef ASSERT
3092 if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
3093 const char* name = alloc->as_CallStaticJava()->_name;
3094 assert(alloc->as_CallStaticJava()->is_call_to_multianewarray_stub() ||
3095 strncmp(name, "load_unknown_inline", 19) == 0 ||
3096 strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "sanity");
3097 }
3098 #endif
3099 // Non-escaped allocation returned from Java or runtime call have unknown values in fields.
3100 int new_edges = 0;
3101 for (EdgeIterator i(pta); i.has_next(); i.next()) {
3102 PointsToNode* field = i.get();
3103 if (field->is_Field() && field->as_Field()->is_oop()) {
3104 if (add_edge(field, init_val)) {
3105 // New edge was added
3106 new_edges++;
3107 add_field_uses_to_worklist(field->as_Field());
3108 }
3109 }
3110 }
3111 return new_edges;
3112 }
3113
3114 // Find fields initializing values for allocations.
3115 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
3116 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
3117 Node* alloc = pta->ideal_node();
3118 // Do nothing for Call nodes since its fields values are unknown.
3119 if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
3120 return 0;
3121 }
3122 InitializeNode* ini = alloc->as_Allocate()->initialization();
3123 bool visited_bottom_offset = false;
3124 GrowableArray<int> offsets_worklist;
3125 int new_edges = 0;
3126
3127 // Check if an oop field's initializing value is recorded and add
3128 // a corresponding null if field's value if it is not recorded.
3129 // Connection Graph does not record a default initialization by null
3130 // captured by Initialize node.
3131 //
3132 for (EdgeIterator i(pta); i.has_next(); i.next()) {
3133 PointsToNode* field = i.get(); // Field (AddP)
3134 if (!field->is_Field() || !field->as_Field()->is_oop()) {
3135 continue; // Not oop field
3136 }
3137 int offset = field->as_Field()->offset();
3138 if (offset == Type::OffsetBot) {
3139 if (!visited_bottom_offset) {
3140 // OffsetBot is used to reference array's element,
3141 // always add reference to null to all Field nodes since we don't
3142 // known which element is referenced.
3143 if (add_edge(field, null_obj)) {
3144 // New edge was added
3145 new_edges++;
3146 add_field_uses_to_worklist(field->as_Field());
3147 visited_bottom_offset = true;
3148 }
3149 }
3150 } else {
3151 // Check only oop fields.
3152 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
3153 if (adr_type->isa_rawptr()) {
3154 #ifdef ASSERT
3155 // Raw pointers are used for initializing stores so skip it
3156 // since it should be recorded already
3157 Node* base = get_addp_base(field->ideal_node());
3158 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
3159 #endif
3160 continue;
3161 }
3162 if (!offsets_worklist.contains(offset)) {
3163 offsets_worklist.append(offset);
3164 Node* value = nullptr;
3165 if (ini != nullptr) {
3166 // StoreP::value_basic_type() == T_ADDRESS
3167 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
3168 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
3169 // Make sure initializing store has the same type as this AddP.
3170 // This AddP may reference non existing field because it is on a
3171 // dead branch of bimorphic call which is not eliminated yet.
3172 if (store != nullptr && store->is_Store() &&
3173 store->as_Store()->value_basic_type() == ft) {
3174 value = store->in(MemNode::ValueIn);
3175 #ifdef ASSERT
3176 if (VerifyConnectionGraph) {
3177 // Verify that AddP already points to all objects the value points to.
3178 PointsToNode* val = ptnode_adr(value->_idx);
3179 assert((val != nullptr), "should be processed already");
3180 PointsToNode* missed_obj = nullptr;
3181 if (val->is_JavaObject()) {
3182 if (!field->points_to(val->as_JavaObject())) {
3183 missed_obj = val;
3184 }
3185 } else {
3186 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
3187 tty->print_cr("----------init store has invalid value -----");
3188 store->dump();
3189 val->dump();
3190 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
3191 }
3192 for (EdgeIterator j(val); j.has_next(); j.next()) {
3193 PointsToNode* obj = j.get();
3194 if (obj->is_JavaObject()) {
3195 if (!field->points_to(obj->as_JavaObject())) {
3196 missed_obj = obj;
3197 break;
3198 }
3199 }
3200 }
3201 }
3202 if (missed_obj != nullptr) {
3203 tty->print_cr("----------field---------------------------------");
3204 field->dump();
3205 tty->print_cr("----------missed reference to object------------");
3206 missed_obj->dump();
3207 tty->print_cr("----------object referenced by init store-------");
3208 store->dump();
3209 val->dump();
3210 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
3211 }
3212 }
3213 #endif
3214 } else {
3215 // There could be initializing stores which follow allocation.
3216 // For example, a volatile field store is not collected
3217 // by Initialize node.
3218 //
3219 // Need to check for dependent loads to separate such stores from
3220 // stores which follow loads. For now, add initial value null so
3221 // that compare pointers optimization works correctly.
3222 }
3223 }
3224 if (value == nullptr) {
3225 // A field's initializing value was not recorded. Add null.
3226 if (add_edge(field, null_obj)) {
3227 // New edge was added
3228 new_edges++;
3229 add_field_uses_to_worklist(field->as_Field());
3230 }
3231 }
3232 }
3233 }
3234 }
3235 return new_edges;
3236 }
3237
3238 // Adjust scalar_replaceable state after Connection Graph is built.
3239 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
3240 // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
3241 // returns true. If one of the constraints in this method set 'jobj' to NSR
3242 // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
3243 // input, 'adjust_scalar_replaceable_state' will eventually be called with
3244 // that other object and the Phi will become a reducible Phi.
3245 // There could be multiple merges involving the same jobj.
3246 Unique_Node_List candidates;
3247
3248 // Search for non-escaping objects which are not scalar replaceable
3249 // and mark them to propagate the state to referenced objects.
3250
3251 for (UseIterator i(jobj); i.has_next(); i.next()) {
3252 PointsToNode* use = i.get();
3253 if (use->is_Arraycopy()) {
3254 continue;
3255 }
3256 if (use->is_Field()) {
3257 FieldNode* field = use->as_Field();
3258 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3259 // 1. An object is not scalar replaceable if the field into which it is
3260 // stored has unknown offset (stored into unknown element of an array).
3261 if (field->offset() == Type::OffsetBot) {
3262 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
3263 return;
3264 }
3265 for (BaseIterator i(field); i.has_next(); i.next()) {
3266 PointsToNode* base = i.get();
3267 // 2. An object is not scalar replaceable if the field into which it is
3268 // stored has multiple bases one of which is null.
3269 if ((base == null_obj) && (field->base_count() > 1)) {
3270 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
3271 return;
3272 }
3273 // 2.5. An object is not scalar replaceable if the field into which it is
3274 // stored has NSR base.
3275 if (!base->scalar_replaceable()) {
3276 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
3277 return;
3278 }
3279 }
3280 }
3281 assert(use->is_Field() || use->is_LocalVar(), "sanity");
3282 // 3. An object is not scalar replaceable if it is merged with other objects
3283 // and we can't remove the merge
3284 for (EdgeIterator j(use); j.has_next(); j.next()) {
3285 PointsToNode* ptn = j.get();
3286 if (ptn->is_JavaObject() && ptn != jobj) {
3287 Node* use_n = use->ideal_node();
3288
3289 // These other local vars may point to multiple objects through a Phi
3290 // In this case we skip them and see if we can reduce the Phi.
3291 if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
3292 use_n = use_n->in(1);
3293 }
3294
3295 // If it's already a candidate or confirmed reducible merge we can skip verification
3296 if (candidates.member(use_n) || reducible_merges.member(use_n)) {
3297 continue;
3298 }
3299
3300 if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
3301 candidates.push(use_n);
3302 } else {
3303 // Mark all objects as NSR if we can't remove the merge
3304 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
3305 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
3306 }
3307 }
3308 }
3309 if (!jobj->scalar_replaceable()) {
3310 return;
3311 }
3312 }
3313
3314 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
3315 if (j.get()->is_Arraycopy()) {
3316 continue;
3317 }
3318
3319 // Non-escaping object node should point only to field nodes.
3320 FieldNode* field = j.get()->as_Field();
3321 int offset = field->as_Field()->offset();
3322
3323 // 4. An object is not scalar replaceable if it has a field with unknown
3324 // offset (array's element is accessed in loop).
3325 if (offset == Type::OffsetBot) {
3326 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
3327 return;
3328 }
3329 // 5. Currently an object is not scalar replaceable if a LoadStore node
3330 // access its field since the field value is unknown after it.
3331 //
3332 Node* n = field->ideal_node();
3333
3334 // Test for an unsafe access that was parsed as maybe off heap
3335 // (with a CheckCastPP to raw memory).
3336 assert(n->is_AddP(), "expect an address computation");
3337 if (n->in(AddPNode::Base)->is_top() &&
3338 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
3339 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
3340 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
3341 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
3342 return;
3343 }
3344
3345 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3346 Node* u = n->fast_out(i);
3347 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
3348 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
3349 return;
3350 }
3351 }
3352
3353 // 6. Or the address may point to more then one object. This may produce
3354 // the false positive result (set not scalar replaceable)
3355 // since the flow-insensitive escape analysis can't separate
3356 // the case when stores overwrite the field's value from the case
3357 // when stores happened on different control branches.
3358 //
3359 // Note: it will disable scalar replacement in some cases:
3360 //
3361 // Point p[] = new Point[1];
3362 // p[0] = new Point(); // Will be not scalar replaced
3363 //
3364 // but it will save us from incorrect optimizations in next cases:
3365 //
3366 // Point p[] = new Point[1];
3367 // if ( x ) p[0] = new Point(); // Will be not scalar replaced
3368 //
3369 if (field->base_count() > 1 && candidates.size() == 0) {
3370 if (has_non_reducible_merge(field, reducible_merges)) {
3371 for (BaseIterator i(field); i.has_next(); i.next()) {
3372 PointsToNode* base = i.get();
3373 // Don't take into account LocalVar nodes which
3374 // may point to only one object which should be also
3375 // this field's base by now.
3376 if (base->is_JavaObject() && base != jobj) {
3377 // Mark all bases.
3378 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3379 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3380 }
3381 }
3382
3383 if (!jobj->scalar_replaceable()) {
3384 return;
3385 }
3386 }
3387 }
3388 }
3389
3390 // The candidate is truly a reducible merge only if none of the other
3391 // constraints ruled it as NSR. There could be multiple merges involving the
3392 // same jobj.
3393 assert(jobj->scalar_replaceable(), "sanity");
3394 for (uint i = 0; i < candidates.size(); i++ ) {
3395 Node* candidate = candidates.at(i);
3396 reducible_merges.push(candidate);
3397 }
3398 }
3399
3400 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3401 for (BaseIterator i(field); i.has_next(); i.next()) {
3402 Node* base = i.get()->ideal_node();
3403 if (base->is_Phi() && !reducible_merges.member(base)) {
3404 return true;
3405 }
3406 }
3407 return false;
3408 }
3409
3410 void ConnectionGraph::revisit_reducible_phi_status(JavaObjectNode* jobj, Unique_Node_List& reducible_merges) {
3411 assert(jobj != nullptr && !jobj->scalar_replaceable(), "jobj should be set as NSR before calling this function.");
3412
3413 // Look for 'phis' that refer to 'jobj' as the last
3414 // remaining scalar replaceable input.
3415 uint reducible_merges_cnt = reducible_merges.size();
3416 for (uint i = 0; i < reducible_merges_cnt; i++) {
3417 Node* phi = reducible_merges.at(i);
3418
3419 // This 'Phi' will be a 'good' if it still points to
3420 // at least one scalar replaceable object. Note that 'obj'
3421 // was/should be marked as NSR before calling this function.
3422 bool good_phi = false;
3423
3424 for (uint j = 1; j < phi->req(); j++) {
3425 JavaObjectNode* phi_in_obj = unique_java_object(phi->in(j));
3426 if (phi_in_obj != nullptr && phi_in_obj->scalar_replaceable()) {
3427 good_phi = true;
3428 break;
3429 }
3430 }
3431
3432 if (!good_phi) {
3433 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Phi %d became non-reducible after node %d became NSR.", phi->_idx, jobj->ideal_node()->_idx);)
3434 reducible_merges.remove(i);
3435
3436 // Decrement the index because the 'remove' call above actually
3437 // moves the last entry of the list to position 'i'.
3438 i--;
3439
3440 reducible_merges_cnt--;
3441 }
3442 }
3443 }
3444
3445 // Propagate NSR (Not scalar replaceable) state.
3446 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist, Unique_Node_List &reducible_merges) {
3447 int jobj_length = jobj_worklist.length();
3448 bool found_nsr_alloc = true;
3449 while (found_nsr_alloc) {
3450 found_nsr_alloc = false;
3451 for (int next = 0; next < jobj_length; ++next) {
3452 JavaObjectNode* jobj = jobj_worklist.at(next);
3453 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
3454 PointsToNode* use = i.get();
3455 if (use->is_Field()) {
3456 FieldNode* field = use->as_Field();
3457 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3458 assert(field->offset() != Type::OffsetBot, "sanity");
3459 for (BaseIterator i(field); i.has_next(); i.next()) {
3460 PointsToNode* base = i.get();
3461 // An object is not scalar replaceable if the field into which
3462 // it is stored has NSR base.
3463 if ((base != null_obj) && !base->scalar_replaceable()) {
3464 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
3465 // Any merge that had only 'jobj' as scalar-replaceable will now be non-reducible,
3466 // because there is no point in reducing a Phi that won't improve the number of SR
3467 // objects.
3468 revisit_reducible_phi_status(jobj, reducible_merges);
3469 found_nsr_alloc = true;
3470 break;
3471 }
3472 }
3473 } else if (use->is_LocalVar()) {
3474 Node* phi = use->ideal_node();
3475 if (phi->Opcode() == Op_Phi && reducible_merges.member(phi) && !can_reduce_phi(phi->as_Phi())) {
3476 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is merged in a non-reducible phi"));
3477 reducible_merges.yank(phi);
3478 found_nsr_alloc = true;
3479 break;
3480 }
3481 }
3482 _compile->print_method(PHASE_EA_PROPAGATE_NSR_ITER, 5, jobj->ideal_node());
3483 }
3484 }
3485 }
3486 }
3487
3488 #ifdef ASSERT
3489 void ConnectionGraph::verify_connection_graph(
3490 GrowableArray<PointsToNode*>& ptnodes_worklist,
3491 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
3492 GrowableArray<JavaObjectNode*>& java_objects_worklist,
3493 GrowableArray<Node*>& addp_worklist) {
3494 // Verify that graph is complete - no new edges could be added.
3495 int java_objects_length = java_objects_worklist.length();
3496 int non_escaped_length = non_escaped_allocs_worklist.length();
3497 int new_edges = 0;
3498 for (int next = 0; next < java_objects_length; ++next) {
3499 JavaObjectNode* ptn = java_objects_worklist.at(next);
3500 new_edges += add_java_object_edges(ptn, true);
3501 }
3502 assert(new_edges == 0, "graph was not complete");
3503 // Verify that escape state is final.
3504 int length = non_escaped_allocs_worklist.length();
3505 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist, /*print_method=*/ false);
3506 assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
3507 (non_escaped_length == length) &&
3508 (_worklist.length() == 0), "escape state was not final");
3509
3510 // Verify fields information.
3511 int addp_length = addp_worklist.length();
3512 for (int next = 0; next < addp_length; ++next ) {
3513 Node* n = addp_worklist.at(next);
3514 FieldNode* field = ptnode_adr(n->_idx)->as_Field();
3515 if (field->is_oop()) {
3516 // Verify that field has all bases
3517 Node* base = get_addp_base(n);
3518 PointsToNode* ptn = ptnode_adr(base->_idx);
3519 if (ptn->is_JavaObject()) {
3520 assert(field->has_base(ptn->as_JavaObject()), "sanity");
3521 } else {
3522 assert(ptn->is_LocalVar(), "sanity");
3523 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3524 PointsToNode* e = i.get();
3525 if (e->is_JavaObject()) {
3526 assert(field->has_base(e->as_JavaObject()), "sanity");
3527 }
3528 }
3529 }
3530 // Verify that all fields have initializing values.
3531 if (field->edge_count() == 0) {
3532 tty->print_cr("----------field does not have references----------");
3533 field->dump();
3534 for (BaseIterator i(field); i.has_next(); i.next()) {
3535 PointsToNode* base = i.get();
3536 tty->print_cr("----------field has next base---------------------");
3537 base->dump();
3538 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
3539 tty->print_cr("----------base has fields-------------------------");
3540 for (EdgeIterator j(base); j.has_next(); j.next()) {
3541 j.get()->dump();
3542 }
3543 tty->print_cr("----------base has references---------------------");
3544 for (UseIterator j(base); j.has_next(); j.next()) {
3545 j.get()->dump();
3546 }
3547 }
3548 }
3549 for (UseIterator i(field); i.has_next(); i.next()) {
3550 i.get()->dump();
3551 }
3552 assert(field->edge_count() > 0, "sanity");
3553 }
3554 }
3555 }
3556 }
3557 #endif
3558
3559 // Optimize ideal graph.
3560 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3561 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3562 Compile* C = _compile;
3563 PhaseIterGVN* igvn = _igvn;
3564 if (EliminateLocks) {
3565 // Mark locks before changing ideal graph.
3566 int cnt = C->macro_count();
3567 for (int i = 0; i < cnt; i++) {
3568 Node *n = C->macro_node(i);
3569 if (n->is_AbstractLock()) { // Lock and Unlock nodes
3570 AbstractLockNode* alock = n->as_AbstractLock();
3571 if (!alock->is_non_esc_obj()) {
3572 const Type* obj_type = igvn->type(alock->obj_node());
3573 if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3574 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3575 // The lock could be marked eliminated by lock coarsening
3576 // code during first IGVN before EA. Replace coarsened flag
3577 // to eliminate all associated locks/unlocks.
3578 #ifdef ASSERT
3579 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3580 #endif
3581 alock->set_non_esc_obj();
3582 }
3583 }
3584 }
3585 }
3586 }
3587
3588 if (OptimizePtrCompare) {
3589 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3590 Node *n = ptr_cmp_worklist.at(i);
3591 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3592 const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3593 if (tcmp->singleton()) {
3594 Node* cmp = igvn->makecon(tcmp);
3595 #ifndef PRODUCT
3596 if (PrintOptimizePtrCompare) {
3597 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3598 if (Verbose) {
3599 n->dump(1);
3600 }
3601 }
3602 #endif
3603 igvn->replace_node(n, cmp);
3604 }
3605 }
3606 }
3607
3608 // For MemBarStoreStore nodes added in library_call.cpp, check
3609 // escape status of associated AllocateNode and optimize out
3610 // MemBarStoreStore node if the allocated object never escapes.
3611 for (int i = 0; i < storestore_worklist.length(); i++) {
3612 Node* storestore = storestore_worklist.at(i);
3613 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3614 if (alloc->is_Allocate() && not_global_escape(alloc)) {
3615 if (alloc->in(AllocateNode::InlineType) != nullptr) {
3616 // Non-escaping inline type buffer allocations don't require a membar
3617 storestore->as_MemBar()->remove(_igvn);
3618 } else {
3619 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3620 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
3621 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3622 igvn->register_new_node_with_optimizer(mb);
3623 igvn->replace_node(storestore, mb);
3624 }
3625 }
3626 }
3627 }
3628
3629 // Atomic flat accesses on non-escaping objects can be optimized to non-atomic accesses
3630 void ConnectionGraph::optimize_flat_accesses(GrowableArray<SafePointNode*>& sfn_worklist) {
3631 PhaseIterGVN& igvn = *_igvn;
3632 bool delay = igvn.delay_transform();
3633 igvn.set_delay_transform(true);
3634 igvn.C->for_each_flat_access([&](Node* n) {
3635 Node* base = n->is_LoadFlat() ? n->as_LoadFlat()->base() : n->as_StoreFlat()->base();
3636 if (!not_global_escape(base)) {
3637 return;
3638 }
3639
3640 bool expanded;
3641 if (n->is_LoadFlat()) {
3642 expanded = n->as_LoadFlat()->expand_non_atomic(igvn);
3643 } else {
3644 expanded = n->as_StoreFlat()->expand_non_atomic(igvn);
3645 }
3646 if (expanded) {
3647 sfn_worklist.remove(n->as_SafePoint());
3648 igvn.C->remove_flat_access(n);
3649 }
3650 });
3651 igvn.set_delay_transform(delay);
3652 }
3653
3654 // Optimize objects compare.
3655 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3656 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]
3657 if (!OptimizePtrCompare) {
3658 return UNKNOWN;
3659 }
3660 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3661 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3662
3663 PointsToNode* ptn1 = ptnode_adr(left->_idx);
3664 PointsToNode* ptn2 = ptnode_adr(right->_idx);
3665 JavaObjectNode* jobj1 = unique_java_object(left);
3666 JavaObjectNode* jobj2 = unique_java_object(right);
3667
3668 // The use of this method during allocation merge reduction may cause 'left'
3669 // or 'right' be something (e.g., a Phi) that isn't in the connection graph or
3670 // that doesn't reference an unique java object.
3671 if (ptn1 == nullptr || ptn2 == nullptr ||
3672 jobj1 == nullptr || jobj2 == nullptr) {
3673 return UNKNOWN;
3674 }
3675
3676 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3677 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3678
3679 // Check simple cases first.
3680 if (jobj1 != nullptr) {
3681 if (jobj1->escape_state() == PointsToNode::NoEscape) {
3682 if (jobj1 == jobj2) {
3683 // Comparing the same not escaping object.
3684 return EQ;
3685 }
3686 Node* obj = jobj1->ideal_node();
3687 // Comparing not escaping allocation.
3688 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3689 !ptn2->points_to(jobj1)) {
3690 return NE; // This includes nullness check.
3691 }
3692 }
3693 }
3694 if (jobj2 != nullptr) {
3695 if (jobj2->escape_state() == PointsToNode::NoEscape) {
3696 Node* obj = jobj2->ideal_node();
3697 // Comparing not escaping allocation.
3698 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3699 !ptn1->points_to(jobj2)) {
3700 return NE; // This includes nullness check.
3701 }
3702 }
3703 }
3704 if (jobj1 != nullptr && jobj1 != phantom_obj &&
3705 jobj2 != nullptr && jobj2 != phantom_obj &&
3706 jobj1->ideal_node()->is_Con() &&
3707 jobj2->ideal_node()->is_Con()) {
3708 // Klass or String constants compare. Need to be careful with
3709 // compressed pointers - compare types of ConN and ConP instead of nodes.
3710 const Type* t1 = jobj1->ideal_node()->get_ptr_type();
3711 const Type* t2 = jobj2->ideal_node()->get_ptr_type();
3712 if (t1->make_ptr() == t2->make_ptr()) {
3713 return EQ;
3714 } else {
3715 return NE;
3716 }
3717 }
3718 if (ptn1->meet(ptn2)) {
3719 return UNKNOWN; // Sets are not disjoint
3720 }
3721
3722 // Sets are disjoint.
3723 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
3724 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
3725 bool set1_has_null_ptr = ptn1->points_to(null_obj);
3726 bool set2_has_null_ptr = ptn2->points_to(null_obj);
3727 if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
3728 (set2_has_unknown_ptr && set1_has_null_ptr)) {
3729 // Check nullness of unknown object.
3730 return UNKNOWN;
3731 }
3732
3733 // Disjointness by itself is not sufficient since
3734 // alias analysis is not complete for escaped objects.
3735 // Disjoint sets are definitely unrelated only when
3736 // at least one set has only not escaping allocations.
3737 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
3738 if (ptn1->non_escaping_allocation()) {
3739 return NE;
3740 }
3741 }
3742 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
3743 if (ptn2->non_escaping_allocation()) {
3744 return NE;
3745 }
3746 }
3747 return UNKNOWN;
3748 }
3749
3750 // Connection Graph construction functions.
3751
3752 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
3753 PointsToNode* ptadr = _nodes.at(n->_idx);
3754 if (ptadr != nullptr) {
3755 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
3756 return;
3757 }
3758 Compile* C = _compile;
3759 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
3760 map_ideal_node(n, ptadr);
3761 }
3762
3763 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
3764 PointsToNode* ptadr = _nodes.at(n->_idx);
3765 if (ptadr != nullptr) {
3766 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
3767 return ptadr;
3768 }
3769 Compile* C = _compile;
3770 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
3771 map_ideal_node(n, ptadr);
3772 return ptadr;
3773 }
3774
3775 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
3776 PointsToNode* ptadr = _nodes.at(n->_idx);
3777 if (ptadr != nullptr) {
3778 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
3779 return;
3780 }
3781 bool unsafe = false;
3782 bool is_oop = is_oop_field(n, offset, &unsafe);
3783 if (unsafe) {
3784 es = PointsToNode::GlobalEscape;
3785 }
3786 Compile* C = _compile;
3787 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
3788 map_ideal_node(n, field);
3789 }
3790
3791 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
3792 PointsToNode* src, PointsToNode* dst) {
3793 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3794 assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3795 PointsToNode* ptadr = _nodes.at(n->_idx);
3796 if (ptadr != nullptr) {
3797 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3798 return;
3799 }
3800 Compile* C = _compile;
3801 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3802 map_ideal_node(n, ptadr);
3803 // Add edge from arraycopy node to source object.
3804 (void)add_edge(ptadr, src);
3805 src->set_arraycopy_src();
3806 // Add edge from destination object to arraycopy node.
3807 (void)add_edge(dst, ptadr);
3808 dst->set_arraycopy_dst();
3809 }
3810
3811 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3812 const Type* adr_type = n->as_AddP()->bottom_type();
3813 int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3814 BasicType bt = T_INT;
3815 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3816 // Check only oop fields.
3817 if (!adr_type->isa_aryptr() ||
3818 adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3819 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3820 // OffsetBot is used to reference array's element. Ignore first AddP.
3821 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3822 bt = T_OBJECT;
3823 }
3824 }
3825 } else if (offset != oopDesc::klass_offset_in_bytes()) {
3826 if (adr_type->isa_instptr()) {
3827 ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3828 if (field != nullptr) {
3829 bt = field->layout_type();
3830 } else {
3831 // Check for unsafe oop field access
3832 if (has_oop_node_outs(n)) {
3833 bt = T_OBJECT;
3834 (*unsafe) = true;
3835 }
3836 }
3837 } else if (adr_type->isa_aryptr()) {
3838 if (offset == arrayOopDesc::length_offset_in_bytes()) {
3839 // Ignore array length load.
3840 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3841 // Ignore first AddP.
3842 } else {
3843 const Type* elemtype = adr_type->is_aryptr()->elem();
3844 if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3845 ciInlineKlass* vk = elemtype->inline_klass();
3846 field_offset += vk->payload_offset();
3847 ciField* field = vk->get_field_by_offset(field_offset, false);
3848 if (field != nullptr) {
3849 bt = field->layout_type();
3850 } else {
3851 assert(field_offset == vk->payload_offset() + vk->null_marker_offset_in_payload(), "no field or null marker of %s at offset %d", vk->name()->as_utf8(), field_offset);
3852 bt = T_BOOLEAN;
3853 }
3854 } else {
3855 bt = elemtype->array_element_basic_type();
3856 }
3857 }
3858 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3859 // Allocation initialization, ThreadLocal field access, unsafe access
3860 if (has_oop_node_outs(n)) {
3861 bt = T_OBJECT;
3862 }
3863 }
3864 }
3865 // Note: T_NARROWOOP is not classed as a real reference type
3866 bool res = (is_reference_type(bt) || bt == T_NARROWOOP);
3867 assert(!has_oop_node_outs(n) || res, "sanity: AddP has oop outs, needs to be treated as oop field");
3868 return res;
3869 }
3870
3871 bool ConnectionGraph::has_oop_node_outs(Node* n) {
3872 return n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3873 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3874 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3875 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n);
3876 }
3877
3878 // Returns unique pointed java object or null.
3879 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3880 // If the node was created after the escape computation we can't answer.
3881 uint idx = n->_idx;
3882 if (idx >= nodes_size()) {
3883 return nullptr;
3884 }
3885 PointsToNode* ptn = ptnode_adr(idx);
3886 if (ptn == nullptr) {
3887 return nullptr;
3888 }
3889 if (ptn->is_JavaObject()) {
3890 return ptn->as_JavaObject();
3891 }
3892 assert(ptn->is_LocalVar(), "sanity");
3893 // Check all java objects it points to.
3894 JavaObjectNode* jobj = nullptr;
3895 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3896 PointsToNode* e = i.get();
3897 if (e->is_JavaObject()) {
3898 if (jobj == nullptr) {
3899 jobj = e->as_JavaObject();
3900 } else if (jobj != e) {
3901 return nullptr;
3902 }
3903 }
3904 }
3905 return jobj;
3906 }
3907
3908 // Return true if this node points only to non-escaping allocations.
3909 bool PointsToNode::non_escaping_allocation() {
3910 if (is_JavaObject()) {
3911 Node* n = ideal_node();
3912 if (n->is_Allocate() || n->is_CallStaticJava()) {
3913 return (escape_state() == PointsToNode::NoEscape);
3914 } else {
3915 return false;
3916 }
3917 }
3918 assert(is_LocalVar(), "sanity");
3919 // Check all java objects it points to.
3920 for (EdgeIterator i(this); i.has_next(); i.next()) {
3921 PointsToNode* e = i.get();
3922 if (e->is_JavaObject()) {
3923 Node* n = e->ideal_node();
3924 if ((e->escape_state() != PointsToNode::NoEscape) ||
3925 !(n->is_Allocate() || n->is_CallStaticJava())) {
3926 return false;
3927 }
3928 }
3929 }
3930 return true;
3931 }
3932
3933 // Return true if we know the node does not escape globally.
3934 bool ConnectionGraph::not_global_escape(Node *n) {
3935 assert(!_collecting, "should not call during graph construction");
3936 // If the node was created after the escape computation we can't answer.
3937 uint idx = n->_idx;
3938 if (idx >= nodes_size()) {
3939 return false;
3940 }
3941 PointsToNode* ptn = ptnode_adr(idx);
3942 if (ptn == nullptr) {
3943 return false; // not in congraph (e.g. ConI)
3944 }
3945 PointsToNode::EscapeState es = ptn->escape_state();
3946 // If we have already computed a value, return it.
3947 if (es >= PointsToNode::GlobalEscape) {
3948 return false;
3949 }
3950 if (ptn->is_JavaObject()) {
3951 return true; // (es < PointsToNode::GlobalEscape);
3952 }
3953 assert(ptn->is_LocalVar(), "sanity");
3954 // Check all java objects it points to.
3955 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3956 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3957 return false;
3958 }
3959 }
3960 return true;
3961 }
3962
3963 // Return true if locked object does not escape globally
3964 // and locked code region (identified by BoxLockNode) is balanced:
3965 // all compiled code paths have corresponding Lock/Unlock pairs.
3966 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3967 if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3968 if (EliminateNestedLocks) {
3969 // We can mark whole locking region as Local only when only
3970 // one object is used for locking.
3971 alock->box_node()->as_BoxLock()->set_local();
3972 }
3973 return true;
3974 }
3975 return false;
3976 }
3977
3978 // Helper functions
3979
3980 // Return true if this node points to specified node or nodes it points to.
3981 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3982 if (is_JavaObject()) {
3983 return (this == ptn);
3984 }
3985 assert(is_LocalVar() || is_Field(), "sanity");
3986 for (EdgeIterator i(this); i.has_next(); i.next()) {
3987 if (i.get() == ptn) {
3988 return true;
3989 }
3990 }
3991 return false;
3992 }
3993
3994 // Return true if one node points to an other.
3995 bool PointsToNode::meet(PointsToNode* ptn) {
3996 if (this == ptn) {
3997 return true;
3998 } else if (ptn->is_JavaObject()) {
3999 return this->points_to(ptn->as_JavaObject());
4000 } else if (this->is_JavaObject()) {
4001 return ptn->points_to(this->as_JavaObject());
4002 }
4003 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
4004 int ptn_count = ptn->edge_count();
4005 for (EdgeIterator i(this); i.has_next(); i.next()) {
4006 PointsToNode* this_e = i.get();
4007 for (int j = 0; j < ptn_count; j++) {
4008 if (this_e == ptn->edge(j)) {
4009 return true;
4010 }
4011 }
4012 }
4013 return false;
4014 }
4015
4016 #ifdef ASSERT
4017 // Return true if bases point to this java object.
4018 bool FieldNode::has_base(JavaObjectNode* jobj) const {
4019 for (BaseIterator i(this); i.has_next(); i.next()) {
4020 if (i.get() == jobj) {
4021 return true;
4022 }
4023 }
4024 return false;
4025 }
4026 #endif
4027
4028 bool ConnectionGraph::is_captured_store_address(Node* addp) {
4029 // Handle simple case first.
4030 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access");
4031 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
4032 return true;
4033 } else if (addp->in(AddPNode::Address)->is_Phi()) {
4034 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
4035 Node* addp_use = addp->fast_out(i);
4036 if (addp_use->is_Store()) {
4037 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
4038 if (addp_use->fast_out(j)->is_Initialize()) {
4039 return true;
4040 }
4041 }
4042 }
4043 }
4044 }
4045 return false;
4046 }
4047
4048 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
4049 const Type *adr_type = phase->type(adr);
4050 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
4051 // We are computing a raw address for a store captured by an Initialize
4052 // compute an appropriate address type. AddP cases #3 and #5 (see below).
4053 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
4054 assert(offs != Type::OffsetBot ||
4055 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
4056 "offset must be a constant or it is initialization of array");
4057 return offs;
4058 }
4059 return adr_type->is_ptr()->flat_offset();
4060 }
4061
4062 Node* ConnectionGraph::get_addp_base(Node *addp) {
4063 assert(addp->is_AddP(), "must be AddP");
4064 //
4065 // AddP cases for Base and Address inputs:
4066 // case #1. Direct object's field reference:
4067 // Allocate
4068 // |
4069 // Proj #5 ( oop result )
4070 // |
4071 // CheckCastPP (cast to instance type)
4072 // | |
4073 // AddP ( base == address )
4074 //
4075 // case #2. Indirect object's field reference:
4076 // Phi
4077 // |
4078 // CastPP (cast to instance type)
4079 // | |
4080 // AddP ( base == address )
4081 //
4082 // case #3. Raw object's field reference for Initialize node.
4083 // Could have an additional Phi merging multiple allocations.
4084 // Allocate
4085 // |
4086 // Proj #5 ( oop result )
4087 // top |
4088 // \ |
4089 // AddP ( base == top )
4090 //
4091 // case #4. Array's element reference:
4092 // {CheckCastPP | CastPP}
4093 // | | |
4094 // | AddP ( array's element offset )
4095 // | |
4096 // AddP ( array's offset )
4097 //
4098 // case #5. Raw object's field reference for arraycopy stub call:
4099 // The inline_native_clone() case when the arraycopy stub is called
4100 // after the allocation before Initialize and CheckCastPP nodes.
4101 // Allocate
4102 // |
4103 // Proj #5 ( oop result )
4104 // | |
4105 // AddP ( base == address )
4106 //
4107 // case #6. Constant Pool, ThreadLocal, CastX2P, Klass, OSR buffer buf or
4108 // Raw object's field reference:
4109 // {ConP, ThreadLocal, CastX2P, raw Load, Parm0}
4110 // top |
4111 // \ |
4112 // AddP ( base == top )
4113 //
4114 // case #7. Klass's field reference.
4115 // LoadKlass
4116 // | |
4117 // AddP ( base == address )
4118 //
4119 // case #8. narrow Klass's field reference.
4120 // LoadNKlass
4121 // |
4122 // DecodeN
4123 // | |
4124 // AddP ( base == address )
4125 //
4126 // case #9. Mixed unsafe access
4127 // {instance}
4128 // |
4129 // CheckCastPP (raw)
4130 // top |
4131 // \ |
4132 // AddP ( base == top )
4133 //
4134 // case #10. Klass fetched with
4135 // LibraryCallKit::load_*_refined_array_klass()
4136 // which has en extra Phi.
4137 // LoadKlass LoadKlass
4138 // | |
4139 // CastPP CastPP
4140 // \ /
4141 // Phi
4142 // top |
4143 // \ |
4144 // AddP ( base == top )
4145 //
4146 Node *base = addp->in(AddPNode::Base);
4147 if (base->uncast()->is_top()) { // The AddP case #3, #6, #9, and #10.
4148 base = addp->in(AddPNode::Address);
4149 while (base->is_AddP()) {
4150 // Case #6 (unsafe access) may have several chained AddP nodes.
4151 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
4152 base = base->in(AddPNode::Address);
4153 }
4154 if (base->Opcode() == Op_CheckCastPP &&
4155 base->bottom_type()->isa_rawptr() &&
4156 _igvn->type(base->in(1))->isa_oopptr()) {
4157 base = base->in(1); // Case #9
4158 } else {
4159 // Case #3, #6, and #10
4160 Node* uncast_base = base->uncast();
4161 int opcode = uncast_base->Opcode();
4162 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
4163 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
4164 (_igvn->C->is_osr_compilation() && uncast_base->is_Parm() && uncast_base->as_Parm()->_con == TypeFunc::Parms)||
4165 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
4166 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_klassptr() != nullptr)) ||
4167 is_captured_store_address(addp) ||
4168 is_load_array_klass_related(uncast_base), "sanity");
4169 }
4170 }
4171 return base;
4172 }
4173
4174 #ifdef ASSERT
4175 // Case #10
4176 bool ConnectionGraph::is_load_array_klass_related(const Node* uncast_base) {
4177 if (!uncast_base->is_Phi() || uncast_base->req() != 3) {
4178 return false;
4179 }
4180 Node* in1 = uncast_base->in(1);
4181 Node* in2 = uncast_base->in(2);
4182 return in1->uncast()->Opcode() == Op_LoadKlass &&
4183 in2->uncast()->Opcode() == Op_LoadKlass;
4184 }
4185 #endif
4186
4187 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
4188 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
4189 Node* addp2 = addp->raw_out(0);
4190 if (addp->outcnt() == 1 && addp2->is_AddP() &&
4191 addp2->in(AddPNode::Base) == n &&
4192 addp2->in(AddPNode::Address) == addp) {
4193 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
4194 //
4195 // Find array's offset to push it on worklist first and
4196 // as result process an array's element offset first (pushed second)
4197 // to avoid CastPP for the array's offset.
4198 // Otherwise the inserted CastPP (LocalVar) will point to what
4199 // the AddP (Field) points to. Which would be wrong since
4200 // the algorithm expects the CastPP has the same point as
4201 // as AddP's base CheckCastPP (LocalVar).
4202 //
4203 // ArrayAllocation
4204 // |
4205 // CheckCastPP
4206 // |
4207 // memProj (from ArrayAllocation CheckCastPP)
4208 // | ||
4209 // | || Int (element index)
4210 // | || | ConI (log(element size))
4211 // | || | /
4212 // | || LShift
4213 // | || /
4214 // | AddP (array's element offset)
4215 // | |
4216 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
4217 // | / /
4218 // AddP (array's offset)
4219 // |
4220 // Load/Store (memory operation on array's element)
4221 //
4222 return addp2;
4223 }
4224 return nullptr;
4225 }
4226
4227 //
4228 // Adjust the type and inputs of an AddP which computes the
4229 // address of a field of an instance
4230 //
4231 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
4232 PhaseGVN* igvn = _igvn;
4233 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
4234 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
4235 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
4236 if (t == nullptr) {
4237 // We are computing a raw address for a store captured by an Initialize
4238 // compute an appropriate address type (cases #3 and #5).
4239 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
4240 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
4241 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
4242 assert(offs != Type::OffsetBot, "offset must be a constant");
4243 if (base_t->isa_aryptr() != nullptr) {
4244 // In the case of a flat inline type array, each field has its
4245 // own slice so we need to extract the field being accessed from
4246 // the address computation
4247 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
4248 } else {
4249 t = base_t->add_offset(offs)->is_oopptr();
4250 }
4251 }
4252 int inst_id = base_t->instance_id();
4253 assert(!t->is_known_instance() || t->instance_id() == inst_id,
4254 "old type must be non-instance or match new type");
4255
4256 // The type 't' could be subclass of 'base_t'.
4257 // As result t->offset() could be large then base_t's size and it will
4258 // cause the failure in add_offset() with narrow oops since TypeOopPtr()
4259 // constructor verifies correctness of the offset.
4260 //
4261 // It could happened on subclass's branch (from the type profiling
4262 // inlining) which was not eliminated during parsing since the exactness
4263 // of the allocation type was not propagated to the subclass type check.
4264 //
4265 // Or the type 't' could be not related to 'base_t' at all.
4266 // It could happen when CHA type is different from MDO type on a dead path
4267 // (for example, from instanceof check) which is not collapsed during parsing.
4268 //
4269 // Do nothing for such AddP node and don't process its users since
4270 // this code branch will go away.
4271 //
4272 if (!t->is_known_instance() &&
4273 !base_t->maybe_java_subtype_of(t)) {
4274 return false; // bail out
4275 }
4276 const TypePtr* tinst = base_t->add_offset(t->offset());
4277 if (tinst->isa_aryptr() && t->isa_aryptr()) {
4278 // In the case of a flat inline type array, each field has its
4279 // own slice so we need to keep track of the field being accessed.
4280 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
4281 // Keep array properties (not flat/null-free)
4282 tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
4283 if (tinst == nullptr) {
4284 return false; // Skip dead path with inconsistent properties
4285 }
4286 }
4287
4288 // Do NOT remove the next line: ensure a new alias index is allocated
4289 // for the instance type. Note: C++ will not remove it since the call
4290 // has side effect.
4291 int alias_idx = _compile->get_alias_index(tinst);
4292 igvn->set_type(addp, tinst);
4293 // record the allocation in the node map
4294 set_map(addp, get_map(base->_idx));
4295 // Set addp's Base and Address to 'base'.
4296 Node *abase = addp->in(AddPNode::Base);
4297 Node *adr = addp->in(AddPNode::Address);
4298 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
4299 adr->in(0)->_idx == (uint)inst_id) {
4300 // Skip AddP cases #3 and #5.
4301 } else {
4302 assert(!abase->is_top(), "sanity"); // AddP case #3
4303 if (abase != base) {
4304 igvn->hash_delete(addp);
4305 addp->set_req(AddPNode::Base, base);
4306 if (abase == adr) {
4307 addp->set_req(AddPNode::Address, base);
4308 } else {
4309 // AddP case #4 (adr is array's element offset AddP node)
4310 #ifdef ASSERT
4311 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
4312 assert(adr->is_AddP() && atype != nullptr &&
4313 atype->instance_id() == inst_id, "array's element offset should be processed first");
4314 #endif
4315 }
4316 igvn->hash_insert(addp);
4317 }
4318 }
4319 // Put on IGVN worklist since at least addp's type was changed above.
4320 record_for_optimizer(addp);
4321 return true;
4322 }
4323
4324 //
4325 // Create a new version of orig_phi if necessary. Returns either the newly
4326 // created phi or an existing phi. Sets create_new to indicate whether a new
4327 // phi was created. Cache the last newly created phi in the node map.
4328 //
4329 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
4330 Compile *C = _compile;
4331 PhaseGVN* igvn = _igvn;
4332 new_created = false;
4333 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
4334 // nothing to do if orig_phi is bottom memory or matches alias_idx
4335 if (phi_alias_idx == alias_idx) {
4336 return orig_phi;
4337 }
4338 // Have we recently created a Phi for this alias index?
4339 PhiNode *result = get_map_phi(orig_phi->_idx);
4340 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
4341 return result;
4342 }
4343 // Previous check may fail when the same wide memory Phi was split into Phis
4344 // for different memory slices. Search all Phis for this region.
4345 if (result != nullptr) {
4346 Node* region = orig_phi->in(0);
4347 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
4348 Node* phi = region->fast_out(i);
4349 if (phi->is_Phi() &&
4350 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
4351 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
4352 return phi->as_Phi();
4353 }
4354 }
4355 }
4356 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
4357 if (C->do_escape_analysis() == true && !C->failing()) {
4358 // Retry compilation without escape analysis.
4359 // If this is the first failure, the sentinel string will "stick"
4360 // to the Compile object, and the C2Compiler will see it and retry.
4361 C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4362 }
4363 return nullptr;
4364 }
4365 orig_phi_worklist.append_if_missing(orig_phi);
4366 const TypePtr *atype = C->get_adr_type(alias_idx);
4367 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
4368 C->copy_node_notes_to(result, orig_phi);
4369 igvn->set_type(result, result->bottom_type());
4370 record_for_optimizer(result);
4371 set_map(orig_phi, result);
4372 new_created = true;
4373 return result;
4374 }
4375
4376 //
4377 // Return a new version of Memory Phi "orig_phi" with the inputs having the
4378 // specified alias index.
4379 //
4380 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
4381 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
4382 Compile *C = _compile;
4383 PhaseGVN* igvn = _igvn;
4384 bool new_phi_created;
4385 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
4386 if (!new_phi_created) {
4387 return result;
4388 }
4389 GrowableArray<PhiNode *> phi_list;
4390 GrowableArray<uint> cur_input;
4391 PhiNode *phi = orig_phi;
4392 uint idx = 1;
4393 bool finished = false;
4394 while(!finished) {
4395 while (idx < phi->req()) {
4396 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
4397 if (mem != nullptr && mem->is_Phi()) {
4398 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
4399 if (new_phi_created) {
4400 // found an phi for which we created a new split, push current one on worklist and begin
4401 // processing new one
4402 phi_list.push(phi);
4403 cur_input.push(idx);
4404 phi = mem->as_Phi();
4405 result = newphi;
4406 idx = 1;
4407 continue;
4408 } else {
4409 mem = newphi;
4410 }
4411 }
4412 if (C->failing()) {
4413 return nullptr;
4414 }
4415 result->set_req(idx++, mem);
4416 }
4417 #ifdef ASSERT
4418 // verify that the new Phi has an input for each input of the original
4419 assert( phi->req() == result->req(), "must have same number of inputs.");
4420 assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
4421 #endif
4422 // Check if all new phi's inputs have specified alias index.
4423 // Otherwise use old phi.
4424 for (uint i = 1; i < phi->req(); i++) {
4425 Node* in = result->in(i);
4426 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
4427 }
4428 // we have finished processing a Phi, see if there are any more to do
4429 finished = (phi_list.length() == 0 );
4430 if (!finished) {
4431 phi = phi_list.pop();
4432 idx = cur_input.pop();
4433 PhiNode *prev_result = get_map_phi(phi->_idx);
4434 prev_result->set_req(idx++, result);
4435 result = prev_result;
4436 }
4437 }
4438 return result;
4439 }
4440
4441 //
4442 // The next methods are derived from methods in MemNode.
4443 //
4444 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
4445 Node *mem = mmem;
4446 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
4447 // means an array I have not precisely typed yet. Do not do any
4448 // alias stuff with it any time soon.
4449 if (toop->base() != Type::AnyPtr &&
4450 !(toop->isa_instptr() &&
4451 toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
4452 toop->offset() == Type::OffsetBot)) {
4453 mem = mmem->memory_at(alias_idx);
4454 // Update input if it is progress over what we have now
4455 }
4456 return mem;
4457 }
4458
4459 //
4460 // Move memory users to their memory slices.
4461 //
4462 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
4463 Compile* C = _compile;
4464 PhaseGVN* igvn = _igvn;
4465 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
4466 assert(tp != nullptr, "ptr type");
4467 int alias_idx = C->get_alias_index(tp);
4468 int general_idx = C->get_general_index(alias_idx);
4469
4470 // Move users first
4471 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4472 Node* use = n->fast_out(i);
4473 if (use->is_MergeMem()) {
4474 MergeMemNode* mmem = use->as_MergeMem();
4475 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
4476 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
4477 continue; // Nothing to do
4478 }
4479 // Replace previous general reference to mem node.
4480 uint orig_uniq = C->unique();
4481 Node* m = find_inst_mem(n, general_idx, orig_phis);
4482 assert(orig_uniq == C->unique(), "no new nodes");
4483 mmem->set_memory_at(general_idx, m);
4484 --imax;
4485 --i;
4486 } else if (use->is_MemBar()) {
4487 assert(!use->is_Initialize(), "initializing stores should not be moved");
4488 if (use->req() > MemBarNode::Precedent &&
4489 use->in(MemBarNode::Precedent) == n) {
4490 // Don't move related membars.
4491 record_for_optimizer(use);
4492 continue;
4493 }
4494 tp = use->as_MemBar()->adr_type()->isa_ptr();
4495 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) ||
4496 alias_idx == general_idx) {
4497 continue; // Nothing to do
4498 }
4499 // Move to general memory slice.
4500 uint orig_uniq = C->unique();
4501 Node* m = find_inst_mem(n, general_idx, orig_phis);
4502 assert(orig_uniq == C->unique(), "no new nodes");
4503 igvn->hash_delete(use);
4504 imax -= use->replace_edge(n, m, igvn);
4505 igvn->hash_insert(use);
4506 record_for_optimizer(use);
4507 --i;
4508 #ifdef ASSERT
4509 } else if (use->is_Mem()) {
4510 // Memory nodes should have new memory input.
4511 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
4512 assert(tp != nullptr, "ptr type");
4513 int idx = C->get_alias_index(tp);
4514 assert(get_map(use->_idx) != nullptr || idx == alias_idx,
4515 "Following memory nodes should have new memory input or be on the same memory slice");
4516 } else if (use->is_Phi()) {
4517 // Phi nodes should be split and moved already.
4518 tp = use->as_Phi()->adr_type()->isa_ptr();
4519 assert(tp != nullptr, "ptr type");
4520 int idx = C->get_alias_index(tp);
4521 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
4522 } else {
4523 use->dump();
4524 assert(false, "should not be here");
4525 #endif
4526 }
4527 }
4528 }
4529
4530 //
4531 // Search memory chain of "mem" to find a MemNode whose address
4532 // is the specified alias index.
4533 //
4534 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000
4535 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, uint rec_depth) {
4536 if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) {
4537 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4538 return nullptr;
4539 }
4540 if (orig_mem == nullptr) {
4541 return orig_mem;
4542 }
4543 Compile* C = _compile;
4544 PhaseGVN* igvn = _igvn;
4545 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4546 bool is_instance = (toop != nullptr) && toop->is_known_instance();
4547 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4548 Node *prev = nullptr;
4549 Node *result = orig_mem;
4550 while (prev != result) {
4551 prev = result;
4552 if (result == start_mem) {
4553 break; // hit one of our sentinels
4554 }
4555 if (result->is_Mem()) {
4556 const Type *at = igvn->type(result->in(MemNode::Address));
4557 if (at == Type::TOP) {
4558 break; // Dead
4559 }
4560 assert (at->isa_ptr() != nullptr, "pointer type required.");
4561 int idx = C->get_alias_index(at->is_ptr());
4562 if (idx == alias_idx) {
4563 break; // Found
4564 }
4565 if (!is_instance && (at->isa_oopptr() == nullptr ||
4566 !at->is_oopptr()->is_known_instance())) {
4567 break; // Do not skip store to general memory slice.
4568 }
4569 result = result->in(MemNode::Memory);
4570 }
4571 if (!is_instance) {
4572 continue; // don't search further for non-instance types
4573 }
4574 // skip over a call which does not affect this memory slice
4575 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4576 Node *proj_in = result->in(0);
4577 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4578 break; // hit one of our sentinels
4579 } else if (proj_in->is_Call()) {
4580 // ArrayCopy node processed here as well
4581 CallNode *call = proj_in->as_Call();
4582 if (!call->may_modify(toop, igvn)) {
4583 result = call->in(TypeFunc::Memory);
4584 }
4585 } else if (proj_in->is_Initialize()) {
4586 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4587 // Stop if this is the initialization for the object instance which
4588 // which contains this memory slice, otherwise skip over it.
4589 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4590 result = proj_in->in(TypeFunc::Memory);
4591 } else if (C->get_alias_index(result->adr_type()) != alias_idx) {
4592 assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
4593 result = get_map(result->_idx);
4594 assert(result != nullptr, "new projection should have been allocated");
4595 break;
4596 }
4597 } else if (proj_in->is_MemBar()) {
4598 // Check if there is an array copy for a clone
4599 // Step over GC barrier when ReduceInitialCardMarks is disabled
4600 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4601 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4602
4603 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4604 // Stop if it is a clone
4605 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4606 if (ac->may_modify(toop, igvn)) {
4607 break;
4608 }
4609 }
4610 result = proj_in->in(TypeFunc::Memory);
4611 }
4612 } else if (result->is_MergeMem()) {
4613 MergeMemNode *mmem = result->as_MergeMem();
4614 result = step_through_mergemem(mmem, alias_idx, toop);
4615 if (result == mmem->base_memory()) {
4616 // Didn't find instance memory, search through general slice recursively.
4617 result = mmem->memory_at(C->get_general_index(alias_idx));
4618 result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1);
4619 if (C->failing()) {
4620 return nullptr;
4621 }
4622 mmem->set_memory_at(alias_idx, result);
4623 }
4624 } else if (result->is_Phi() &&
4625 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
4626 Node *un = result->as_Phi()->unique_input(igvn);
4627 if (un != nullptr) {
4628 orig_phis.append_if_missing(result->as_Phi());
4629 result = un;
4630 } else {
4631 break;
4632 }
4633 } else if (result->is_ClearArray()) {
4634 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
4635 // Can not bypass initialization of the instance
4636 // we are looking for.
4637 break;
4638 }
4639 // Otherwise skip it (the call updated 'result' value).
4640 } else if (result->Opcode() == Op_SCMemProj) {
4641 Node* mem = result->in(0);
4642 Node* adr = nullptr;
4643 if (mem->is_LoadStore()) {
4644 adr = mem->in(MemNode::Address);
4645 } else {
4646 assert(mem->Opcode() == Op_EncodeISOArray ||
4647 mem->Opcode() == Op_StrCompressedCopy, "sanity");
4648 adr = mem->in(3); // Memory edge corresponds to destination array
4649 }
4650 const Type *at = igvn->type(adr);
4651 if (at != Type::TOP) {
4652 assert(at->isa_ptr() != nullptr, "pointer type required.");
4653 int idx = C->get_alias_index(at->is_ptr());
4654 if (idx == alias_idx) {
4655 // Assert in debug mode
4656 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
4657 break; // In product mode return SCMemProj node
4658 }
4659 }
4660 result = mem->in(MemNode::Memory);
4661 } else if (result->Opcode() == Op_StrInflatedCopy) {
4662 Node* adr = result->in(3); // Memory edge corresponds to destination array
4663 const Type *at = igvn->type(adr);
4664 if (at != Type::TOP) {
4665 assert(at->isa_ptr() != nullptr, "pointer type required.");
4666 int idx = C->get_alias_index(at->is_ptr());
4667 if (idx == alias_idx) {
4668 // Assert in debug mode
4669 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
4670 break; // In product mode return SCMemProj node
4671 }
4672 }
4673 result = result->in(MemNode::Memory);
4674 }
4675 }
4676 if (result->is_Phi()) {
4677 PhiNode *mphi = result->as_Phi();
4678 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
4679 const TypePtr *t = mphi->adr_type();
4680 if (!is_instance) {
4681 // Push all non-instance Phis on the orig_phis worklist to update inputs
4682 // during Phase 4 if needed.
4683 orig_phis.append_if_missing(mphi);
4684 } else if (C->get_alias_index(t) != alias_idx) {
4685 // Create a new Phi with the specified alias index type.
4686 result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1);
4687 }
4688 }
4689 // the result is either MemNode, PhiNode, InitializeNode.
4690 return result;
4691 }
4692
4693 //
4694 // Convert the types of non-escaped object to instance types where possible,
4695 // propagate the new type information through the graph, and update memory
4696 // edges and MergeMem inputs to reflect the new type.
4697 //
4698 // We start with allocations (and calls which may be allocations) on alloc_worklist.
4699 // The processing is done in 4 phases:
4700 //
4701 // Phase 1: Process possible allocations from alloc_worklist. Create instance
4702 // types for the CheckCastPP for allocations where possible.
4703 // Propagate the new types through users as follows:
4704 // casts and Phi: push users on alloc_worklist
4705 // AddP: cast Base and Address inputs to the instance type
4706 // push any AddP users on alloc_worklist and push any memnode
4707 // users onto memnode_worklist.
4708 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and
4709 // search the Memory chain for a store with the appropriate type
4710 // address type. If a Phi is found, create a new version with
4711 // the appropriate memory slices from each of the Phi inputs.
4712 // For stores, process the users as follows:
4713 // MemNode: push on memnode_worklist
4714 // MergeMem: push on mergemem_worklist
4715 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
4716 // moving the first node encountered of each instance type to the
4717 // the input corresponding to its alias index.
4718 // appropriate memory slice.
4719 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4720 //
4721 // In the following example, the CheckCastPP nodes are the cast of allocation
4722 // results and the allocation of node 29 is non-escaped and eligible to be an
4723 // instance type.
4724 //
4725 // We start with:
4726 //
4727 // 7 Parm #memory
4728 // 10 ConI "12"
4729 // 19 CheckCastPP "Foo"
4730 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4731 // 29 CheckCastPP "Foo"
4732 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
4733 //
4734 // 40 StoreP 25 7 20 ... alias_index=4
4735 // 50 StoreP 35 40 30 ... alias_index=4
4736 // 60 StoreP 45 50 20 ... alias_index=4
4737 // 70 LoadP _ 60 30 ... alias_index=4
4738 // 80 Phi 75 50 60 Memory alias_index=4
4739 // 90 LoadP _ 80 30 ... alias_index=4
4740 // 100 LoadP _ 80 20 ... alias_index=4
4741 //
4742 //
4743 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
4744 // and creating a new alias index for node 30. This gives:
4745 //
4746 // 7 Parm #memory
4747 // 10 ConI "12"
4748 // 19 CheckCastPP "Foo"
4749 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4750 // 29 CheckCastPP "Foo" iid=24
4751 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4752 //
4753 // 40 StoreP 25 7 20 ... alias_index=4
4754 // 50 StoreP 35 40 30 ... alias_index=6
4755 // 60 StoreP 45 50 20 ... alias_index=4
4756 // 70 LoadP _ 60 30 ... alias_index=6
4757 // 80 Phi 75 50 60 Memory alias_index=4
4758 // 90 LoadP _ 80 30 ... alias_index=6
4759 // 100 LoadP _ 80 20 ... alias_index=4
4760 //
4761 // In phase 2, new memory inputs are computed for the loads and stores,
4762 // And a new version of the phi is created. In phase 4, the inputs to
4763 // node 80 are updated and then the memory nodes are updated with the
4764 // values computed in phase 2. This results in:
4765 //
4766 // 7 Parm #memory
4767 // 10 ConI "12"
4768 // 19 CheckCastPP "Foo"
4769 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4770 // 29 CheckCastPP "Foo" iid=24
4771 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4772 //
4773 // 40 StoreP 25 7 20 ... alias_index=4
4774 // 50 StoreP 35 7 30 ... alias_index=6
4775 // 60 StoreP 45 40 20 ... alias_index=4
4776 // 70 LoadP _ 50 30 ... alias_index=6
4777 // 80 Phi 75 40 60 Memory alias_index=4
4778 // 120 Phi 75 50 50 Memory alias_index=6
4779 // 90 LoadP _ 120 30 ... alias_index=6
4780 // 100 LoadP _ 80 20 ... alias_index=4
4781 //
4782 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
4783 GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
4784 GrowableArray<MergeMemNode*> &mergemem_worklist,
4785 Unique_Node_List &reducible_merges) {
4786 DEBUG_ONLY(Unique_Node_List reduced_merges;)
4787 GrowableArray<Node *> memnode_worklist;
4788 GrowableArray<PhiNode *> orig_phis;
4789 PhaseIterGVN *igvn = _igvn;
4790 uint new_index_start = (uint) _compile->num_alias_types();
4791 VectorSet visited;
4792 ideal_nodes.clear(); // Reset for use with set_map/get_map.
4793
4794 // Phase 1: Process possible allocations from alloc_worklist.
4795 // Create instance types for the CheckCastPP for allocations where possible.
4796 //
4797 // (Note: don't forget to change the order of the second AddP node on
4798 // the alloc_worklist if the order of the worklist processing is changed,
4799 // see the comment in find_second_addp().)
4800 //
4801 while (alloc_worklist.length() != 0) {
4802 Node *n = alloc_worklist.pop();
4803 uint ni = n->_idx;
4804 if (n->is_Call()) {
4805 CallNode *alloc = n->as_Call();
4806 // copy escape information to call node
4807 PointsToNode* ptn = ptnode_adr(alloc->_idx);
4808 PointsToNode::EscapeState es = ptn->escape_state();
4809 // We have an allocation or call which returns a Java object,
4810 // see if it is non-escaped.
4811 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
4812 continue;
4813 }
4814 // Find CheckCastPP for the allocate or for the return value of a call
4815 n = alloc->result_cast();
4816 if (n == nullptr) { // No uses except Initialize node
4817 if (alloc->is_Allocate()) {
4818 // Set the scalar_replaceable flag for allocation
4819 // so it could be eliminated if it has no uses.
4820 alloc->as_Allocate()->_is_scalar_replaceable = true;
4821 }
4822 continue;
4823 }
4824 if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
4825 // we could reach here for allocate case if one init is associated with many allocs.
4826 if (alloc->is_Allocate()) {
4827 alloc->as_Allocate()->_is_scalar_replaceable = false;
4828 }
4829 continue;
4830 }
4831
4832 // The inline code for Object.clone() casts the allocation result to
4833 // java.lang.Object and then to the actual type of the allocated
4834 // object. Detect this case and use the second cast.
4835 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
4836 // the allocation result is cast to java.lang.Object and then
4837 // to the actual Array type.
4838 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
4839 && (alloc->is_AllocateArray() ||
4840 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
4841 Node *cast2 = nullptr;
4842 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4843 Node *use = n->fast_out(i);
4844 if (use->is_CheckCastPP()) {
4845 cast2 = use;
4846 break;
4847 }
4848 }
4849 if (cast2 != nullptr) {
4850 n = cast2;
4851 } else {
4852 // Non-scalar replaceable if the allocation type is unknown statically
4853 // (reflection allocation), the object can't be restored during
4854 // deoptimization without precise type.
4855 continue;
4856 }
4857 }
4858
4859 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
4860 if (t == nullptr) {
4861 continue; // not a TypeOopPtr
4862 }
4863 if (!t->klass_is_exact()) {
4864 continue; // not an unique type
4865 }
4866 if (alloc->is_Allocate()) {
4867 // Set the scalar_replaceable flag for allocation
4868 // so it could be eliminated.
4869 alloc->as_Allocate()->_is_scalar_replaceable = true;
4870 }
4871 set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
4872 // in order for an object to be scalar-replaceable, it must be:
4873 // - a direct allocation (not a call returning an object)
4874 // - non-escaping
4875 // - eligible to be a unique type
4876 // - not determined to be ineligible by escape analysis
4877 set_map(alloc, n);
4878 set_map(n, alloc);
4879 const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4880 igvn->hash_delete(n);
4881 igvn->set_type(n, tinst);
4882 n->raise_bottom_type(tinst);
4883 igvn->hash_insert(n);
4884 record_for_optimizer(n);
4885 // Allocate an alias index for the header fields. Accesses to
4886 // the header emitted during macro expansion wouldn't have
4887 // correct memory state otherwise.
4888 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4889 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4890 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4891 // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
4892 InitializeNode* init = alloc->as_Allocate()->initialization();
4893 assert(init != nullptr, "can't find Initialization node for this Allocate node");
4894 auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
4895 const TypePtr* adr_type = proj->adr_type();
4896 const TypePtr* new_adr_type = tinst->with_offset(adr_type->offset());
4897 if (adr_type->isa_aryptr()) {
4898 // In the case of a flat inline type array, each field has its own slice so we need a
4899 // NarrowMemProj for each field of the flat array elements
4900 new_adr_type = new_adr_type->is_aryptr()->with_field_offset(adr_type->is_aryptr()->field_offset().get());
4901 }
4902 if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
4903 // Do NOT remove the next line: ensure a new alias index is allocated for the instance type.
4904 uint alias_idx = _compile->get_alias_index(new_adr_type);
4905 assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
4906 NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
4907 igvn->set_type(new_proj, new_proj->bottom_type());
4908 record_for_optimizer(new_proj);
4909 set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
4910 }
4911 };
4912 init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);
4913
4914 // First, put on the worklist all Field edges from Connection Graph
4915 // which is more accurate than putting immediate users from Ideal Graph.
4916 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4917 PointsToNode* tgt = e.get();
4918 if (tgt->is_Arraycopy()) {
4919 continue;
4920 }
4921 Node* use = tgt->ideal_node();
4922 assert(tgt->is_Field() && use->is_AddP(),
4923 "only AddP nodes are Field edges in CG");
4924 if (use->outcnt() > 0) { // Don't process dead nodes
4925 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4926 if (addp2 != nullptr) {
4927 assert(alloc->is_AllocateArray(),"array allocation was expected");
4928 alloc_worklist.append_if_missing(addp2);
4929 }
4930 alloc_worklist.append_if_missing(use);
4931 }
4932 }
4933
4934 // An allocation may have an Initialize which has raw stores. Scan
4935 // the users of the raw allocation result and push AddP users
4936 // on alloc_worklist.
4937 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
4938 assert (raw_result != nullptr, "must have an allocation result");
4939 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
4940 Node *use = raw_result->fast_out(i);
4941 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
4942 Node* addp2 = find_second_addp(use, raw_result);
4943 if (addp2 != nullptr) {
4944 assert(alloc->is_AllocateArray(),"array allocation was expected");
4945 alloc_worklist.append_if_missing(addp2);
4946 }
4947 alloc_worklist.append_if_missing(use);
4948 } else if (use->is_MemBar()) {
4949 memnode_worklist.append_if_missing(use);
4950 }
4951 }
4952 }
4953 } else if (n->is_AddP()) {
4954 if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
4955 // This AddP will go away when we reduce the Phi
4956 continue;
4957 }
4958 Node* addp_base = get_addp_base(n);
4959 JavaObjectNode* jobj = unique_java_object(addp_base);
4960 if (jobj == nullptr || jobj == phantom_obj) {
4961 #ifdef ASSERT
4962 ptnode_adr(get_addp_base(n)->_idx)->dump();
4963 ptnode_adr(n->_idx)->dump();
4964 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4965 #endif
4966 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4967 return;
4968 }
4969 Node *base = get_map(jobj->idx()); // CheckCastPP node
4970 if (!split_AddP(n, base)) continue; // wrong type from dead path
4971 } else if (n->is_Phi() ||
4972 n->is_CheckCastPP() ||
4973 n->is_EncodeP() ||
4974 n->is_DecodeN() ||
4975 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
4976 if (visited.test_set(n->_idx)) {
4977 assert(n->is_Phi(), "loops only through Phi's");
4978 continue; // already processed
4979 }
4980 // Reducible Phi's will be removed from the graph after split_unique_types
4981 // finishes. For now we just try to split out the SR inputs of the merge.
4982 Node* parent = n->in(1);
4983 if (reducible_merges.member(n)) {
4984 reduce_phi(n->as_Phi(), alloc_worklist);
4985 #ifdef ASSERT
4986 if (VerifyReduceAllocationMerges) {
4987 reduced_merges.push(n);
4988 }
4989 #endif
4990 continue;
4991 } else if (reducible_merges.member(parent)) {
4992 // 'n' is an user of a reducible merge (a Phi). It will be simplified as
4993 // part of reduce_merge.
4994 continue;
4995 }
4996 JavaObjectNode* jobj = unique_java_object(n);
4997 if (jobj == nullptr || jobj == phantom_obj) {
4998 #ifdef ASSERT
4999 ptnode_adr(n->_idx)->dump();
5000 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
5001 #endif
5002 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
5003 return;
5004 } else {
5005 Node *val = get_map(jobj->idx()); // CheckCastPP node
5006 TypeNode *tn = n->as_Type();
5007 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
5008 assert(tinst != nullptr && tinst->is_known_instance() &&
5009 tinst->instance_id() == jobj->idx() , "instance type expected.");
5010
5011 const Type *tn_type = igvn->type(tn);
5012 const TypeOopPtr *tn_t;
5013 if (tn_type->isa_narrowoop()) {
5014 tn_t = tn_type->make_ptr()->isa_oopptr();
5015 } else {
5016 tn_t = tn_type->isa_oopptr();
5017 }
5018 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
5019 if (tn_t->isa_aryptr()) {
5020 // Keep array properties (not flat/null-free)
5021 tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
5022 if (tinst == nullptr) {
5023 continue; // Skip dead path with inconsistent properties
5024 }
5025 }
5026 if (tn_type->isa_narrowoop()) {
5027 tn_type = tinst->make_narrowoop();
5028 } else {
5029 tn_type = tinst;
5030 }
5031 igvn->hash_delete(tn);
5032 igvn->set_type(tn, tn_type);
5033 tn->set_type(tn_type);
5034 igvn->hash_insert(tn);
5035 record_for_optimizer(n);
5036 } else {
5037 assert(tn_type == TypePtr::NULL_PTR ||
5038 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
5039 "unexpected type");
5040 continue; // Skip dead path with different type
5041 }
5042 }
5043 } else {
5044 DEBUG_ONLY(n->dump();)
5045 assert(false, "EA: unexpected node");
5046 continue;
5047 }
5048 // push allocation's users on appropriate worklist
5049 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5050 Node *use = n->fast_out(i);
5051 if (use->is_Mem() && use->in(MemNode::Address) == n) {
5052 // Load/store to instance's field
5053 memnode_worklist.append_if_missing(use);
5054 } else if (use->is_MemBar()) {
5055 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
5056 memnode_worklist.append_if_missing(use);
5057 }
5058 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
5059 Node* addp2 = find_second_addp(use, n);
5060 if (addp2 != nullptr) {
5061 alloc_worklist.append_if_missing(addp2);
5062 }
5063 alloc_worklist.append_if_missing(use);
5064 } else if (use->is_Phi() ||
5065 use->is_CheckCastPP() ||
5066 use->is_EncodeNarrowPtr() ||
5067 use->is_DecodeNarrowPtr() ||
5068 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
5069 alloc_worklist.append_if_missing(use);
5070 #ifdef ASSERT
5071 } else if (use->is_Mem()) {
5072 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
5073 } else if (use->is_MergeMem()) {
5074 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5075 } else if (use->is_SafePoint()) {
5076 // Look for MergeMem nodes for calls which reference unique allocation
5077 // (through CheckCastPP nodes) even for debug info.
5078 Node* m = use->in(TypeFunc::Memory);
5079 if (m->is_MergeMem()) {
5080 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5081 }
5082 } else if (use->Opcode() == Op_EncodeISOArray) {
5083 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
5084 // EncodeISOArray overwrites destination array
5085 memnode_worklist.append_if_missing(use);
5086 }
5087 } else if (use->Opcode() == Op_Return) {
5088 // Allocation is referenced by field of returned inline type
5089 assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
5090 } else {
5091 uint op = use->Opcode();
5092 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
5093 (use->in(MemNode::Memory) == n)) {
5094 // They overwrite memory edge corresponding to destination array,
5095 memnode_worklist.append_if_missing(use);
5096 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
5097 op == Op_CastP2X ||
5098 op == Op_FastLock || op == Op_AryEq ||
5099 op == Op_StrComp || op == Op_CountPositives ||
5100 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
5101 op == Op_StrEquals || op == Op_VectorizedHashCode ||
5102 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
5103 op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
5104 op == Op_ReinterpretS2HF ||
5105 op == Op_ReachabilityFence ||
5106 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
5107 n->dump();
5108 use->dump();
5109 assert(false, "EA: missing allocation reference path");
5110 }
5111 #endif
5112 }
5113 }
5114
5115 }
5116
5117 #ifdef ASSERT
5118 if (VerifyReduceAllocationMerges) {
5119 for (uint i = 0; i < reducible_merges.size(); i++) {
5120 Node* phi = reducible_merges.at(i);
5121
5122 if (!reduced_merges.member(phi)) {
5123 phi->dump(2);
5124 phi->dump(-2);
5125 assert(false, "This reducible merge wasn't reduced.");
5126 }
5127
5128 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
5129 for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
5130 Node* use = phi->fast_out(j);
5131 if (!use->is_SafePoint() && !use->is_CastPP()) {
5132 phi->dump(2);
5133 phi->dump(-2);
5134 assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
5135 }
5136 }
5137 }
5138 }
5139 #endif
5140
5141 // Go over all ArrayCopy nodes and if one of the inputs has a unique
5142 // type, record it in the ArrayCopy node so we know what memory this
5143 // node uses/modified.
5144 for (int next = 0; next < arraycopy_worklist.length(); next++) {
5145 ArrayCopyNode* ac = arraycopy_worklist.at(next);
5146 Node* dest = ac->in(ArrayCopyNode::Dest);
5147 if (dest->is_AddP()) {
5148 dest = get_addp_base(dest);
5149 }
5150 JavaObjectNode* jobj = unique_java_object(dest);
5151 if (jobj != nullptr) {
5152 Node *base = get_map(jobj->idx());
5153 if (base != nullptr) {
5154 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
5155 ac->_dest_type = base_t;
5156 }
5157 }
5158 Node* src = ac->in(ArrayCopyNode::Src);
5159 if (src->is_AddP()) {
5160 src = get_addp_base(src);
5161 }
5162 jobj = unique_java_object(src);
5163 if (jobj != nullptr) {
5164 Node* base = get_map(jobj->idx());
5165 if (base != nullptr) {
5166 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
5167 ac->_src_type = base_t;
5168 }
5169 }
5170 }
5171
5172 // New alias types were created in split_AddP().
5173 uint new_index_end = (uint) _compile->num_alias_types();
5174
5175 _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_1, 5);
5176
5177 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and
5178 // compute new values for Memory inputs (the Memory inputs are not
5179 // actually updated until phase 4.)
5180 if (memnode_worklist.length() == 0)
5181 return; // nothing to do
5182 while (memnode_worklist.length() != 0) {
5183 Node *n = memnode_worklist.pop();
5184 if (visited.test_set(n->_idx)) {
5185 continue;
5186 }
5187 if (n->is_Phi()) {
5188 if ((uint) _compile->get_alias_index(n->as_Phi()->adr_type()) < new_index_start) {
5189 // Push memory phis on the orig_phis worklist to update
5190 // during Phase 4 if needed.
5191 orig_phis.append_if_missing(n->as_Phi());
5192 }
5193 } else if (n->is_ClearArray()) {
5194 // we don't need to do anything, but the users must be pushed
5195 } else if (n->is_MemBar()) { // MemBar nodes
5196 if (!n->is_Initialize()) { // memory projections for Initialize pushed below (so we get to all their uses)
5197 // we don't need to do anything, but the users must be pushed
5198 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
5199 if (n == nullptr) {
5200 continue;
5201 }
5202 }
5203 } else if (n->is_CallLeaf()) {
5204 // Runtime calls with narrow memory input (no MergeMem node)
5205 // get the memory projection
5206 n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
5207 if (n == nullptr) {
5208 continue;
5209 }
5210 } else if (n->Opcode() == Op_StrInflatedCopy) {
5211 // Check direct uses of StrInflatedCopy.
5212 // It is memory type Node - no special SCMemProj node.
5213 } else if (n->Opcode() == Op_StrCompressedCopy ||
5214 n->Opcode() == Op_EncodeISOArray) {
5215 // get the memory projection
5216 n = n->find_out_with(Op_SCMemProj);
5217 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
5218 } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
5219 strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
5220 n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
5221 } else if (n->is_Proj()) {
5222 assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
5223 } else {
5224 #ifdef ASSERT
5225 if (!n->is_Mem()) {
5226 n->dump();
5227 }
5228 assert(n->is_Mem(), "memory node required.");
5229 #endif
5230 Node *addr = n->in(MemNode::Address);
5231 const Type *addr_t = igvn->type(addr);
5232 if (addr_t == Type::TOP) {
5233 continue;
5234 }
5235 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
5236 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
5237 assert ((uint)alias_idx < new_index_end, "wrong alias index");
5238 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
5239 if (_compile->failing()) {
5240 return;
5241 }
5242 if (mem != n->in(MemNode::Memory)) {
5243 // We delay the memory edge update since we need old one in
5244 // MergeMem code below when instances memory slices are separated.
5245 set_map(n, mem);
5246 }
5247 if (n->is_Load()) {
5248 continue; // don't push users
5249 } else if (n->is_LoadStore()) {
5250 // get the memory projection
5251 n = n->find_out_with(Op_SCMemProj);
5252 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
5253 }
5254 }
5255 // push user on appropriate worklist
5256 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5257 Node *use = n->fast_out(i);
5258 if (use->is_Phi() || use->is_ClearArray()) {
5259 memnode_worklist.append_if_missing(use);
5260 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
5261 memnode_worklist.append_if_missing(use);
5262 } else if (use->is_MemBar() || use->is_CallLeaf()) {
5263 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
5264 memnode_worklist.append_if_missing(use);
5265 }
5266 } else if (use->is_Proj()) {
5267 assert(n->is_Initialize(), "We only push projections of Initialize");
5268 if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
5269 memnode_worklist.append_if_missing(use);
5270 }
5271 #ifdef ASSERT
5272 } else if (use->is_Mem()) {
5273 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
5274 } else if (use->is_MergeMem()) {
5275 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5276 } else if (use->Opcode() == Op_EncodeISOArray) {
5277 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
5278 // EncodeISOArray overwrites destination array
5279 memnode_worklist.append_if_missing(use);
5280 }
5281 } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
5282 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
5283 // store_unknown_inline overwrites destination array
5284 memnode_worklist.append_if_missing(use);
5285 } else {
5286 uint op = use->Opcode();
5287 if ((use->in(MemNode::Memory) == n) &&
5288 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
5289 // They overwrite memory edge corresponding to destination array,
5290 memnode_worklist.append_if_missing(use);
5291 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
5292 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
5293 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
5294 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
5295 n->dump();
5296 use->dump();
5297 assert(false, "EA: missing memory path");
5298 }
5299 #endif
5300 }
5301 }
5302 }
5303
5304 // Phase 3: Process MergeMem nodes from mergemem_worklist.
5305 // Walk each memory slice moving the first node encountered of each
5306 // instance type to the input corresponding to its alias index.
5307 uint length = mergemem_worklist.length();
5308 for( uint next = 0; next < length; ++next ) {
5309 MergeMemNode* nmm = mergemem_worklist.at(next);
5310 assert(!visited.test_set(nmm->_idx), "should not be visited before");
5311 // Note: we don't want to use MergeMemStream here because we only want to
5312 // scan inputs which exist at the start, not ones we add during processing.
5313 // Note 2: MergeMem may already contains instance memory slices added
5314 // during find_inst_mem() call when memory nodes were processed above.
5315 igvn->hash_delete(nmm);
5316 uint nslices = MIN2(nmm->req(), new_index_start);
5317 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
5318 Node* mem = nmm->in(i);
5319 Node* cur = nullptr;
5320 if (mem == nullptr || mem->is_top()) {
5321 continue;
5322 }
5323 // First, update mergemem by moving memory nodes to corresponding slices
5324 // if their type became more precise since this mergemem was created.
5325 while (mem->is_Mem()) {
5326 const Type* at = igvn->type(mem->in(MemNode::Address));
5327 if (at != Type::TOP) {
5328 assert (at->isa_ptr() != nullptr, "pointer type required.");
5329 uint idx = (uint)_compile->get_alias_index(at->is_ptr());
5330 if (idx == i) {
5331 if (cur == nullptr) {
5332 cur = mem;
5333 }
5334 } else {
5335 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
5336 nmm->set_memory_at(idx, mem);
5337 }
5338 }
5339 }
5340 mem = mem->in(MemNode::Memory);
5341 }
5342 nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
5343 // Find any instance of the current type if we haven't encountered
5344 // already a memory slice of the instance along the memory chain.
5345 for (uint ni = new_index_start; ni < new_index_end; ni++) {
5346 if((uint)_compile->get_general_index(ni) == i) {
5347 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
5348 if (nmm->is_empty_memory(m)) {
5349 Node* result = find_inst_mem(mem, ni, orig_phis);
5350 if (_compile->failing()) {
5351 return;
5352 }
5353 nmm->set_memory_at(ni, result);
5354 }
5355 }
5356 }
5357 }
5358 // Find the rest of instances values
5359 for (uint ni = new_index_start; ni < new_index_end; ni++) {
5360 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
5361 Node* result = step_through_mergemem(nmm, ni, tinst);
5362 if (result == nmm->base_memory()) {
5363 // Didn't find instance memory, search through general slice recursively.
5364 result = nmm->memory_at(_compile->get_general_index(ni));
5365 result = find_inst_mem(result, ni, orig_phis);
5366 if (_compile->failing()) {
5367 return;
5368 }
5369 nmm->set_memory_at(ni, result);
5370 }
5371 }
5372
5373 // If we have crossed the 3/4 point of max node limit it's too risky
5374 // to continue with EA/SR because we might hit the max node limit.
5375 if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
5376 if (_compile->do_reduce_allocation_merges()) {
5377 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
5378 } else if (_invocation > 0) {
5379 _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
5380 } else {
5381 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
5382 }
5383 return;
5384 }
5385
5386 igvn->hash_insert(nmm);
5387 record_for_optimizer(nmm);
5388 }
5389
5390 _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);
5391
5392 // Phase 4: Update the inputs of non-instance memory Phis and
5393 // the Memory input of memnodes
5394 // First update the inputs of any non-instance Phi's from
5395 // which we split out an instance Phi. Note we don't have
5396 // to recursively process Phi's encountered on the input memory
5397 // chains as is done in split_memory_phi() since they will
5398 // also be processed here.
5399 for (int j = 0; j < orig_phis.length(); j++) {
5400 PhiNode *phi = orig_phis.at(j);
5401 int alias_idx = _compile->get_alias_index(phi->adr_type());
5402 igvn->hash_delete(phi);
5403 for (uint i = 1; i < phi->req(); i++) {
5404 Node *mem = phi->in(i);
5405 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
5406 if (_compile->failing()) {
5407 return;
5408 }
5409 if (mem != new_mem) {
5410 phi->set_req(i, new_mem);
5411 }
5412 }
5413 igvn->hash_insert(phi);
5414 record_for_optimizer(phi);
5415 }
5416
5417 // Update the memory inputs of MemNodes with the value we computed
5418 // in Phase 2 and move stores memory users to corresponding memory slices.
5419 // Disable memory split verification code until the fix for 6984348.
5420 // Currently it produces false negative results since it does not cover all cases.
5421 #if 0 // ifdef ASSERT
5422 visited.Reset();
5423 Node_Stack old_mems(arena, _compile->unique() >> 2);
5424 #endif
5425 for (uint i = 0; i < ideal_nodes.size(); i++) {
5426 Node* n = ideal_nodes.at(i);
5427 Node* nmem = get_map(n->_idx);
5428 assert(nmem != nullptr, "sanity");
5429 if (n->is_Mem()) {
5430 #if 0 // ifdef ASSERT
5431 Node* old_mem = n->in(MemNode::Memory);
5432 if (!visited.test_set(old_mem->_idx)) {
5433 old_mems.push(old_mem, old_mem->outcnt());
5434 }
5435 #endif
5436 assert(n->in(MemNode::Memory) != nmem, "sanity");
5437 if (!n->is_Load()) {
5438 // Move memory users of a store first.
5439 move_inst_mem(n, orig_phis);
5440 }
5441 // Now update memory input
5442 igvn->hash_delete(n);
5443 n->set_req(MemNode::Memory, nmem);
5444 igvn->hash_insert(n);
5445 record_for_optimizer(n);
5446 } else {
5447 assert(n->is_Allocate() || n->is_CheckCastPP() ||
5448 n->is_AddP() || n->is_Phi() || n->is_NarrowMemProj(), "unknown node used for set_map()");
5449 }
5450 }
5451 #if 0 // ifdef ASSERT
5452 // Verify that memory was split correctly
5453 while (old_mems.is_nonempty()) {
5454 Node* old_mem = old_mems.node();
5455 uint old_cnt = old_mems.index();
5456 old_mems.pop();
5457 assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
5458 }
5459 #endif
5460 _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_4, 5);
5461 }
5462
5463 #ifndef PRODUCT
5464 int ConnectionGraph::_no_escape_counter = 0;
5465 int ConnectionGraph::_arg_escape_counter = 0;
5466 int ConnectionGraph::_global_escape_counter = 0;
5467
5468 static const char *node_type_names[] = {
5469 "UnknownType",
5470 "JavaObject",
5471 "LocalVar",
5472 "Field",
5473 "Arraycopy"
5474 };
5475
5476 static const char *esc_names[] = {
5477 "UnknownEscape",
5478 "NoEscape",
5479 "ArgEscape",
5480 "GlobalEscape"
5481 };
5482
5483 const char* PointsToNode::esc_name() const {
5484 return esc_names[(int)escape_state()];
5485 }
5486
5487 void PointsToNode::dump_header(bool print_state, outputStream* out) const {
5488 NodeType nt = node_type();
5489 out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
5490 if (print_state) {
5491 EscapeState es = escape_state();
5492 EscapeState fields_es = fields_escape_state();
5493 out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
5494 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
5495 out->print("NSR ");
5496 }
5497 }
5498 }
5499
5500 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
5501 dump_header(print_state, out);
5502 if (is_Field()) {
5503 FieldNode* f = (FieldNode*)this;
5504 if (f->is_oop()) {
5505 out->print("oop ");
5506 }
5507 if (f->offset() > 0) {
5508 out->print("+%d ", f->offset());
5509 }
5510 out->print("(");
5511 for (BaseIterator i(f); i.has_next(); i.next()) {
5512 PointsToNode* b = i.get();
5513 out->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
5514 }
5515 out->print(" )");
5516 }
5517 out->print("[");
5518 for (EdgeIterator i(this); i.has_next(); i.next()) {
5519 PointsToNode* e = i.get();
5520 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
5521 }
5522 out->print(" [");
5523 for (UseIterator i(this); i.has_next(); i.next()) {
5524 PointsToNode* u = i.get();
5525 bool is_base = false;
5526 if (PointsToNode::is_base_use(u)) {
5527 is_base = true;
5528 u = PointsToNode::get_use_node(u)->as_Field();
5529 }
5530 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
5531 }
5532 out->print(" ]] ");
5533 if (_node == nullptr) {
5534 out->print("<null>%s", newline ? "\n" : "");
5535 } else {
5536 _node->dump(newline ? "\n" : "", false, out);
5537 }
5538 }
5539
5540 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
5541 bool first = true;
5542 int ptnodes_length = ptnodes_worklist.length();
5543 for (int i = 0; i < ptnodes_length; i++) {
5544 PointsToNode *ptn = ptnodes_worklist.at(i);
5545 if (ptn == nullptr || !ptn->is_JavaObject()) {
5546 continue;
5547 }
5548 PointsToNode::EscapeState es = ptn->escape_state();
5549 if ((es != PointsToNode::NoEscape) && !Verbose) {
5550 continue;
5551 }
5552 Node* n = ptn->ideal_node();
5553 if (n->is_Allocate() || (n->is_CallStaticJava() &&
5554 n->as_CallStaticJava()->is_boxing_method())) {
5555 if (first) {
5556 tty->cr();
5557 tty->print("======== Connection graph for ");
5558 _compile->method()->print_short_name();
5559 tty->cr();
5560 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
5561 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
5562 tty->cr();
5563 first = false;
5564 }
5565 ptn->dump();
5566 // Print all locals and fields which reference this allocation
5567 for (UseIterator j(ptn); j.has_next(); j.next()) {
5568 PointsToNode* use = j.get();
5569 if (use->is_LocalVar()) {
5570 use->dump(Verbose);
5571 } else if (Verbose) {
5572 use->dump();
5573 }
5574 }
5575 tty->cr();
5576 }
5577 }
5578 }
5579
5580 void ConnectionGraph::print_statistics() {
5581 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", AtomicAccess::load(&_no_escape_counter), AtomicAccess::load(&_arg_escape_counter), AtomicAccess::load(&_global_escape_counter));
5582 }
5583
5584 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) {
5585 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation
5586 return;
5587 }
5588 for (int next = 0; next < java_objects_worklist.length(); ++next) {
5589 JavaObjectNode* ptn = java_objects_worklist.at(next);
5590 if (ptn->ideal_node()->is_Allocate()) {
5591 if (ptn->escape_state() == PointsToNode::NoEscape) {
5592 AtomicAccess::inc(&ConnectionGraph::_no_escape_counter);
5593 } else if (ptn->escape_state() == PointsToNode::ArgEscape) {
5594 AtomicAccess::inc(&ConnectionGraph::_arg_escape_counter);
5595 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) {
5596 AtomicAccess::inc(&ConnectionGraph::_global_escape_counter);
5597 } else {
5598 assert(false, "Unexpected Escape State");
5599 }
5600 }
5601 }
5602 }
5603
5604 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
5605 if (_compile->directive()->TraceEscapeAnalysisOption) {
5606 assert(ptn != nullptr, "should not be null");
5607 assert(reason != nullptr, "should not be null");
5608 ptn->dump_header(true);
5609 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
5610 PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
5611 tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
5612 }
5613 }
5614
5615 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
5616 if (_compile->directive()->TraceEscapeAnalysisOption) {
5617 stringStream ss;
5618 ss.print("propagated from: ");
5619 from->dump(true, &ss, false);
5620 return ss.as_string();
5621 } else {
5622 return nullptr;
5623 }
5624 }
5625
5626 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
5627 if (_compile->directive()->TraceEscapeAnalysisOption) {
5628 stringStream ss;
5629 ss.print("escapes as arg to:");
5630 call->dump("", false, &ss);
5631 return ss.as_string();
5632 } else {
5633 return nullptr;
5634 }
5635 }
5636
5637 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
5638 if (_compile->directive()->TraceEscapeAnalysisOption) {
5639 stringStream ss;
5640 ss.print("is merged with other object: ");
5641 other->dump_header(true, &ss);
5642 return ss.as_string();
5643 } else {
5644 return nullptr;
5645 }
5646 }
5647
5648 #endif
5649
5650 void ConnectionGraph::record_for_optimizer(Node *n) {
5651 _igvn->_worklist.push(n);
5652 _igvn->add_users_to_worklist(n);
5653 }